-- Alter Index
---------------
ALTER INDEX <INDEXNAME> REBUILD;
-- Alter Sequence:
-------------------
select "QBEXSQL"."BillingDocNumbers".nextval
from dummy;
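-- No ALTER SEQUENCE statement is shown above; a minimal sketch (the new values
-- below are illustrative, not from the original notes):
ALTER SEQUENCE "QBEXSQL"."BillingDocNumbers"
RESTART WITH 1
MAXVALUE 1000;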
-- Alter Table:
---------------
CREATE COLUMN TABLE "QBEXSQL"."EMPtab4ALTER"
(
"EMPID" INT ,
"EMPNAME" VARCHAR(20),
"DEPTNO" VARCHAR(5)
);
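-- The Drop Column example below removes a "DEPTNAME" column that is not part of
-- the CREATE statement above; presumably it was added first. A possible sketch:
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
ADD ( "DEPTNAME" VARCHAR(20) );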
-- Drop Column
---------------
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
DROP ( "DEPTNAME" );
-- Add Constraint
-----------------
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
ADD CONSTRAINT PK PRIMARY KEY ("EMPID");
-- Preload Clause
-----------------
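-- No statement is shown for the preload clause; a sketch of the typical forms,
-- reusing the demo table above:
ALTER TABLE "QBEXSQL"."EMPtab4ALTER" PRELOAD ALL;
ALTER TABLE "QBEXSQL"."EMPtab4ALTER" PRELOAD ( "EMPNAME" );
ALTER TABLE "QBEXSQL"."EMPtab4ALTER" PRELOAD NONE;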
-- Add Partition
-----------------
ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"
ADD PARTITION VALUE = '2015-08' ;
-- Drop Partition:
-------------------
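-- No statement is shown for dropping a partition; mirroring the ADD PARTITION
-- syntax above, a sketch might be:
ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"
DROP PARTITION VALUE = '2015-08' ;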
-- Auto Merge
-------------
ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"
DISABLE AUTOMERGE ;
-- Unload Priority
------------------
ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"
UNLOAD PRIORITY 6 ;
-- Auto merge is carried out in the background by the system's mergedog process.
-- Create DB Table:
-------------------
--1. Create Simple COLUMN Table
-- Comments:
------------
CREATE COLUMN TABLE "QBEXSQL"."EMPCOMMENTS"
(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);
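-- The comment statements themselves are not shown above; a sketch on the demo
-- table (the comment texts are illustrative):
COMMENT ON TABLE "QBEXSQL"."EMPCOMMENTS" IS 'Employee table used for the comment demo';
COMMENT ON COLUMN "QBEXSQL"."EMPCOMMENTS"."EMPNAME" IS 'Name of the employee';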
-- Table Partitioning
----------------------
-- Hash
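-- No hash partitioning statement is shown; a sketch (table and column names are
-- assumed for illustration):
CREATE COLUMN TABLE "QBEXSQL"."HASHPARTDEMO"
(
 "EMPID"   INT,
 "EMPNAME" VARCHAR(20),
 PRIMARY KEY ("EMPID")
)
PARTITION BY HASH ("EMPID") PARTITIONS 4;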
-- Create Index
----------------
-- The CREATE INDEX statement creates an index on a table with the selected column(s).
-- When column data types are character string types, binary string types, decimal types,
-- or when the constraint is a composite key, or a non-unique constraint, the default index type is CPBTREE.
-- In other cases BTREE will be used. If neither BTREE nor CPBTREE keyword is specified,
-- then the SAP HANA database chooses the appropriate index type.
---------------------------------------------------------------------------------------------------------------------------------
--INDEXES : Indexes currently defined on tables.
--INDEX_COLUMNS : Index column information.
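-- No CREATE INDEX statement is shown above; a sketch on the demo table
-- (the index names are illustrative):
CREATE INDEX "IDX_EMPNAME" ON "QBEXSQL"."EMPCOMMENTS" ("EMPNAME");
-- or with an explicit index type:
CREATE CPBTREE INDEX "IDX_EMPNAME_CP" ON "QBEXSQL"."EMPCOMMENTS" ("EMPNAME");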
-- Range Partitioning
---------------------
(
PARTITION '2015-01-01' <= VALUES < '2015-06-01',
PARTITION VALUE = '2015-07-15', PARTITION OTHERS
);
(
PARTITION '2015-01' <= VALUES < '2015-06',
PARTITION VALUE = '2015-07', PARTITION OTHERS
);
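-- The partition specifications above are shown without the enclosing CREATE
-- statement; a sketch for "RANGEPARTMONTH" (the column layout is assumed from
-- the INSERT below):
CREATE COLUMN TABLE "QBEXSQL"."RANGEPARTMONTH"
(
 "ORDERID"   INT,
 "ORDERDATE" VARCHAR(10),
 "NETSALES"  DECIMAL(15,2)
)
PARTITION BY RANGE ("ORDERDATE")
(
 PARTITION '2015-01' <= VALUES < '2015-06',
 PARTITION VALUE = '2015-07', PARTITION OTHERS
);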
INSERT INTO "QBEXSQL"."RANGEPARTMONTH"
VALUES (1,'2015-01-15',15000.00);
-- Others:
-----------
1. Rename Column:
-----------------
rename COLUMN "QBEXSQL"."EMPtab4ALTER"."EMPID" TO "EMPNO";
2. Rename Table:
-----------------
RENAME TABLE "QBEXSQL"."EMPtab4ALTER1" TO "FORALTERS";
3. Rename Index:
-----------------
RENAME INDEX <OLDNAME> TO <NEWNAME>;
-- Round Robin:
---------------
-- No Primary key
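-- Round-robin partitioning requires that the table has no primary key;
-- a sketch (table and column names are assumed):
CREATE COLUMN TABLE "QBEXSQL"."ROUNDROBINDEMO"
(
 "ORDERID" INT,
 "REGION"  VARCHAR(10)
)
PARTITION BY ROUNDROBIN PARTITIONS 4;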
-- Schema
----------
Create Schema "QBEXSQL";
-- Create Sequence
------------------
CREATE SEQUENCE "QBEXSQL"."BillingDocNumbers"
START WITH 1
INCREMENT BY 1
MINVALUE 1
MAXVALUE 100
CYCLE
CACHE 5
RESET BY
( SELECT MAX("EMPID") FROM "QBEXSQL"."EMPSIMPLEIND");
-- Unload Priority
-------------------
-- Unload priority ranges from 0 to 9 (tables with a higher priority are unloaded from memory earlier).
1. Delete
2. Explain Plan
3. Insert
4. Load
5. Merge Delta
6. Replace / Upsert
7. Select
8. Truncate Table
9. Unload
10. Update
-- Delete:
----------
DELETE FROM "QBEXSQL"."DemoUpdate2";
-- Delta Merge:
---------------
- Main Store / Delta Store
- Types of Merging:
--------------------
- Auto Merge     - triggered by the mergedog background process - uses a decision function
- Smart Merge    - requested by the application via SQL statement - uses a decision function
- Memory Merge   - merge takes place in main memory only - no changes to data persistence
- Hard Merge     - triggered by SQL statement - no decision function - requires a merge token
- Forced Merge   - triggered by SQL statement - requires a merge token - runs even when the system is busy
- Critical Merge - triggered by the system when the delta store exceeds a threshold value
--Hard Merge
MERGE DELTA OF "QBEXSQL"."DemoMerging";
-- Smart Merge
MERGE DELTA OF "QBEXSQL"."DemoMerging"
WITH PARAMETERS ('SMART_MERGE' = 'ON');
-- Memory Merge
MERGE DELTA OF "QBEXSQL"."DemoMerging"
WITH PARAMETERS ('SMART_MERGE' = 'ON', 'MEMORY_MERGE' = 'ON');
-- Forced Merge
-- Auto Merge
-- Critical Merge
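-- The statements for the remaining merge types are not shown above; a sketch:
-- Forced merge (runs even when the system is busy):
MERGE DELTA OF "QBEXSQL"."DemoMerging"
WITH PARAMETERS ('FORCED_MERGE' = 'ON');
-- Auto merge is enabled/disabled per table (it is triggered by mergedog, not by SQL):
ALTER TABLE "QBEXSQL"."DemoMerging" ENABLE AUTOMERGE;
-- Critical merge is triggered by the system itself when the delta store exceeds
-- a threshold; there is no SQL statement for it.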
-- Explain Plan:
----------------
-- Use Tables "product", "customer", "fctsales"
SELECT T2."COMPANYNAME",
T3."PRODUCTID",
SUM(T1."NETSALES")
FROM "QBEXSQL"."FCTSALES" T1
INNER JOIN "QBEXSQL"."CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
INNER JOIN "QBEXSQL"."PRODUCT" T3
ON T3."PRODUCTID" = T1."PRODUCTID"
GROUP BY T2."COMPANYNAME",T3."PRODUCTID";
-- The same query wrapped in EXPLAIN PLAN:
EXPLAIN PLAN SET STATEMENT_NAME = 'MyfirstQuery' for
SELECT T2."COMPANYNAME",
T3."PRODUCTID",
SUM(T1."NETSALES")
FROM "QBEXSQL"."FCTSALES" T1
INNER JOIN "QBEXSQL"."CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
INNER JOIN "QBEXSQL"."PRODUCT" T3
ON T3."PRODUCTID" = T1."PRODUCTID"
GROUP BY T2."COMPANYNAME",T3."PRODUCTID";
-- Insert:
----------
-- The INSERT statement adds a record to a table.
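-- A sketch, assuming "DemoInsert" has the columns used in the UPDATE examples
-- further below (ORDERID, ORDERDATE, REGION):
INSERT INTO "QBEXSQL"."DemoInsert" ("ORDERID", "ORDERDATE", "REGION")
VALUES (1, '2015-01-01', 'WEST');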
-- LOAD
-------
--The LOAD statement explicitly loads column store table data into memory, rather than waiting for it to be loaded on first access.
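-- A sketch using the demo table referenced in the UNLOAD section below:
LOAD "QBEXSQL"."DemoLoad" ALL;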
-- Select
---------
-- SELECT <Select_clause>
-- <from_clause>
-- <where_clause>
--<groupby_clause>
--<having_clause>
--<set Operator>
--<order by Clause>
--<limit>
-- SELECT
----------
SET SCHEMA "QBEXSQL";
SELECT *
FROM "CUSTOMER";
SELECT "COUNTRYNAME"
FROM "CUSTOMER";
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" = 1;
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" < 5;
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" > 110;
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" != 1;
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" <> 1;
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" >= 110;
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" BETWEEN 1 AND 5;
SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" NOT BETWEEN 1 AND 5;
SELECT *
FROM "DemoMerging1"
WHERE "ORDERDATE" IS NOT NULL;
SELECT *
FROM "DemoMerging1"
WHERE "ORDERDATE" IS NULL;
SELECT *
FROM "CUSTOMER"
WHERE CONTAINS ("COMPANYNAME", 'Zap Corp.');
SELECT *, Score()
FROM "CUSTOMER"
WHERE CONTAINS ("COMPANYNAME", 'Zap Corp.', FUZZY(0.5));
SELECT MIN("NETSALES")
FROM "FCTSALES";
SELECT M("NETSALES")
FROM "FCTSALES";
-- Set Operator
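-- No set operator example is shown; a sketch on the CUSTOMER table:
SELECT "COUNTRYNAME" FROM "CUSTOMER" WHERE "CUSTOMERID" <= 5
UNION
SELECT "COUNTRYNAME" FROM "CUSTOMER" WHERE "CUSTOMERID" >= 100;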
-- join
--------
SELECT T2."COMPANYNAME", SUM(T1."NETSALES")
FROM "FCTSALES" T1
INNER JOIN "CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
GROUP BY T2."COMPANYNAME";
create column table t1 ( id int primary key, customer varchar(5), year int, product varchar(5), sales int );
insert into t1 values(1, 'C1', 2009, 'P1', 100);
insert into t1 values(2, 'C1', 2009, 'P2', 200);
insert into t1 values(3, 'C1', 2010, 'P1', 50);
insert into t1 values(4, 'C1', 2010, 'P2', 150);
insert into t1 values(5, 'C2', 2009, 'P1', 200);
insert into t1 values(6, 'C2', 2009, 'P2', 300);
insert into t1 values(7, 'C2', 2010, 'P1', 100);
-- Truncate:
------------
--Deletes all rows from a table. TRUNCATE is faster than DELETE FROM when deleting all records from a table,
--but TRUNCATE cannot be rolled back.
--To be able to rollback from record deletion, DELETE should be used.
-- Use "QBEXSQL"."DemoInsert"
-- DELETE can be rolled back (assumed first step of the demo):
DELETE FROM T;
ROLLBACK;
SELECT * FROM T;      -- the deleted rows are back
-- TRUNCATE cannot be rolled back:
TRUNCATE TABLE T;
ROLLBACK;
SELECT * FROM T;      -- the table remains empty
DROP TABLE T;
-- Unload:
----------
--The UNLOAD statement unloads the column store table from memory. This can be done to free up memory.
--The table will be loaded again on next access.
UNLOAD "QBEXSQL"."DemoLoad";
-- Update
----------
--use Table "QBEXSQL"."DemoForData", "QBEXSQL"."DemoInsert"
UPDATE "QBEXSQL"."DemoInsert"
SET "REGION" = 'EAST'
WHERE "ORDERID" = 1;
UPDATE "QBEXSQL"."DemoInsert"
SET "REGION" = 'EAST',
"ORDERDATE" = '9999-01-01'
WHERE "ORDERID" = 2;
-- Upsert / replace:
---------------------
-- Use Table "QBEXSQL"."DemoUpdate", "QBEXSQL"."DemoForData", "QBEXSQL"."DemoInsert",
"QBEXSQL"."DemoUpdate2"
UPSERT "QBEXSQL"."DemoUpdate2"
VALUES (1,99)
WHERE "KEY" = 1;
UPSERT "QBEXSQL"."DemoUpdate2"
VALUES (9,99)
WHERE "KEY" = 9;
UPSERT "QBEXSQL"."DemoUpdate2"
VALUES (88,99)
WITH PRIMARY KEY;
UPSERT <TABLENAME>
<SUB QUERY>;
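-- A sketch of the subquery form, assuming "DemoUpdate" has the same structure as
-- "DemoUpdate2" (the target table must have a primary key):
UPSERT "QBEXSQL"."DemoUpdate2"
SELECT * FROM "QBEXSQL"."DemoUpdate";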
-- Functions:
-------------
1. A procedure can return zero or more values, whereas a function must return exactly one value.
2. Procedures can have input and output parameters, whereas functions can have only input parameters.
3. A procedure allows SELECT as well as DML statements in it, whereas a function allows only SELECT statements.
4. Functions can be called from a procedure, whereas procedures cannot be called from a function.
5. Exceptions can be handled by a try-catch block in a procedure, whereas a try-catch block cannot be used in a function.
6. Procedures cannot be used in a SELECT statement, whereas a function can be embedded in a SELECT statement.
7. A UDF can be used anywhere in the WHERE / HAVING / SELECT clauses of an SQL statement, whereas
stored procedures cannot.
8. UDFs that return tables can be treated as another rowset and used in JOINs with other tables.
-- Returns table
-----------------
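-- The function body itself is not preserved in these notes; the header and body
-- below are an assumed minimal reconstruction of a table UDF, closed by the END;
-- that follows:
CREATE FUNCTION "QBEXSQL"."FN_EMPLOYEES" ()
RETURNS TABLE ("EMPID" INT, "EMPNAME" VARCHAR(20))
LANGUAGE SQLSCRIPT AS
BEGIN
RETURN SELECT "EMPID", "EMPNAME" FROM "QBEXSQL"."EMPCOMMENTS";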
END;
-- TO_TIMESTAMP
SELECT TO_TIMESTAMP ('2010-01-11 13:30:00', 'YYYY-MM-DD HH24:MI:SS') "to timestamp" FROM DUMMY;
SELECT TO_TIMESTAMP ('2010-01-11 13:30:00', 'YYYY-MM-DD HH:MI:SS') "to timestamp" FROM DUMMY;
-- Add Days
SELECT ADD_DAYS (TO_DATE ('2009-12-05', 'YYYY-MM-DD'), 30) "add days" FROM DUMMY;
-- Current Date
-- Current Time
-- Current Timestamp
-- Days Between
-- Extract
SELECT EXTRACT (YEAR FROM TO_DATE ('2010-01-04', 'YYYY-MM-DD')) "extract" FROM DUMMY;
-- MONTH
-- Month Name
-- Now
-- Week
-- WeekDay
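-- Sketches for the date/time functions listed above without examples (the dates
-- are illustrative):
SELECT CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP FROM DUMMY;
SELECT DAYS_BETWEEN (TO_DATE('2009-12-05','YYYY-MM-DD'), TO_DATE('2010-01-05','YYYY-MM-DD')) "days between" FROM DUMMY;
SELECT MONTH (TO_DATE('2010-01-04','YYYY-MM-DD')) "month", MONTHNAME (TO_DATE('2010-01-04','YYYY-MM-DD')) "month name" FROM DUMMY;
SELECT NOW() "now" FROM DUMMY;
SELECT WEEK (TO_DATE('2010-01-04','YYYY-MM-DD')) "week", WEEKDAY (TO_DATE('2010-01-04','YYYY-MM-DD')) "weekday" FROM DUMMY;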
-- Number Functions:
--------------------
-- ABS
-- CEIL - Returns the first integer that is greater than or equal to the value n.
-- Floor - Returns the largest integer that is not greater than the numeric argument n.
-- POWER
-- Round
-- sqrt
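-- A sketch covering the number functions listed above (the values are illustrative):
SELECT ABS(-10) "abs", CEIL(4.3) "ceil", FLOOR(4.7) "floor",
       POWER(2, 10) "power", ROUND(16.16, 1) "round", SQRT(9) "sqrt"
FROM DUMMY;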
-- SQL Functions:
-----------------
-- Data Type Conversion Functions
-- DateTime Functions
-- Fulltext Functions
-- Number Functions
-- String Functions
-- Window Functions
-- Miscellaneous Functions
-- String Functions :
--------------------
-- CHAR - Returns the character corresponding to a given ASCII value
-- CONCAT
-- LCASE
-- LEFT
SELECT LEFT ('Hello', 3) "left" FROM DUMMY;
-- LENGTH
-- LOCATE
-- RIGHT
-- RPAD
-- Substring After
-- Substring Before
-- Sub String
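-- Sketches for the string functions listed above without examples (the literals
-- are illustrative):
SELECT CHAR(65) "char", CONCAT('Hello', 'World') "concat", LCASE('HANA') "lcase",
       LENGTH('Hello') "length", LOCATE('Hello World', 'World') "locate",
       RIGHT('Hello', 3) "right", RPAD('Hi', 5, 'x') "rpad"
FROM DUMMY;
SELECT SUBSTR_AFTER('Hello World', ' ') "after", SUBSTR_BEFORE('Hello World', ' ') "before",
       SUBSTRING('Hello World', 1, 5) "substring"
FROM DUMMY;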
-- Aggregate Functions:
-----------------------
select class, val, offset,
COUNT(*) over (partition by class) as c1,
COUNT(offset) over (partition by class) as c2,
COUNT(*) over (partition by class order by val) as c3,
COUNT(offset) over (partition by class order by val) as c4,
MAX(val) over (partition by class) as m1,
MAX(val) over (partition by class order by val) as m2
from T;
-- Window Functions:
---------------------
RANK() - Returns rank of a row within a partition, starting from 1.
Returns duplicate values in the ranking sequence when there are ties between values.
FIRST_VALUE ( <expression> ) - Returns the value of <expression> evaluated at the first row of the window frame.
LAST_VALUE ( <expression> ) - Returns the value of <expression> evaluated at the last row of the window frame.
NTH_VALUE ( <expression> , <n> ) - Returns the value of <expression> evaluated at the <n>-th row
from the first row of the window frame.
LEAD ( <expression> [, <offset> [, <default_expr>]] ) - Returns the value of <expression> evaluated at the row
<offset> rows after the current row. The <offset> should be non-negative and defaults to 1.
If the <offset> crosses the boundaries of the partition, the <default_expr> value is returned.
If <default_expr> is not specified, NULL is returned. The <offset> and <default_expr> are evaluated at the current
row.
The output of the LEAD function can be non-deterministic among tie values.
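-- A sketch of LEAD on the same table T used in the aggregate example above:
SELECT class, val,
       LEAD(val) OVER (PARTITION BY class ORDER BY val) AS next_val,
       LEAD(val, 2, -1) OVER (PARTITION BY class ORDER BY val) AS val_after_next
FROM T;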
-- Ranking Functions:
---------------------
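-- A sketch of the ranking functions on the same table T:
SELECT class, val,
       RANK() OVER (PARTITION BY class ORDER BY val) AS rnk,
       DENSE_RANK() OVER (PARTITION BY class ORDER BY val) AS dense_rnk,
       ROW_NUMBER() OVER (PARTITION BY class ORDER BY val) AS row_num
FROM T;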
-- Window Functions
--------------------
======================================================
<window_function_type> ::=
<rank_func> | <dense_rank_func> | <row_number_func> |
<percent_rank_func> | <cume_dist_func> | <ntile_func> |
<lead_func> | <lag_func> | <first_value_func> | <last_value_func> |
<nth_value_func> | <window_aggregate_func>
==========================================================
<window_order_by_clause> ::= ORDER BY {<window_order_by_expression>}
<window_order_by_expression> ::= <expression> [ ASC | DESC ] [ NULLS FIRST | NULLS LAST ]
Parallelization
Partitioning allows operations to be parallelized by using several execution threads for each table.
Partition pruning
Queries are analyzed to determine whether or not they match the given partitioning specification of a table.
If a match is found, it is possible to determine the actual partitions that hold the data being queried.
Using this method, the overall load on the system can be reduced, thus improving the response time.
For example, if a table is partitioned by year, a query restricted to the data of one year is executed
only on the partition with data for this year.
The performance of the delta merge operation depends on the size of the main index.
If data is only being modified on some partitions, fewer partitions will need to be delta merged
and therefore performance will be better.
Applications may actively control partitions, for example, by adding partitions to store the data for an upcoming
month.
Ex: Control the user only to check the data of country Mexico.
How to create:
Click on catalouge - models - Analytical privileges
Name : < >
How to test.
Create user (give access too sys_bi,syc_bic,sys_repo, and give access to the modesls in current schema )
Ooptions include to edit,maintain,activate.
The same screen would have tabs for Granted roles / system priv / obejct priv / Analytical priv / System priv
Execute the report and you can see the data for only authorised privilege.
Dynamic Authorization
--------------------
The authorization should check the table and based on the entry maintained in the table data retrived.
Analytical View
Properties
------------
Analytical views can be created without attribute views
Attribute views can be consumed in Analytical views.
Measures can only be selected from a single fact table.
Multiple fact tables can be used.
Define Central entity for fact tables.
Formulate star join using fact tables and attribute views.
Data Category
- cube.
Cache Validation:
- Hourly / Daily.
If complex join conditions are used, two column views will be created:
- OLAP catalogue column view.
- Calculation catalogue column view.
Calculated Column:
------------------
- Calculated Columns (Hidden / Calculate Before Aggregation - MTD with ex rates of EOM)
Types of calculated column:
- Measure
- Attribute
- Counter.
Calculated columns from attribute views can be seen in Analytical views.
Restricted Columns.
-------------------
Display the data measures only for the required period.
- e.g. Gross Amount restricted to 2012; values outside the restriction are not shown, similar to conditions / restricted key figures in BEx.
Filters
------
Possible in Data foundation.
Currency Conversion
-------------------
Variable -
--------
Applied to the data after the whole data set is fetched.
Go to Semantics - Variables / Input Parameters - Create Variable.
Selection type
- Single
- Interval
- Range
- Multiple Entries.
Default Value - Constant / Expression.
Input Parameters
----------------
Applied to the data like a WHERE clause.
Parameter types
- Direct
- Column
- Derived from table.
- Static list.
Default Value - Constant / Expression.
Cascading prompts - Region (EMEA) acts as input for Country (DE, FR, UK).
You can place the input parameter in the filter as a placeholder, which receives
its value at runtime.
Additional columns are created for history-enabled tables; they can be seen in the view M_CS_ALL_COLUMNS.
- $trex_udit$
- $Row_id$
- $Valid_from$
- $Valid_to$
Allow relational optimization: when you have complex calculations in your analytical views, the engine tends to create sub-selects
and one more calculation column view on top of the OLAP catalogue column view;
in these circumstances, to improve performance, we check "allow relational optimization".
Attribute View
PORTS
INDEX - 3XX15
XS - 80XX
HANA as Modelling Tool
-----------------------
Packages
Attribute & Measures
Information Views
-Attribute Views
-Analytical Views
-Calculation Views
Analytical Privileges
Stored Procedures
Decision Tables
Attribute - similar to master data (MD) in BW, or a dimension
Measure - a quantifiable value, like a key figure (KF) in BW
- Column views
- EPM models
- EPM query sources
- Functions
- Indexes
- Procedures
- Sequences
- Synonyms
- Triggers
- Views
- Tables
-Content - All modelling objects
-Schema
-Attribute Views
-Analytical Views
-Calculation Views
-Analytical Privileges
-Procedures
-Decision Tables
Attribute Views can be used in Analytical views
Attribute Views can be used in calculation views
Analytical views can be used in calculation views
calculation views can be used in calculation views
Attribute views and Analytical views cannot be created using calculation views.
The same joins could be modelled in BO; however, since BO is the application layer,
performing the joins in HANA pushes the logic down into the HANA database.
Types of Attributes
--------------------
- Simple
- Calculated
- Local (remove reference to behave differently)
Types of Measures
-----------------
- Simple
- Calculated
- Restricted
- Counters
Attribute views - Semantics - Data foundation
Types
- Standard
- Time
- Derived (only the description can be changed) (e.g. Supplier & Business Partner)
Hierarchies
----------
- Level based. - City / Country etc
- Parent Child based. - Employee / Manager
Other options include
Multiparent
Orphan nodes show under root node.
Time data is filled with the Quick View - Generate Time Data, with input granularity Year / Month / Day.
Generate concatenated attributes - in case of multiple keys ($MAT$PLANT) a new key is created;
it can be checked in the view M_CS_ALL_COLUMNS.
How to do simple reporting - use Analysis for OLAP, which integrates with Excel.
How to display a label - go to Semantics and change the label column of the attribute.
- Text Join
- Referential Join
- Inner Join
- Left outer join
- Right outer Join.
If complex join conditions are used, two column views will be created:
- OLAP catalogue column view.
- Calculation catalogue column view.
Calculation View
Everything that can be done by Attribute views and Analytical views can be done with CALC views.
Data Categories
- Cube - Measures / Reporting possible.
- Dimension - No Measures allowed
- Blank - Measures allowed but no reporting, can be used in another CV
Analytical View.
- Column store tables
- Attribute views
- Attribute View
- Analytical View
- Database table
- Column view
- Table UDF
- Join
- Union
- Projection
- Rank
- Aggregation
Graphical calculation view can be used in Script based view, Script based view can be used in graphical.
Joins such as temporal and referential joins can only be used in the star join of a calculation view with data category
cube with star join.
In a calculation view of type dimension, joins such as inner, left outer, right outer and text join can be used.
If there are many joins, execute the calculation view in the SQL engine.
Create an attribute view of type time, use it in the calculation view in the join node, and map the date from the output to
the time attribute.
This brings the hierarchies and other related time objects into your projection.
NULL behaviour (in the SQL engine, operations with NULL always return NULL)
---------------------
1 + NULL = NULL
1 * NULL = NULL
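This can be checked directly in the SQL console:
SELECT 1 + NULL "plus_null", 1 * NULL "times_null" FROM DUMMY;   -- both return NULL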
UNION NODE
-----------
Union is used to combine data that was earlier split (e.g. in BW) for scalability.
Ranking
--------
Options in Ranking
-----------------
Sorting Direction
- Ascending
- Descending
Threshold - 3, i.e. TOP 3 in India, TOP 3 in America, etc.
The threshold can be made dynamic with an input parameter.
Partition by - Region.
You should create the columns which define the structure of VAR_OUT; you can take the help of the tables while
creating the columns.
If the table from which the data is being read has a different column name, then use "AS" in the SQL query.
You can use input parameters wherever required with placeholders such as $$REGION$$.
However, use "AS" so that the name of the column in the query matches the column
mentioned in the VAR_OUT structure.
VAR_OUT = SELECT SUM(" ") AS GROSSAMOUNT, SUM(" ") AS NETAMOUNT FROM "_SYS_BIC"."<package>/<view name>"
(PLACEHOLDER."$$ipreg$$" => 'EMEA'); -- or use an input parameter instead of the literal 'EMEA', i.e. :IPREG
If there is a requirement to run the procedure daily, you can make use of an .xsjob, which can be used to schedule the procedure in
HANA native development.
Example: you have a calculation view where the MTD / YTD dates have to be supplied manually.
IF :<input_date> IS NOT NULL THEN        -- check whether the supplied date is null or not
  SELECT TO_DATE(:<input_date>, 'YYYYMMDD') INTO <target_variable> FROM DUMMY;
END IF;
During this process the calculation view gets converted to a table UDF; hence a table UDF is also one of the possible sources of a calculation view.
However, the created calculation view cannot be activated from the HANA Modeller; go to HANA native development and activate it.
Hierarchies would only be accessible if the underlying attribute views are used in the star join.
What is Keep Flag:
------------------
If you have scenarios where you are doing exception aggregation, which may need reporting at a lower level of aggregation,
and the column defining that granularity is not included in the result set, this can cause wrong results;
hence all such columns have to be marked with the keep flag.
This is similar to "calculate before aggregation" in an analytical view.
If aggregation is done only on the product category (e.g. Sports), the price would be aggregated across products and then multiplied,
which is wrong since the price is specific to each product.
Transparent filter
------------------
If queries contain filters on columns that you have not chosen to project, and you have defined a counter on a calculation view which you want to
use in another calculation view,
then you need to set the transparent filter flag to true for such columns in all nodes that contain the column, and for the node in the
calculation view that contains the counter.
If we have sales persons (S1, S2, S3) and you choose not to include sales person in projection 1, and the same is sent to the
aggregation node at the top, this would
give wrong output. (refer Day 26 26:54)
Dynamic Join:
============
A dynamic join is used when a join is defined on two columns, such as region and country. If you would like to calculate a percentage
(e.g. % of marks, like a percentile), the value can change depending on whether
you are calculating at region or country level; if dynamic join is not selected, the calculation always happens at the lowest
granularity.
If we set this join, aggregation happens before the join is executed and gives the desired output.
optimize join
============
Join performance is improved if multiple columns are joined and the cardinality is n:1 in a left outer join; by setting this flag,
additional data is pruned before the join is executed.
Propagate to semantics: if you would like to add a field in the bottom node, instead of adding it to each and every node,
right-click and choose "propagate to semantics",
which adds it to all nodes.
Input parameters in calculation view
-------------------------------------
DIRECT
COLUMN
DERIVED FROM TABLE
STATIC LIST
DERIVED FROM PROCEDURE
create procedure " "." " (out result_country nvarchar(20))
language sqlscript
sql security invoker
reads sql data
as
begin
  declare v_country nvarchar(20);
  declare v_region nvarchar(10) := 'APJ';
  declare cursor cursor_tmp for
    select "country" from <>.<> where "region" = :v_region;
  open cursor_tmp;
  fetch cursor_tmp into v_country;
  close cursor_tmp;
  result_country := :v_country;
end;
Select the column against the QA and SAP and give "price" * 10 and press ALT+ENTER
You can include both the views in calculation view projections and create a comparative analysis model.
This is used to make use of ECC extractors and load the data into HANA.
If we have a customer who has ECC with only the BW component and many DataSources,
in such scenarios we use DXC.
If the customer has both ECC and BW, a few of the DataSources go via DXC, and for a few he would like to
send the data to HANA;
there are some entities where we do not have BW reporting and we would like to send that data into HANA.
You can make your ECC exclusive for DXC or use a mixed approach.
se38 - SAP_RSADMIN_MAINTAIN
1 Private Views
2 Reuse Views
3 Query Views
Reuse Views = Multiple private views
Query Views = Multiple Reuse views
If the schema available in the R/3 (production) system where the models are available differs from the schema in the quality system,
or the schema used by HANA Live is different from the schema we have in our system, then do schema mapping so that the objects
come in and are saved in the local schema.
The same logic applies for multiple ECC systems consolidated into a single ECC and having a BW on top of it.
Visualize Plan:
===============
Right-click on the SQL query and choose "Visualize Plan", which shows the runtime of the selected queries.
Database schemas and catalogue objects are not transported; only content is transported.
Only the views are transported; the underlying data is not transported, and the data in DEV and QTY can be different.
Content packages are the only transportable items in HANA native transport; database objects are only exported and imported.
Steps to do transportation
==========================
Assign the package to the delivery unit / or double click on the package and assign to DU
When the views are transported to QTY, the underlying tables may refer to a different schema;
hence the schema needs to be replaced.
How to manage:
HTTP://sapqbex800/sap/hana/login/xs/lm
System:
Transport tab.
Steps:
Route Name:
Target
Source
Type: Full / Delta
Transport: DU / Product transport.
<select package>
Go to file
Export
- content
- catalogue objects
Catalogue objects
- 7qbex - add the tables that you would like to export.
Go to target system
File - import
If you would like to change the schema,
modify the schema name in the files exported to your local workstation.
Usually in SLT the tables are already created in the target system; however, if you have created some
tables manually and you would like to export them ad hoc, you use export and import.
The views between DEV and QTY are transferred by HANA native transport,
whereas the data in the tables is transferred between the systems using the data
provisioning techniques.
Create a package, right-click on it and choose Others - SAP HANA Transport Container.
Attach delivery unit.
Inner Join -
only the matching values are returned; for example, if Telangana master data is inner joined with country sales,
only the records present on both sides appear.
Left Outer
Right Outer
Referential joins
A referential join is used when there is a guaranteed link (referential integrity) between two tables, like header and item.
Text Joins
If multiple texts are maintained for customers / products in different languages, this can result in duplicate
records; in such situations we would go for a text join.
Temporal Join
The temporal column should be on the right-hand side, and the date-from and date-to columns on the left-hand side.
Spatial joins: a join based on longitude and latitude between GPS-enabled objects is called a spatial join.
Smart data Access
IN HANA
This lists all the adapters available in HANA; these are general ODBC connections
using which remote sources can be connected to HANA.
These include
Oracle
MSSQL
ODBC
BWAdapter
HANA adapter (via the DP Agent)
File adapter (via the DP Agent)
This gives the list of registered agents; using the adapters we can connect to the source system
and consume the tables as virtual tables in HANA.
Source name :
Adapter name:MSSQL ECC Adapter
Source location: Index server.
Once the connection is established, you can see the tables in the Provisioning folder in HANA.
This is a real-time view of the data, as we are consuming the tables virtually.
ECC can be accessed by connecting to the SQL Server database and reading its tables.
HANA has already got File adapter which can be used to extract the flatfile.
From SP9 we have a concept called smart data quality and integration, which can be done in real time
and in batch mode, where you can do transformations.
Steps
-----
Get into HANA native development.
Systems- SP9
- Content
- Schema
- Create package.
Once SDA is installed, HANA also supports ETL concepts; this is still under development.
Sample questions
----------------
1. Why use both SLT and BODS?
Real-time analysis is done, for example, on perishable products, or based on footfall, where the number of people entering the malls
is counted, etc.
Use BW for the ETL part and use HANA only for modelling; if you want virtual tables to be consumed, use SDA.
It is not allowed to load HANA Live on BW; HANA Live is on ECC, and hence
the ECC HANA and BW HANA systems were connected using SDA.
HANA is not a real-time database by itself, but by making use of SLT we can make it behave like one, by bringing the data
into HANA in real time.
Transaction systems include CRM & APO, HCM, or a third-party system.
SLT acts as a middleware ABAP server between the source and the target.
SLT can also push the data into PBW from latest versions.
Components of SLT
- READ engine.
- Write Engine.
- mapping and transformation engine.
Installation of SLT
KERNEL 7.0
ABAP 7.2
SLT
SAP ECC and SAP SLT are connected using an RFC connection, whereas HANA, as the database, is connected using a DB
connection.
To avoid multiple logins, HANA holds a data provisioning cockpit where you can operate the replication using SLT;
however, the actual replication is done by SLT. This can also be done from SLT directly.
- VBAK
- VBAP
- VBUK
REPLICATION STEPS.
-----------------
1. All historical data is loaded into HANA first - similar to an init load in BW.
2. Once loading is done, database triggers are created in the source system.
3. Three triggers are created, for insert, update and delete.
4. Log tables are created - they consist of the primary keys of the table.
Background Mechanism
--------------------
As soon as records are updated in the source tables, the database triggers are fired
and the changes are captured in the log tables; these are then pushed into HANA in real time.
Data can be transformed (filtering, additions, calculations, etc.) in real time using the
SLT mapping and transformation engine.
For each SLT configuration there will be a schema created in HANA, and all the tables replicated
using that configuration will be saved in that schema in HANA.
The SLT server can be maintained in the ECC system, as all the technical configuration needed is preconfigured in ECC.
The configuration can be done between multiple ECC systems and a single HANA, or multiple ECC systems and multiple HANA systems.
If tables with the same structure are sent to HANA and the data is similar, then create a new column with the source system,
similar to a compounding attribute in BW.
SLT Configuration:
The new schema would be created in HANA with the same name as the connection.
How to replicate
-----------------
Quick View - Data Provisioning - click on Replicate.
Other options include:
LOAD - brings only historical data
Replicate - brings historical data and creates a log with a trigger in the source.
STOP replication - deletes the triggers and logs
Suspend - triggers and logs stay intact and changes continue to be captured in the ECC log tables
Resume - resumes the replication from the logs
Tabs include
Administration Data - Processing Steps - Table Overview - Data Transfer Monitor - Application Logs - Load Statistics - Experts.
In the Table Overview you can see the tables that are being replicated.
List triggers
--------------
Tcode: IUUC_Triggers
You will find jobs & Connections. / you can also start and stop the masterjob from here.
Transformations in SLT:
======================
Use cases
Conversion of Data
Filtering
Structural changes - Add or Remove columns while we transfer the data to hana.
Partitioning of the table in SLT while being transferred to HANA.
Tabs in LTRC
1. IUUC REPL TBSTG: Table settings; this helps in defining the type of target table and the number of columns
in that table.
Here you can add an additional column or change the structure of the target
structure (click on Edit Table Structure).
Partition can be done here in the partition syntax: [ partition by hash (vbeln) partitions 4 ]
2. IUUC ASS RUL MAP: Assignment of rule mappings; if a new column has been added, or if you would like to
filter the data or perform additional transformations, this tab is used.
If you would like to implement, for example, 5 years, insert 5 rows; each row is one rule.
list of events.
----------------
E/BOP - Begin of processing
E/BOT - Begin of Transaction
E/BOL - Begin of Loop
E/BOR - Begin of Record
You can choose to write a line of code, which should not be more than 72 characters, or else you can use an include program.
Out of the 6 columns you see in this tab, 2 are for the event and the remaining 4 are to pass parameters.
The export field is the field for which you would like to populate data.
-----------
Ex:
If you are inserting the source as a new column, in the export column mention [sourceid] and import p1 as ['ECC'].
Now in the code line: e_sourceid = i_p1.
You can also create an include program and use it for the transformation, e.g. an SE38 program.
You can use IF conditions in the programs and can use inbuilt macros of SLT, such as skip_record, to filter data.
3. IUUC SPC PROCOPT is used to filter out the data at the source itself; this filter is for delta records only.
A similar filter can be applied here so that it applies directly at the database level.
4. IUUC PERF OPTION: this is the place where the number of jobs required to perform the transformations is mentioned.
Other settings:
---------------
No_drop : data in HANA will not be deleted before replication
ROW_store : a row store table will be created instead of a column store table
rd_portion_size : number of records per portion
Any action performed for replication creates an entry in the RS* control tables, similar to the log tables.
Within HANA there is a schema called SYS_REPL which has the table RS_REPL_COMPONENTS; every step of the
SLT configuration can be seen in this table.
Used to replicate:
- Non-SAP data and custom tables from ECC
- To add delta capabilities where no standard extractor is available.
Scenario 1.
-----------
SAP ECC (ANY) - SLT - BW backed by HANA
Make use of transient providers / virtual providers and report in BW.
The minimum requirement is BW 7.3; to get real-time data into BW there is no need for HANA.
Steps
----
Go to SLT
Create configuration
Source : ECC (RFC)
Target: ECC / HANA ( RFC / DB connect)
Goto BW.
Create a source system connections.
CONN_SLT_S
and provide the SLT server details; once it is connected, it shows the ODP replication
scenario name created in SLT.
create DSO
Create Transformation between DSO &
*** For ODP there is no need for an InfoPackage; the data can be extracted using a DTP.
On the first run the DTP is a full load.
Convert this DTP to a real time DTP to extract the data in BW in real time.
ECC data gets posted - trigger fires - log table is updated - transformations in LTRS - LTRC replicate / load - pushes the data into
BW.
When you want to consume the DataSource in multiple targets, ODP comes into the picture.
All the options are the same; only the user interface is different.
In the new version of LTRS you can also export a database view as well.
For that, go to LTRS - Table Settings - give the view name and further input in the structure template; this creates a new table
using the
view template as the structure.
We might encounter an issue while pushing the data into BW using SLT; in such a case a BAdI needs to be switched on / off.
/nLTRC
Go to Expert Functions.
Click on activate / deactivate BAdI implementation.