# Create the Kafka topics required by the pipeline.
# (Skip any topic that already exists to avoid duplicate-creation errors.)
# BUG FIX: the comment was fused onto the first command, which commented it out
# entirely — the first topic was never created when this was run as a script.
# NOTE(review): "BUSSINESS" is misspelled, but the topic name is a runtime
# identifier consumed elsewhere — do not rename without updating all consumers.
./kafka-topics.sh --zookeeper node3:2181,node4:2181,node5:2181 --create --topic KAFKA-DB-BUSSINESS-DATA --partitions 3 --replication-factor 3
./kafka-topics.sh --zookeeper node3:2181,node4:2181,node5:2181 --create --topic KAFKA-ODS-TOPIC --partitions 3 --replication-factor 3
./kafka-topics.sh --zookeeper node3:2181,node4:2181,node5:2181 --create --topic KAFKA-DIM-TOPIC --partitions 3 --replication-factor 3
./kafka-topics.sh --zookeeper node3:2181,node4:2181,node5:2181 --create --topic KAFKA-DWD-USER-LOGIN-TOPIC --partitions 3 --replication-factor 3
./kafka-topics.sh --zookeeper node3:2181,node4:2181,node5:2181 --create --topic KAFKA-DWS-USER-LOGIN-WIDE-TOPIC --partitions 3 --replication-factor 3
# Start Maxwell (the MySQL binlog -> Kafka CDC producer); run on node3.
# BUG FIX: the original line had two shell prompts pasted onto the comment,
# leaving nothing executable — split into the actual commands.
cd /software/maxwell-1.28.2/bin
maxwell --config ../config.properties
-- Register the Iceberg runtime jars in the Hive session before creating the
-- Iceberg tables for each layer below.
-- BUG FIX: the comment was fused onto the first `add jar` statement, which
-- made it unparseable by Hive — split onto separate lines.
add jar /software/hive-3.1.2/lib/iceberg-hive-runtime-0.12.1.jar;
add jar /software/hive-3.1.2/lib/libfb303-0.9.3.jar;
-- ODS-layer member info table, backed by Iceberg via the Hive storage handler.
-- Old metadata files are pruned automatically, keeping at most 3 previous versions.
CREATE TABLE ODS_MEMBER_INFO (
    id                  string,
    user_id             string,
    member_growth_score string,
    member_level        string,
    balance             string,
    gmt_create          string,
    gmt_modified        string
)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION 'hdfs://mycluster/lakehousedata/icebergdb/ODS_MEMBER_INFO/'
TBLPROPERTIES (
    'iceberg.catalog' = 'location_based_table',
    'write.metadata.delete-after-commit.enabled' = 'true',
    'write.metadata.previous-versions-max' = '3'
);
-- ODS-layer member address table, backed by Iceberg via the Hive storage handler.
-- NOTE(review): `log` next to `lat` looks like a typo for `lng`/longitude —
-- confirm against the upstream MySQL schema before renaming (schema change).
CREATE TABLE ODS_MEMBER_ADDRESS (
    id             string,
    user_id        string,
    province       string,
    city           string,
    area           string,
    address        string,
    log            string,
    lat            string,
    phone_number   string,
    consignee_name string,
    gmt_create     string,
    gmt_modified   string
)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION 'hdfs://mycluster/lakehousedata/icebergdb/ODS_MEMBER_ADDRESS/'
TBLPROPERTIES (
    'iceberg.catalog' = 'location_based_table',
    'write.metadata.delete-after-commit.enabled' = 'true',
    'write.metadata.previous-versions-max' = '3'
);
-- ODS-layer user login event table, backed by Iceberg via the Hive storage handler.
CREATE TABLE ODS_USER_LOGIN (
    id        string,
    user_id   string,
    ip        string,
    login_tm  string,
    logout_tm string
)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION 'hdfs://mycluster/lakehousedata/icebergdb/ODS_USER_LOGIN/'
TBLPROPERTIES (
    'iceberg.catalog' = 'location_based_table',
    'write.metadata.delete-after-commit.enabled' = 'true',
    'write.metadata.previous-versions-max' = '3'
);
-- DWD-layer user login table; same columns as ODS_USER_LOGIN (cleaned detail layer).
CREATE TABLE DWD_USER_LOGIN (
    id        string,
    user_id   string,
    ip        string,
    login_tm  string,
    logout_tm string
)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION 'hdfs://mycluster/lakehousedata/icebergdb/DWD_USER_LOGIN/'
TBLPROPERTIES (
    'iceberg.catalog' = 'location_based_table',
    'write.metadata.delete-after-commit.enabled' = 'true',
    'write.metadata.previous-versions-max' = '3'
);
-- DWS-layer wide table: login events joined with member/address dimensions.
CREATE TABLE DWS_USER_LOGIN (
    user_id             string,
    ip                  string,
    gmt_create          string,
    login_tm            string,
    logout_tm           string,
    member_level        string,
    province            string,
    city                string,
    area                string,
    address             string,
    member_points       string,
    balance             string,
    member_growth_score string
)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION 'hdfs://mycluster/lakehousedata/icebergdb/DWS_USER_LOGIN/'
TBLPROPERTIES (
    'iceberg.catalog' = 'location_based_table',
    'write.metadata.delete-after-commit.enabled' = 'true',
    'write.metadata.previous-versions-max' = '3'
);
# Start the ClickHouse server; run on node1.
# BUG FIX: the shell prompt was pasted onto the comment line, leaving nothing
# executable — split the command onto its own line.
service clickhouse-server start
-- DM-layer result table in ClickHouse for user login info, ordered by date.
-- BUG FIX: the original comment (with a shell-style `#`, which ClickHouse does
-- not accept) was fused onto the CREATE TABLE statement, making it unparseable.
CREATE TABLE dm_user_login_info
(
    dt         String,
    province   String,
    city       String,
    user_id    String,
    login_tm   String,
    gmt_create String
)
ENGINE = MergeTree()
ORDER BY dt;
-- (page footer from the original source: "评论" / "Comments" — not part of the setup steps)