【02】Kafka topics: create, delete, and query
Create: bin/kafka-topics.sh --create --topic flink_kafka --partitions 3 --replication-factor 2 --bootstrap-server node1:9092,node2:9092,node3:9092
Delete: bin/kafka-topics.sh --delete --topic bigdata01 --bootstrap-server node1:9092,node2:9092,node3:9092
Describe a topic: bin/kafka-topics.sh --describe --topic bigdata01 --bootstrap-server node1:9092,node2:9092,node3:9092
List topics: bin/kafka-topics.sh --list --bootstrap-server node1:9092,node2:9092,node3:9092
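A quick way to verify a topic is a console round trip (a sketch reusing the flink_kafka topic and broker list from above; run the two commands in separate terminals):
# Terminal 1: consume from the beginning of the topic
bin/kafka-console-consumer.sh --topic flink_kafka --from-beginning --bootstrap-server node1:9092,node2:9092,node3:9092
# Terminal 2: produce test messages (type lines, Ctrl+C to quit)
bin/kafka-console-producer.sh --topic flink_kafka --bootstrap-server node1:9092,node2:9092,node3:9092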
========= MySQL ==========
【01】. Import data from a SQL file
From inside the mysql client: source /opt/insurance.sql
From the shell: mysql -uroot -p --default-character-set=utf8mb4 insurance </opt/insurance/1_data_mysql/insurance.sql
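To confirm the import succeeded, listing the tables is a quick sanity check (a minimal sketch, assuming the same insurance database and root account):
mysql -uroot -p -e "SHOW TABLES;" insurance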
【02】. Back up a MySQL database to a SQL file:
mysqldump -uroot -p --databases insurance > /opt/insurance/1_data_mysql/insurance.sql
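For a large database, the same dump can be piped through gzip to keep the backup file small (a variant of the command above, not from the original notes):
mysqldump -uroot -p --databases insurance | gzip > /opt/insurance/1_data_mysql/insurance.sql.gz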
=【 Linux 】==
View system-wide environment settings: cat /etc/profile
Convert Windows (CRLF) line endings to Unix: dos2unix ./sqoop.sh
Install dos2unix: yum -y install dos2unix
Install lrzsz (rz/sz file transfer over SSH): yum -y install lrzsz
Show non-printing characters (e.g. ^M from CRLF): cat -v ./sqoop.sh
Make the script executable: chmod +x ./sqoop.sh
Listen on TCP port 9999 and keep accepting connections: nc -lk 9999
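nc -lk 9999 holds a TCP listener open on port 9999 (-l listen, -k stay alive across clients), commonly used as a test source for streaming jobs. To verify it from another terminal (assuming the listener runs on node1):
nc node1 9999
# anything typed on either side appears on the other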
=【 Cluster startup commands 】==
【01】. Start the Hadoop cluster services (HDFS and YARN):
/export/server/hadoop/sbin/start-all.sh
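After start-all.sh finishes, jps should list the running daemons on each node (a quick check; which roles appear where depends on your cluster layout):
jps
# master typically shows: NameNode, SecondaryNameNode, ResourceManager
# workers typically show: DataNode, NodeManager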
【02】. Start the Hive services (Hive metastore and HiveServer2): (jdbc:hive2://node3:10000)
nohup hive --service metastore &
nohup hive --service hiveserver2 &
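To test HiveServer2, connect with Beeline against the JDBC URL above (the -n login user is an assumption; substitute your own):
beeline -u jdbc:hive2://node3:10000 -n root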
【03】. Start the Spark Thrift Server (jdbc:hive2://node3:10001)
/export/server/spark/sbin/start-thriftserver.sh \
--hiveconf hive.server2.thrift.port=10001 \
--hiveconf hive.server2.thrift.bind.host=node3 \
--master local[*]
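The Spark Thrift Server speaks the same HiveServer2 protocol, so Beeline connects the same way, just on port 10001 (again, -n root is an assumption):
beeline -u jdbc:hive2://node3:10001 -n root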
The 4-step JDBC routine (load driver, get a connection, create a statement, execute and close):
// Declare the variables
Connection conn = null;
Statement stmt = null;
PreparedStatement ps = null;
ResultSet rs = null;
// Load the MySQL driver
Class.forName("com.mysql.jdbc.Driver");
// Get a database connection
conn = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306/samsung", "root", "123456");
// Create a Statement / PreparedStatement object
stmt = conn.createStatement();
ps = conn.prepareStatement(sql);
// Execute the query and get the result set
rs = stmt.executeQuery(sql);    // or: rs = ps.executeQuery();
// Close everything in reverse order of creation
rs.close();
stmt.close();
ps.close();
conn.close();