2. Write the program code

Add the following dependencies to pom.xml:

<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-streaming-java_${scala.version}</artifactId>
    <version>1.10.1</version>
    <scope>${project.build.scope}</scope>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-core</artifactId>
    <version>1.10.1</version>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka-0.11_2.12</artifactId>
    <version>1.10.1</version>
</dependency>
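The ${scala.version} and ${project.build.scope} placeholders are Maven properties assumed to be defined elsewhere in the pom.xml. A minimal sketch of that block, assuming Scala 2.12 (to match the _2.12 suffix of the connector artifact above) and compile scope; the actual values of the original project are not shown here:

<properties>
    <!-- Assumed values; adjust to your project -->
    <scala.version>2.12</scala.version>
    <project.build.scope>compile</project.build.scope>
</properties>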
The consumer program:

package com.demo;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;

import java.util.Properties;

public class KafkaStringConsumer {
    public static void main(String[] args) throws Exception {
        // Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties props = new Properties();
        // Specify the Kafka broker host and port
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("group.id", "flink-group");
        props.setProperty("auto.offset.reset", "latest");

        // FlinkKafkaConsumer011 matches the flink-connector-kafka-0.11 dependency declared above
        FlinkKafkaConsumer011<String> consumer =
                new FlinkKafkaConsumer011<>("flink-topic", new SimpleStringSchema(), props);

        DataStream<String> dataStream = env.addSource(consumer);
        dataStream.print();
        env.execute();
    }
}

3. Start the Kafka test environment
1. Start ZooKeeper.
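If you use the ZooKeeper scripts bundled with the Kafka Windows distribution, it can be started with the command below (assuming the default .\config\zookeeper.properties):

.\bin\windows\zookeeper-server-start.bat .\config\zookeeper.properties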
2. Start the Kafka server.
.\bin\windows\kafka-server-start.bat .\config\server.properties
3. Create the topic
.\bin\windows\kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic flink-topic
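Optionally, you can confirm the topic was created by listing topics against the same ZooKeeper instance:

.\bin\windows\kafka-topics.bat --list --zookeeper localhost:2181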
4. Start a console producer
.\bin\windows\kafka-console-producer.bat --broker-list localhost:9092 --topic flink-topic
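As an optional sanity check before running the Flink job, a console consumer can be attached to the same topic to confirm that messages from the producer actually reach the broker:

.\bin\windows\kafka-console-consumer.bat --bootstrap-server localhost:9092 --topic flink-topic --from-beginning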
5. Run the program, type messages into the producer window, and watch the received messages printed in the program's console.
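For example, typing the following into the producer window:

>hello flink
>hello kafka

should produce output similar to this in the Flink program's console (the numeric prefix is the parallel subtask index added by print() and may differ or be absent depending on parallelism):

1> hello flink
1> hello kafka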