The resources files of a Flink stream-processing project

This article walks through the resources files of a Flink stream-processing project; they should be a handy reference for anyone building a similar setup.

conf.properties

# mysql configuration
jdbc.driver=com.mysql.jdbc.Driver
jdbc.url=jdbc:mysql://node03:3306/vehicle_networking?characterEncoding=utf8&useSSL=false
jdbc.user=root
jdbc.password=123456
jdbc.dataSource.size=10

# hive configuration
hive.driver=org.apache.hive.jdbc.HiveDriver
hive.url=jdbc:hive2://node03:10000/itcast_ods
hive.user=root
hive.password=123456

# hdfsUri
hdfsUri=hdfs://node01:8020

# zookeeper configuration
zookeeper.clientPort=2181
zookeeper.quorum=node01,node02,node03

# kafka configuration
kafka.topic=vehicledata_copy
bootstrap.servers=node01:9092,node02:9092,node03:9092
key.partition.discovery.interval.millis=30000

# mongoDB configuration
mongo.host=node03
mongo.port=27017
mongo.userName=itcast
mongo.password=itcast
mongo.source=itcast

# redis configuration
redis.host=node03
redis.port=6379
redis.password=null
redis.database=0
redis.session.timeout=1000
redis.key.timeout=900

# vehinfo match sleep intervals, in milliseconds
vehinfo.millionseconds=18000000
ruleinfo.millionseconds=100000
elerules.millionseconds=300000

# gaode API
gaode.key=f5b76cc61d3d1eaff717afd4bbf6c89d
gaode.address.url=https://restapi.amap.com/v3/geocode/regeo
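
These key/value pairs are plain Java properties, so they can be loaded in one call at job start-up. Below is a minimal sketch using Flink's ParameterTool; the file path and the ConfigLoadSketch class name are assumptions for illustration, and the original project may read the file differently:

import org.apache.flink.api.java.utils.ParameterTool;

public class ConfigLoadSketch {
    public static void main(String[] args) throws Exception {
        // Assumed location: conf.properties under src/main/resources.
        ParameterTool params =
                ParameterTool.fromPropertiesFile("src/main/resources/conf.properties");
        // Keys exactly as they appear in the file above.
        String jdbcUrl = params.get("jdbc.url");
        String brokers = params.get("bootstrap.servers");
        int redisPort  = params.getInt("redis.port", 6379);
        System.out.println(jdbcUrl + " | " + brokers + " | " + redisPort);
    }
}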

core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://node01:8020</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/export/servers/hadoop-2.7.5/hadoopDatas/tempDatas</value>
    </property>
    <!-- I/O buffer size; in practice, tune it to the server hardware -->
    <property>
        <name>io.file.buffer.size</name>
        <value>4096</value>
    </property>
    <!-- Enable the HDFS trash so deleted data can be recovered; interval in minutes -->
    <property>
        <name>fs.trash.interval</name>
        <value>10080</value>
    </property>
    <!-- Hosts allowed to access HDFS through httpfs -->
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <!-- User groups allowed to access HDFS through httpfs -->
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hbase.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hbase.groups</name>
        <value>*</value>
    </property>
</configuration>
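
With core-site.xml (and hdfs-site.xml below) on the classpath, Hadoop's Configuration picks up fs.default.name automatically. A minimal connectivity sketch, assuming the Hadoop client libraries are available; the class name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HdfsConnectSketch {
    public static void main(String[] args) throws Exception {
        // Loads core-site.xml / hdfs-site.xml from the classpath automatically.
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // Should print hdfs://node01:8020, per fs.default.name above.
            System.out.println("Default FS: " + fs.getUri());
        }
    }
}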


hbase-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://node01:8020/hbase</value>  
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- New in 0.98+; earlier versions had no .port property and defaulted to port 60000 -->
  <property>
    <name>hbase.master.port</name>
    <value>16000</value>
  </property>
  <property>
    <name>hbase.master.info.port</name>
    <value>60010</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>node01:2181,node02:2181,node03:2181</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/export/servers/zookeeper-3.4.9/zkdatas</value>
  </property>
  <property>
    <name>hbase.thrift.support.proxyuser</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.regionserver.thrift.http</name>
    <value>true</value>
  </property>
  <!-- Phoenix connection support -->
  <property>
    <name>phoenix.schema.isNamespaceMappingEnabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.regionserver.wal.codec</name>
    <value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
  </property>  
  <property>
    <name>phoenix.schema.mapSystemTablesToNamespace</name>
    <value>true</value>
  </property>
</configuration>
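
The last three properties enable Phoenix on top of HBase. A minimal sketch of a Phoenix JDBC connection, assuming the Phoenix client jar is on the classpath; the query against SYSTEM.CATALOG is only a smoke test:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class PhoenixConnectSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Namespace mapping must match the server-side hbase-site.xml settings,
        // otherwise the client refuses to connect.
        props.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true");
        props.setProperty("phoenix.schema.mapSystemTablesToNamespace", "true");
        // Thick-client URL: jdbc:phoenix:<zookeeper quorum>:<client port>
        String url = "jdbc:phoenix:node01,node02,node03:2181";
        try (Connection conn = DriverManager.getConnection(url, props);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 5")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}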

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>node01:50090</value>
    </property>
    <!-- NameNode web UI address and port -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>node01:50070</value>
    </property>
    <!-- Where the NameNode stores its metadata -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///export/servers/hadoop-2.7.5/hadoopDatas/namenodeDatas,file:///export/servers/hadoop-2.7.5/hadoopDatas/namenodeDatas2</value>
    </property>
    <!-- DataNode storage directories; in practice, decide the disk mount points first, then separate multiple directories with commas -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///export/servers/hadoop-2.7.5/hadoopDatas/datanodeDatas,file:///export/servers/hadoop-2.7.5/hadoopDatas/datanodeDatas2</value>
    </property>
    <!-- Directory for the NameNode edit logs -->
    <property>
        <name>dfs.namenode.edits.dir</name>
        <value>file:///export/servers/hadoop-2.7.5/hadoopDatas/nn/edits</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.dir</name>
        <value>file:///export/servers/hadoop-2.7.5/hadoopDatas/snn/name</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.edits.dir</name>
        <value>file:///export/servers/hadoop-2.7.5/hadoopDatas/dfs/snn/edits</value>
    </property>
    <!-- Number of replicas per block -->
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <!-- Enable HDFS file permissions -->
    <property>
        <name>dfs.permissions</name>
        <value>true</value>
    </property>
    <!-- Block size: 134217728 bytes = 128 MB -->
    <property>
        <name>dfs.blocksize</name>
        <value>134217728</value>
    </property>
    <property>
        <name>dfs.client.read.shortcircuit</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.client.file-block-storage-locations.timeout.millis</name>
        <value>10000</value>
    </property>
    <property>
        <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.domain.socket.path</name>
        <value>/var/run/hdfs-sockets</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>hbase.table.sanity.checks</name>
        <value>false</value>
    </property>
</configuration>
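
Since dfs.webhdfs.enabled is true, the NameNode also serves a REST API on the dfs.namenode.http-address port. A quick check with plain java.net, no Hadoop client needed; user.name=root is an assumption based on the proxyuser settings above:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class WebHdfsSketch {
    public static void main(String[] args) throws Exception {
        // LISTSTATUS on the HDFS root via the WebHDFS REST API;
        // port 50070 matches dfs.namenode.http-address above.
        URL url = new URL("http://node01:50070/webhdfs/v1/?op=LISTSTATUS&user.name=root");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream()))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // JSON directory listing
            }
        }
    }
}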


logback.xml

<?xml version="1.0" encoding="UTF-8"?>

<configuration>
    <!--http://logback.qos.ch/manual/configuration.html-->
    <!-- Send log output to the console -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <!-- The default encoder implementation is ch.qos.logback.classic.encoder.PatternLayoutEncoder -->
        <encoder>
            <pattern>%date %level [%thread] %logger{10} [%file:%line] %msg%n</pattern>
        </encoder>
    </appender>
    <!-- name may be a package or a fully qualified class: that class, or every class under the package (including subpackages), uses this logger -->
    <logger name="com.john.logging.b" level="INFO" additivity="false">
        <appender-ref ref="STDOUT" />
    </logger>

    <!-- Send log output to a file -->
    <appender name="FILE" class="ch.qos.logback.core.FileAppender">
        <file>debug.log</file>
        <encoder>
            <pattern>%date %level [%thread] %logger{10} [%file:%line] %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Root log level -->
    <root level="ERROR">
        <appender-ref ref="STDOUT" />
        <appender-ref ref="FILE" />
    </root>
</configuration>
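
In code, this configuration is consumed through the standard SLF4J API. A minimal sketch; the class name is illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

    public static void main(String[] args) {
        // Root level is ERROR, so this goes to both STDOUT and debug.log.
        LOG.error("something went wrong");
        // INFO is below the root threshold and is dropped (unless the logger
        // falls under com.john.logging.b, which logs INFO to the console only).
        LOG.info("suppressed for this class");
    }
}
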
That wraps up this look at the resources files for Flink stream processing. Hopefully it serves as a useful reference.