1.1、Create the deployment user and configure passwordless sudo
useradd hadoop
echo "Hadoop#149" | passwd --stdin hadoop
# Configure passwordless sudo for the deployment user
sed -i '$ahadoop ALL=(ALL) NOPASSWD: ALL' /etc/sudoers
# Comment out "Defaults requiretty" so sudo works over SSH (whitespace-tolerant match)
sed -i 's/^Defaults\s*requiretty/#&/' /etc/sudoers
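To verify the sudo rule took effect, a quick optional check (run as root; sudo -n fails rather than prompting if passwordless sudo is missing):
# Should print "root" without asking for a password
su - hadoop -c 'sudo -n whoami'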
1.2、Configure passwordless SSH
# Switch to the deployment user and set up passwordless SSH login to this host
su hadoop
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
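Before moving on, an optional check that key-based login works (run as the hadoop user):
# Should print the hostname without prompting for a password
ssh -o StrictHostKeyChecking=no localhost hostname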
1.3、Disable firewalld and SELinux
# Stop and disable the firewall
systemctl stop firewalld
systemctl status firewalld
systemctl disable firewalld
# Change SELINUX=enforcing to disabled (takes effect after a reboot)
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
# Check SELinux status
sestatus
1.4、Initialize the database
Run the following in MySQL:
set global validate_password_policy=0;
set global validate_password_length=1;
CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE USER 'ds'@'%' IDENTIFIED BY 'Changxin*8';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO 'ds'@'%' IDENTIFIED BY 'Changxin*8';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO 'ds'@'localhost' IDENTIFIED BY 'Changxin*8';
flush privileges;
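As an optional sanity check, confirm the new account can reach the database from the shell (assumes the mysql client is installed and node03 is the database host):
mysql -h node03 -u ds -p'Changxin*8' -e "SHOW DATABASES LIKE 'dolphinscheduler';"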
2、Download
wget https://mirrors.tuna.tsinghua.edu.cn/apache/dolphinscheduler/2.0.3/apache-dolphinscheduler-2.0.3-bin.tar.gz --no-check-certificate
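Optionally verify the download; the checksum URL below assumes the standard Apache archive layout and a sha512sum-compatible checksum file:
wget https://archive.apache.org/dist/dolphinscheduler/2.0.3/apache-dolphinscheduler-2.0.3-bin.tar.gz.sha512
sha512sum -c apache-dolphinscheduler-2.0.3-bin.tar.gz.sha512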
3、Deploy
3.1、Create the installation directory and grant ownership
# Create the directory
mkdir -p /opt/dolphin
# Grant ownership to the deployment user
chown -R hadoop:hadoop /opt/dolphin
cd /opt/dolphin
tar -zxvf apache-dolphinscheduler-2.0.3-bin.tar.gz -C /opt/dolphin
# Make sure the deployment user owns the extracted directory
chown -R hadoop:hadoop apache-dolphinscheduler-2.0.3-bin
3.2、Edit application-mysql.yaml
spring:
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://node03:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
    username: root
    password: Changxin*8
    hikari:
      connection-test-query: select 1
      minimum-idle: 5
      auto-commit: true
      validation-timeout: 3000
      pool-name: DolphinScheduler
      maximum-pool-size: 50
      connection-timeout: 30000
      idle-timeout: 600000
      leak-detection-threshold: 0
      initialization-fail-timeout: 1
3.3、Add the MySQL 8 driver
Manually copy the mysql-connector-java driver jar (mysql-connector-java-8.0.27.jar) into the lib directory.
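For example, assuming the jar was downloaded to the current directory:
# Copy the MySQL 8 driver into DolphinScheduler's lib directory
cp mysql-connector-java-8.0.27.jar /opt/dolphin/apache-dolphinscheduler-2.0.3-bin/lib/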
3.4、Configure conf/config/install_config.conf
# ---------------------------------------------------------
# INSTALL MACHINE
# ---------------------------------------------------------
# A comma separated list of machine hostname or IP would be installed DolphinScheduler,
# including master, worker, api, alert. If you want to deploy in pseudo-distributed
# mode, just write a pseudo-distributed hostname
# Example for hostnames: ips="ds1,ds2,ds3,ds4,ds5", Example for IPs: ips="192.168.8.1,192.168.8.2,192.168.8.3,192.168.8.4,192.168.8.5"
ips="node03"
# Port of SSH protocol, default value is 22. For now we only support same port in all `ips` machine
# modify it if you use different ssh port
sshPort="22"
# A comma separated list of machine hostname or IP would be installed Master server, it
# must be a subset of configuration `ips`.
# Example for hostnames: masters="ds1,ds2", Example for IPs: masters="192.168.8.1,192.168.8.2"
masters="node03"
# A comma separated list of machine
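Because the file is mostly comments, it helps to review only the active settings before installing:
# Show the effective (non-comment, non-blank) entries
grep -Ev '^#|^$' conf/config/install_config.conf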
3.5、env/dolphinscheduler_env.sh
export HADOOP_HOME=/opt/module/hadoop-3.1.4
export HADOOP_CONF_DIR=/opt/module/hadoop-3.1.4/etc/hadoop
#export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/module/spark-3.1.2
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/java/jdk1.8.0_202-amd64
export HIVE_HOME=/opt/module/hive-3.1.2
export FLINK_HOME=/opt/module/flink-1.13.5
export DATAX_HOME=/opt/module/datax
export PATH=$HADOOP_HOME/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$DATAX_HOME/bin:$PATH
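A quick way to catch typos in these paths is to source the file and confirm each home directory exists; a minimal sketch, assuming the file sits at conf/env/dolphinscheduler_env.sh:
source conf/env/dolphinscheduler_env.sh
# Report any configured home directory that does not exist on this host
for d in "$HADOOP_HOME" "$SPARK_HOME2" "$JAVA_HOME" "$HIVE_HOME" "$FLINK_HOME" "$DATAX_HOME"; do
  [ -d "$d" ] && echo "OK       $d" || echo "MISSING  $d"
done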
3.6、Edit common.properties
# user data local directory path, please make sure the directory exists and have read write permissions
data.basedir.path=/opt/module/dolphinscheduler/tmp
# resource storage type: HDFS, S3, NONE
resource.storage.type=HDFS
# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resource.upload.path=/dolphinscheduler
# whether to startup kerberos
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/module/dolphinscheduler/conf/krb5.conf
# login user from keytab username
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# login user from keytab path
login.user.keytab.path=/opt/module/dolphinscheduler/conf/hdfs.headless.keytab
# kerberos expire time, the unit is hour
kerberos.expire.time=2
# resource view suffixs
#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
hdfs.root.user=hadoop
# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
fs.defaultFS=hdfs://node03:9000
# if resource.storage.type=S3, s3 endpoint
fs.s3a.endpoint=http://192.168.xx.xx:9010
# if resource.storage.type=S3, s3 access key
fs.s3a.access.key=xxxxxxxxxx
# if resource.storage.type=S3, s3 secret key
fs.s3a.secret.key=xxxxxxxxxx
# resourcemanager port, the default value is 8088 if not specified
resource.manager.httpaddress.port=9088
# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarn.resourcemanager.ha.rm.ids=node03
# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
yarn.application.status.address=http://node03:%s/ws/v1/cluster/apps/%s
# job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
yarn.job.history.status.address=http://node03:19888/ws/v1/history/mapreduce/jobs/%s
# datasource encryption enable
datasource.encryption.enable=false
# datasource encryption salt
datasource.encryption.salt=!@#$%^&*
# Whether hive SQL is executed in the same session
support.hive.oneSession=false
# use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
sudo.enable=true
# network interface preferred like eth0, default: empty
#dolphin.scheduler.network.interface.preferred=
# network IP gets priority, default: inner outer
#dolphin.scheduler.network.priority.strategy=default
# system env path
#dolphinscheduler.env.path=env/dolphinscheduler_env.sh
# development state
development.state=false
# datasource.plugin.dir config
datasource.plugin.dir=lib/plugin/datasource
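Since resource.storage.type=HDFS and resource.upload.path=/dolphinscheduler, the upload path should exist on HDFS before the Resource Center is used; assuming the HDFS client is on the PATH and you run as an HDFS superuser:
# Create the resource upload path and hand it to the deployment user
hdfs dfs -mkdir -p /dolphinscheduler
hdfs dfs -chown -R hadoop:hadoop /dolphinscheduler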
3.7、Run the table-creation and base-data import script in the script directory
# If the last line of the log reads "create DolphinScheduler success", the script succeeded;
# then check that the corresponding tables were created in the metadata database
sh script/create-dolphinscheduler.sh
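Besides the log line, the tables can be checked directly, reusing the connection details configured earlier:
mysql -h node03 -u ds -p'Changxin*8' -D dolphinscheduler -e 'SHOW TABLES;'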
3.8、Edit bin/dolphinscheduler-daemon.sh
if [ "$command" = "api-server" ]; then LOG_FILE="-Dlogging.config=classpath:logback-api.xml" CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $API_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},api,${DATAbase_TYPE}"elif [ "$command" = "master-server" ]; then LOG_FILE="-Dlogging.config=classpath:logback-master.xml" CLASS=org.apache.dolphinscheduler.server.master.MasterServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $MASTER_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},master,${DATAbase_TYPE}"elif [ "$command" = "worker-server" ]; then LOG_FILE="-Dlogging.config=classpath:logback-worker.xml" CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $WORKER_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},worker,${DATAbase_TYPE}"elif [ "$command" = "alert-server" ]; then LOG_FILE="-Dlogback.configurationFile=conf/logback-alert.xml" CLASS=org.apache.dolphinscheduler.alert.alertServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $alert_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},alert,${DATAbase_TYPE}"elif [ "$command" = "logger-server" ]; then CLASS=org.apache.dolphinscheduler.server.log.LoggerServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $LOGGER_SERVER_OPTS"elif [ "$command" = "standalone-server" ]; then CLASS=org.apache.dolphinscheduler.server.StandaloneServer export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},standalone,${DATAbase_TYPE}"elif [ "$command" = "python-gateway-server" ]; then CLASS=org.apache.dolphinscheduler.server.PythonGatewayServer export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},python-gateway,${DATAbase_TYPE}"else echo "Error: No command named '$command' was found." exit 1fi
3.9、Edit script/dolphinscheduler-daemon.sh
if [ "$command" = "api-server" ]; then LOG_FILE="-Dlogging.config=classpath:logback-api.xml" CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $API_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},api,${DATAbase_TYPE}"elif [ "$command" = "master-server" ]; then LOG_FILE="-Dlogging.config=classpath:logback-master.xml" CLASS=org.apache.dolphinscheduler.server.master.MasterServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $MASTER_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},master,${DATAbase_TYPE}"elif [ "$command" = "worker-server" ]; then LOG_FILE="-Dlogging.config=classpath:logback-worker.xml" CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $WORKER_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},worker,${DATAbase_TYPE}"elif [ "$command" = "alert-server" ]; then LOG_FILE="-Dlogback.configurationFile=conf/logback-alert.xml" CLASS=org.apache.dolphinscheduler.alert.alertServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $alert_SERVER_OPTS" export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},alert,${DATAbase_TYPE}"elif [ "$command" = "logger-server" ]; then CLASS=org.apache.dolphinscheduler.server.log.LoggerServer HEAP_OPTS="-Xms1g -Xmx1g -Xmn512m" export DOLPHINSCHEDULER_OPTS="$HEAP_OPTS $DOLPHINSCHEDULER_OPTS $LOGGER_SERVER_OPTS"elif [ "$command" = "standalone-server" ]; then CLASS=org.apache.dolphinscheduler.server.StandaloneServer export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},standalone,${DATAbase_TYPE}"elif [ "$command" = "python-gateway-server" ]; then CLASS=org.apache.dolphinscheduler.server.PythonGatewayServer export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE},python-gateway,${DATAbase_TYPE}"else echo "Error: No command named '$command' was found." exit 1fi
3.10、Switch to the deployment user hadoop, then run the one-click deploy script
# Run as the deployment user
su - hadoop
sh install.sh
3.11、Services started
ApiApplicationServer
MasterServer
WorkerServer
LoggerServer
AlertServer
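Since each service runs as a separate JVM, jps (bundled with the JDK) gives a quick health check:
# All five class names should appear in the output
jps | grep -E 'ApiApplicationServer|MasterServer|WorkerServer|LoggerServer|AlertServer'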
3.12、Start/stop commands
# Stop all cluster services
sh ./bin/stop-all.sh
# Start all cluster services
sh ./bin/start-all.sh
# Start/stop Master
sh ./bin/dolphinscheduler-daemon.sh start master-server
sh ./bin/dolphinscheduler-daemon.sh stop master-server
# Start/stop Worker
sh ./bin/dolphinscheduler-daemon.sh start worker-server
sh ./bin/dolphinscheduler-daemon.sh stop worker-server
# Start/stop Api
sh ./bin/dolphinscheduler-daemon.sh start api-server
sh ./bin/dolphinscheduler-daemon.sh stop api-server
# Start/stop Logger
sh ./bin/dolphinscheduler-daemon.sh start logger-server
sh ./bin/dolphinscheduler-daemon.sh stop logger-server
# Start/stop Alert
sh ./bin/dolphinscheduler-daemon.sh start alert-server
sh ./bin/dolphinscheduler-daemon.sh stop alert-server
4、Test
4.1、Web UI
Open the front-end page; the hostname is the machine where the ApiApplicationServer is deployed, and a login page should appear:
http://node03:12345/dolphinscheduler
Default username/password: admin/dolphinscheduler123
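For a headless check that the API server is answering (assumes curl is installed; the URL is the one above):
# An HTTP 200 or 302 response means the web UI is being served
curl -s -o /dev/null -w '%{http_code}\n' http://node03:12345/dolphinscheduler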
4.2、Data Center
4.3、Resource Center
4.4、Function Management - UDF Functions
4.5、Hive UDF Function Test
4.6、View Run Logs