- 接上一次单机方式的安装,开始进行分布式基于mysql存储方式的安装
- 启动 MySQL:service mysql start
- 启动后使用root账户登录,在命令行下输入如下
-- Create the Hive metastore account for both remote ('%') and local access.
CREATE USER 'hive'@'%' IDENTIFIED BY '123456';
CREATE USER 'hive'@'localhost' IDENTIFIED BY '123456';
-- Grant privileges to the already-created user. Do NOT repeat IDENTIFIED BY
-- here: GRANT ... IDENTIFIED BY is deprecated in MySQL 5.7 and removed in 8.0,
-- and the password was already set by CREATE USER above.
GRANT ALL ON *.* TO 'hive'@'%';
GRANT ALL ON *.* TO 'hive'@'localhost';
FLUSH PRIVILEGES;
-- Log back in as the new user and create the metastore database.
mysql -uhive -p123456
CREATE DATABASE hiveDB DEFAULT CHARSET utf8;
<!-- hive-site.xml fragment: these <property> elements belong inside the
     file's <configuration> root element. -->
<!-- Local scratch space for Hive jobs; ${user.name} keeps users separated. -->
<property>
<name>hive.exec.local.scratchdir</name>
<value>/opt/hive-2.1.1/iotmpdir/hive_exec_local_scratchdir/${user.name}</value>
</property>
<!-- Where resources (jars, files) downloaded for ADD JAR/FILE are cached. -->
<property>
<name>hive.downloaded.resources.dir</name>
<value>/opt/hive-2.1.1/iotmpdir/hive_downloaded_resources_dir/${user.name}</value>
</property>
<!-- Allow independent stages of a query to run in parallel. -->
<property>
<name>hive.exec.parallel</name>
<value>true</value>
</property>
<!-- JDBC URL of the MySQL metastore database created above (hiveDB on
     Master01). NOTE(review): with MySQL Connector/J 5.1.38+ you may also
     want useSSL=false to silence SSL warnings — confirm for your driver. -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://Master01:3306/hiveDB?characterEncoding=UTF-8</value>
</property>
<!-- JDBC driver class; com.mysql.jdbc.Driver matches Connector/J 5.x
     (Connector/J 8.x renames it to com.mysql.cj.jdbc.Driver). -->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<!-- Credentials of the MySQL account created in the setup step. -->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123456</value>
<description>password to use against metastore database</description>
</property>
<!-- HDFS path of the default warehouse; must exist and be writable
     (created in the hdfs dfs -mkdir step below). -->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/data/hive</value>
<description>location of default database for the warehouse</description>
</property>
<!-- Per-user query log directory on the local filesystem. -->
<property>
<name>hive.querylog.location</name>
<value>/opt/hive-2.1.1/iotmpdir/hive_querylog_location/${user.name}</value>
<description>Location of Hive run time structured log file</description>
</property>
<!-- Operation logs for HiveServer2 sessions. -->
<property>
<name>hive.server2.logging.operation.log.location</name>
<value>/opt/hive-2.1.1/iotmpdir/hive_server2_logging_operation_log_location/${user.name}</value>
<description>Top level directory where operation logs are stored if logging functionality is enabled</description>
</property>
<!-- HiveServer2 Thrift endpoint: clients (beeline, JDBC) connect to
     Master01:10000 — matches the beeline URL used later in these notes. -->
<property>
<name>hive.server2.thrift.port</name>
<value>10000</value>
</property>
<property>
<name>hive.server2.thrift.bind.host</name>
<value>Master01</value>
</property>
# Create the Hive warehouse directory in HDFS.
# ('hdfs dfs' is the current form; 'hadoop dfs' is deprecated.)
hdfs dfs -mkdir -p /data/hive
# Directories need the execute bit to be traversable, so use 777 instead of
# 666 (666 would make the directories unreadable in practice). The original
# command also had a typo: 'data' was missing its leading slash.
hdfs dfs -chmod 777 /data
hdfs dfs -chmod 777 /data/hive
# Create the local scratch/log directories referenced from hive-site.xml
# (run inside /opt/hive-2.1.1/iotmpdir).
mkdir hive_downloaded_resources_dir
mkdir hive_server2_logging_operation_log_location
mkdir hive_exec_local_scratchdir
mkdir hive_querylog_location
分别设置这些目录的读写属性 chmod 777(目录必须带有执行位才能进入,chmod 666 会导致目录无法访问)
在 /opt/hive-2.1.1/bin 下执行 schematool -dbType mysql -initSchema(注意:路径与前文安装目录 /opt/hive-2.1.1 保持一致)
- 执行的时候,hive会在MySQL指定的数据库下创建很多表,直到最终提示 Success!
- 执行 hive 命令即可启动 CLI 方式的 Hive
- beeline可启动客户端
启动hiveserver2, 命令:nohup hive --service hiveserver2 &
- 测试客户端是否可连接hive
beeline -u jdbc:hive2://Master01:10000 -n root
- 如果出现java.lang.RuntimeException: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.authorize.AuthorizationException): User root is not allowed to impersonate anonymous 错误,修改hadoop 配置文件 /opt/hadoop/core-site.xml,加入如下配置项
<!-- core-site.xml fragment: allow the 'root' user to impersonate other
     users (required for HiveServer2 when beeline connects as root).
     '*' permits impersonation from any host / for any group; restrict
     these values in a production cluster. -->
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
注意:hadoop.proxyuser.root.hosts 配置项名称中的 root 部分,应替换为报错信息 "User: xxx is not allowed to impersonate" 中出现的实际用户名
- 最后测试 SQuirreL SQL Client 能否连接