
Hive Java API

2019/11/28

Environment

Hadoop 2.8.0
HBase 1.4.9
CentOS 7.2

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>java_Hadoop</groupId>
    <artifactId>java_Hadoop</artifactId>
    <version>1.0-SNAPSHOT</version>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>6</source>
                    <target>6</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.8.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.8.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.8.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
            <version>2.8.0</version>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.8.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-jdbc</artifactId>
            <version>2.3.0</version>
        </dependency>
    </dependencies>
</project>

Java code

package Hive;

import Hdfs.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;

import java.io.File;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.util.Date;

public class Hive {

    private static Configuration conf = new Configuration();
    static {
        conf.set("fs.defaultFS", "hdfs://172.18.74.236:9000");
    }
    // HDFS handle (declared for the HDFS helpers; not used directly in this example)
    FileSystem fs = FileSystem.get(conf);

    // JDBC driver class name; this is fixed
    private static String driverName = "org.apache.hive.jdbc.HiveDriver";
    // HiveServer2 listens on port 10000 by default; use the Hive server's IP address
    private static String url = "jdbc:hive2://172.18.74.236:10000/default";
    // Username and password for the Hive connection
    private static String user = "root";
    private static String password = "enter your password";
    public static String today = new SimpleDateFormat("yyyy-MM-dd").format(new Date());

    // Shared JDBC objects
    private static Connection conn = null;
    private static Statement stmt = null;
    private static ResultSet rs = null;

    public Hive() throws IOException {
    }

    // Load the driver and create the connection
    private static void init() throws Exception {
        Class.forName(driverName);
        conn = DriverManager.getConnection(url, user, password);
        stmt = conn.createStatement();
    }

    // Release resources
    public static void destroy() throws Exception {
        if (rs != null) {
            rs.close();
        }
        if (stmt != null) {
            stmt.close();
        }
        if (conn != null) {
            conn.close();
        }
    }

    // Main method
    public static void main(String[] args) throws Exception {
        // Establish the connection
        init();
        // Table name
        String Tablename = "Test";
        // File paths
        String localFilePath = "E:\\Data.txt";
        String hdfsFilePath = "/Test" + today.substring(0, 7) + "/upload_date=" + today + "/";
        File localfilepath = new File(localFilePath);

        /**
         * 1. List the directories in HDFS
         * 2. Create a directory
         * 3. Upload the local file to HDFS
         * 4. Load the HDFS file into the Hive table
         */
        // HdfsTest.getDiretoryFromHdfs("/");
        // HdfsTest.mkdir(hdfsFilePath);
        // HdfsTest.uploadFile(localFilePath, hdfsFilePath);
        // HdfsTest.getDiretoryFromHdfs(hdfsFilePath);

        // createTable(Tablename);
        // showTables();
        // loadData(hdfsFilePath + localfilepath.getName(), Tablename);
        descTable(Tablename);

        // selectData(Tablename);
        // dropTable(Tablename);
        destroy();
    }

    // Create a table
    private static void createTable(String Tablename) throws Exception {
        stmt.execute("drop table if exists " + Tablename);
        String sql = "create table " + Tablename + " (id int, name string)";
        stmt.execute(sql);
    }

    // List all tables
    private static void showTables() throws Exception {
        String sql = "show tables";
        rs = stmt.executeQuery(sql);
        while (rs.next()) {
            System.out.println(rs.getString(1));
        }
    }

    // Describe the table structure
    public static void descTable(String Tablename) throws Exception {
        String sql = "desc " + Tablename;
        rs = stmt.executeQuery(sql);
        while (rs.next()) {
            System.out.println(rs.getString(1) + "\t" + rs.getString(2));
        }
    }

    // Load data from HDFS into the table
    public static void loadData(String filePath, String Tablename) throws Exception {
        String sql = "load data inpath '" + filePath + "' into table " + Tablename;
        stmt.execute(sql);
    }

    // Query data (the columns are id and name, accessed by position)
    public static void selectData(String Tablename) throws Exception {
        String sql = "select * from " + Tablename;
        rs = stmt.executeQuery(sql);
        while (rs.next()) {
            System.out.println(rs.getInt(1) + "\t\t" + rs.getString(2));
        }
    }

    // Drop the table
    public static void dropTable(String Tablename) throws Exception {
        String sql = "drop table if exists " + Tablename;
        stmt.execute(sql);
    }

}
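One caveat: the create table statement above does not specify a field delimiter, so Hive falls back to its default ^A (\001) separator when parsing the loaded file. If Data.txt were, say, comma-separated, a variant like the following would be needed; the method name createCsvTable and the comma delimiter are assumptions for illustration, not part of the original example.

// Hypothetical variant of createTable for a comma-separated data file,
// e.g. lines such as "1,Alice"
private static void createCsvTable(String Tablename) throws Exception {
    stmt.execute("drop table if exists " + Tablename);
    String sql = "create table " + Tablename + " (id int, name string) "
            + "row format delimited fields terminated by ','";
    stmt.execute(sql);
}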

The commented-out HdfsTest calls above depend on the HDFS helper class described in my HDFS API article; see that post for the full code.
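If that article is not at hand, here is a minimal sketch of what the helper might look like. The Hdfs package, the HdfsTest class name, and the method signatures are inferred from the commented-out calls in main; the method bodies and the NameNode address are assumptions, not the original article's code.

package Hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

// Hypothetical stand-in for the HdfsTest helper referenced above;
// only the methods called from Hive.main are sketched here.
public class HdfsTest {

    private static FileSystem getFs() throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://172.18.74.236:9000"); // assumed NameNode address
        return FileSystem.get(conf);
    }

    // List the entries directly under the given HDFS path
    public static void getDiretoryFromHdfs(String path) throws IOException {
        FileSystem fs = getFs();
        for (FileStatus status : fs.listStatus(new Path(path))) {
            System.out.println(status.getPath().toString());
        }
        fs.close();
    }

    // Create a directory (and any missing parents) on HDFS
    public static void mkdir(String path) throws IOException {
        FileSystem fs = getFs();
        fs.mkdirs(new Path(path));
        fs.close();
    }

    // Copy a local file into the given HDFS directory
    public static void uploadFile(String localPath, String hdfsPath) throws IOException {
        FileSystem fs = getFs();
        fs.copyFromLocalFile(new Path(localPath), new Path(hdfsPath));
        fs.close();
    }
}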
