- 构建Hbase集群,请参考: Centos 下Hbase0.98.10-hadoop2 集群的配置
- 在Eclipse中创建Maven的工程
-
将集群的hbase-site.xml文件放到工程的classes目录下
-
配置操作系统的 hosts 文件(Windows 下位于
C:\Windows\System32\drivers\etc\hosts),将 Hbase 集群各节点的 IP 以及主机名配置到该文件中:
192.168.40.108 hadoop108
192.168.40.148 hadoop148
192.168.40.104 hadoop104
192.168.40.107 hadoop107
192.168.40.105 hadoop105
-
编写Maven的pom.xml文件,依赖内容如下
<dependencies>
    <dependency>
        <groupId>org.apache.avro</groupId>
        <artifactId>avro</artifactId>
        <version>1.7.7</version>
    </dependency>
    <dependency>
        <groupId>org.apache.avro</groupId>
        <artifactId>avro-tools</artifactId>
        <version>1.7.7</version>
    </dependency>
    <dependency>
        <groupId>org.apache.avro</groupId>
        <artifactId>avro-maven-plugin</artifactId>
        <version>1.7.7</version>
    </dependency>
    <dependency>
        <groupId>org.apache.avro</groupId>
        <artifactId>avro-compiler</artifactId>
        <version>1.7.7</version>
    </dependency>
    <!-- 注意:hbase-client 版本需与集群一致(集群为 0.98.10-hadoop2,原文的 0.98.8-hadoop1 会导致不兼容) -->
    <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-client</artifactId>
        <version>0.98.10-hadoop2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase</artifactId>
        <version>0.90.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-core</artifactId>
        <version>1.2.1</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>3.8.1</version>
        <scope>test</scope>
    </dependency>
</dependencies>
-
编辑Java源码
package com.eric.hbase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Demonstrates basic HBase operations with the 0.98-era client API:
 * create/delete a table, put/get/delete a row, and scan a full table.
 *
 * <p>The cluster connection settings come from the {@code hbase-site.xml}
 * placed on the classpath (see the article text); the static initializer
 * prints the ZooKeeper quorum it picked up as a sanity check.
 */
public class BaseOperation {

    // NOTE(review): never referenced below — main() uses "scores" instead.
    // Kept so the public surface of the class is unchanged.
    private static final String TABLE_NAME = "demo_table";

    /** Shared cluster configuration, built once from classpath hbase-site.xml. */
    public static Configuration conf = null;
    public HTable table = null;
    public HBaseAdmin admin = null;

    static {
        conf = HBaseConfiguration.create();
        // Sanity check: should print the quorum hosts from hbase-site.xml.
        System.out.println(conf.get("hbase.zookeeper.quorum"));
    }

    /**
     * Creates a table with the given column families; no-op (with a message)
     * if the table already exists.
     *
     * <p>The misspelled name {@code creatTable} is kept for source
     * compatibility with existing callers.
     *
     * @param tableName name of the table to create
     * @param familys   column family names to add to the table descriptor
     * @throws Exception if the master is unreachable or creation fails
     */
    public static void creatTable(String tableName, String[] familys)
            throws Exception {
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
            if (admin.tableExists(tableName)) {
                System.out.println("table already exists!");
            } else {
                HTableDescriptor tableDesc = new HTableDescriptor(tableName);
                for (String family : familys) {
                    tableDesc.addFamily(new HColumnDescriptor(family));
                }
                admin.createTable(tableDesc);
                System.out.println("create table " + tableName + " ok.");
            }
        } finally {
            // Was leaked in the original — HBaseAdmin holds a cluster connection.
            admin.close();
        }
    }

    /**
     * Disables then deletes the given table. Connection-level failures
     * (master not running / ZooKeeper unreachable) are logged and swallowed,
     * matching the original best-effort behavior.
     *
     * @param tableName name of the table to drop
     * @throws Exception for failures other than the two handled types
     */
    public static void deleteTable(String tableName) throws Exception {
        try {
            HBaseAdmin admin = new HBaseAdmin(conf);
            try {
                admin.disableTable(tableName); // must disable before delete
                admin.deleteTable(tableName);
                System.out.println("delete table " + tableName + " ok.");
            } finally {
                admin.close();
            }
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        }
    }

    /**
     * Inserts (or overwrites) a single cell.
     *
     * @param tableName table to write to
     * @param rowKey    row key of the cell
     * @param family    column family (must already exist on the table)
     * @param qualifier column qualifier; may be empty
     * @param value     cell value, stored UTF-8 encoded
     * @throws Exception never in practice — IOException is caught and logged
     */
    public static void addRecord(String tableName, String rowKey,
            String family, String qualifier, String value) throws Exception {
        try {
            HTable table = new HTable(conf, tableName);
            try {
                Put put = new Put(Bytes.toBytes(rowKey));
                put.add(Bytes.toBytes(family), Bytes.toBytes(qualifier),
                        Bytes.toBytes(value));
                table.put(put);
                System.out.println("insert record " + rowKey + " to table "
                        + tableName + " ok.");
            } finally {
                table.close(); // was leaked in the original
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Deletes an entire row.
     *
     * @param tableName table to delete from
     * @param rowKey    key of the row to remove
     * @throws IOException if the delete RPC fails
     */
    public static void delRecord(String tableName, String rowKey)
            throws IOException {
        HTable table = new HTable(conf, tableName);
        try {
            // Typed list (original used a raw List) and Bytes.toBytes for a
            // charset-independent key encoding (original used String.getBytes(),
            // which depends on the platform default charset).
            List<Delete> deletes = new ArrayList<Delete>();
            deletes.add(new Delete(Bytes.toBytes(rowKey)));
            table.delete(deletes);
            System.out.println("del record " + rowKey + " ok.");
        } finally {
            table.close();
        }
    }

    /**
     * Fetches one row and prints every cell as
     * {@code row family:qualifier timestamp value}.
     *
     * @param tableName table to read from
     * @param rowKey    key of the row to fetch
     * @throws IOException if the get RPC fails
     */
    public static void getOneRecord(String tableName, String rowKey)
            throws IOException {
        HTable table = new HTable(conf, tableName);
        try {
            Get get = new Get(Bytes.toBytes(rowKey));
            Result rs = table.get(get);
            for (KeyValue kv : rs.raw()) {
                printKeyValue(kv);
            }
        } finally {
            table.close();
        }
    }

    /**
     * Scans the whole table and prints every cell; IO errors are logged
     * and swallowed (best-effort, matching the original).
     *
     * @param tableName table to scan
     */
    public static void getAllRecord(String tableName) {
        try {
            HTable table = new HTable(conf, tableName);
            try {
                Scan s = new Scan();
                ResultScanner ss = table.getScanner(s);
                try {
                    for (Result r : ss) {
                        for (KeyValue kv : r.raw()) {
                            printKeyValue(kv);
                        }
                    }
                } finally {
                    ss.close(); // scanners hold server-side resources
                }
            } finally {
                table.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Prints one cell as {@code row family:qualifier timestamp value}. */
    private static void printKeyValue(KeyValue kv) {
        // Bytes.toString decodes UTF-8 explicitly; the original used
        // new String(byte[]), which depends on the platform charset.
        System.out.print(Bytes.toString(kv.getRow()) + " ");
        System.out.print(Bytes.toString(kv.getFamily()) + ":");
        System.out.print(Bytes.toString(kv.getQualifier()) + " ");
        System.out.print(kv.getTimestamp() + " ");
        System.out.println(Bytes.toString(kv.getValue()));
    }

    /** Demo driver: create a "scores" table, populate, query, and delete. */
    public static void main(String[] agrs) {
        try {
            String tablename = "scores";
            String[] familys = { "grade", "course" };
            BaseOperation.creatTable(tablename, familys);

            // add record zkb
            BaseOperation.addRecord(tablename, "zkb", "grade", "", "5");
            BaseOperation.addRecord(tablename, "zkb", "course", "", "90");
            BaseOperation.addRecord(tablename, "zkb", "course", "math", "97");
            BaseOperation.addRecord(tablename, "zkb", "course", "art", "87");

            // add record baoniu
            BaseOperation.addRecord(tablename, "baoniu", "grade", "", "4");
            BaseOperation.addRecord(tablename, "baoniu", "course", "math", "89");

            System.out.println("===========get one record========");
            BaseOperation.getOneRecord(tablename, "zkb");

            System.out.println("===========show all record========");
            BaseOperation.getAllRecord(tablename);

            System.out.println("===========del one record========");
            BaseOperation.delRecord(tablename, "baoniu");
            BaseOperation.getAllRecord(tablename);

            System.out.println("===========show all record========");
            BaseOperation.getAllRecord(tablename);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
-
运行程序,输出如下:
hadoop107,hadoop108,hadoop104 log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory). log4j:WARN Please initialize the log4j system properly. log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info. table already exists! insert recored zkb to table scores ok. insert recored zkb to table scores ok. insert recored zkb to table scores ok. insert recored zkb to table scores ok. insert recored baoniu to table scores ok. insert recored baoniu to table scores ok. ===========get one record======== zkb course: 1425258910718 90 zkb course:art 1425258910727 87 zkb course:math 1425258910722 97 zkb grade: 1425258910705 5 ===========show all record======== baoniu course:math 1425258910734 89 baoniu grade: 1425258910730 4 zkb course: 1425258910718 90 zkb course:art 1425258910727 87 zkb course:math 1425258910722 97 zkb grade: 1425258910705 5 ===========del one record======== del recored baoniu ok. zkb course: 1425258910718 90 zkb course:art 1425258910727 87 zkb course:math 1425258910722 97 zkb grade: 1425258910705 5 ===========show all record======== zkb course: 1425258910718 90 zkb course:art 1425258910727 87 zkb course:math 1425258910722 97 zkb grade: 1425258910705 5
http://www.tuicool.com/articles/r6ZZBjU
http://xpenxpen.iteye.com/blog/2158922
http://www.cnblogs.com/ggjucheng/p/3381328.html
相关推荐
Hadoop2.2+Zookeeper3.4.5+HBase0.96集群环境搭建
HBASE的java版本的客户端,运行代码需要设定环境变量且打包成jar文件运行
jdk1.8.0_131、apache-zookeeper-3.8.0、hadoop-3.3.2、hbase-2.4.12 mysql5.7.38、mysql jdbc驱动mysql-connector-java-8.0.8-dmr-bin.jar、 apache-hive-3.1.3 2.本文软件均安装在自建的目录/export/server/下 ...
Hbase JAVA编程开发实验报告以及代码,1 按照“Hbase应用开发实验1.pdf”中给出的说明,完成相关相关实验及JAVA程序的编写、调试和运行,提交JAVA源程序(AccessObject.java, HBaseTester.java, User.java)及运行...
1、内容概要:Hadoop+Spark+Hive+HBase+Oozie+Kafka+Flume+Flink+Elasticsearch+Redash等大数据集群及组件搭建指南(详细搭建步骤+实践过程问题总结)。 2、适合人群:大数据运维、大数据相关技术及组件初学者。 3、...
最好用的开发api
基于SpringBoot + Kafka + Redis + InfluxDB + HBase + Grafana 的风控系统,项目经过严格测试,确保可以运行! 基于SpringBoot + Kafka + Redis + InfluxDB + HBase + Grafana 的风控系统,项目经过严格测试,确保...
本科毕业设计项目,基于spark streaming+flume+kafka+hbase的实时日志处理分析系统 基于spark streaming+flume+kafka+hbase的实时日志处理分析系统 本科毕业设计项目,基于spark streaming+flume+kafka+hbase的...
因为配置大数据的基础环境特别费事,因此这里搭建好了一份基础环境
可视化毕业设计:数据处理(MapReduce)+数据展示(hbase+java+echarts).zip
【Java毕业设计】可视化毕业设计:数据处理(MapReduce)+数据展示(hbase+java+echarts)
在eclipse中开发HBase程序的方法
该资源为java客户端连接hbase集群,在windows客户端配置hadoop环境所需要用到的工具类,有需自取
1、有多个工具可选择使用
基于Flume+Kafka+Hbase+Flink+FineBI的实时综合案例.txt基于Flume+Kafka+Hbase+Flink+FineBI的实时综合案例.txt基于Flume+Kafka+Hbase+Flink+FineBI的实时综合案例.txt基于Flume+Kafka+Hbase+Flink+FineBI的实时综合...
Hadoop2.6+HA+Zookeeper3.4.6+Hbase1.0.0 集群安装详细步骤
hbase客户端下载,可以直接连接插入、查看 hbase客户端下载,可以直接连接插入、查看 hbase客户端下载,可以直接连接插入、查看
项目以宜信贷风控系统实时数据采集系统为背景,主要描述了技术架构、核心技术难点及代码实现全过程,涉及技术包括但不限于:Kafka\zookeeper,Spark,SparkStreaming,HBase,实时访问技术,爬虫技术等
搭建Hadoop集群,并使用flume+kafka+storm+hbase实现日志抓取分析,使用一个主节点master、两个slave节点