[HADOOP] HBase client API not connecting to HBase
I am following this link to insert data into my HBase table. I followed all the steps and wrote the code below:
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
public class Startclass {
    // Shared HBase client configuration, loaded from hbase-site.xml on the classpath
    private static Configuration conf = HBaseConfiguration.create();

    // Insert a single cell (family:qualifier = value) into the given row of the table
    public static void addRecord(String tableName, String rowKey,
            String family, String qualifier, String value) throws Exception {
        try {
            HTable table = new HTable(conf, tableName);
            Put put = new Put(Bytes.toBytes(rowKey));
            put.add(Bytes.toBytes(family), Bytes.toBytes(qualifier),
                    Bytes.toBytes(value));
            table.put(put);
            System.out.println("insert record " + rowKey + " to table "
                    + tableName + " ok.");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        try {
            String tablename = "hl7";
            Startclass.addRecord(tablename, "zkb", "pd", "dob", "10121993");
            Startclass.addRecord(tablename, "zkb", "pd", "id", "007");
            Startclass.addRecord(tablename, "zkb", "obr", "id", "007");
            Startclass.addRecord(tablename, "zkb", "obr", "testname", "healthec");
            Startclass.addRecord(tablename, "abc", "pd", "dob", "02051993");
            Startclass.addRecord(tablename, "abc", "pd", "id", "011");
            Startclass.addRecord(tablename, "abc", "obr", "id", "011");
            Startclass.addRecord(tablename, "abc", "obr", "testname", "matrix");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
But this is the output I am getting:
16/11/18 16:55:04 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:zookeeper.version=3.4.5-cdh5.8.0--1, built on 06/16/2016 19:37 GMT
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:host.name=quickstart.cloudera
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:java.version=1.7.0_67
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:java.vendor=Oracle Corporation
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:java.home=/usr/java/jdk1.7.0_67-cloudera/jre
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:java.class.path=/home/cloudera/workspace/HbaseJav/bin:/home/cloudera/workspace/HbaseJav/conf:/usr/lib/hadoop/client/avro.jar:/usr/lib/hadoop/client/commons-beanutils-core.jar:/usr/lib/hadoop/client/commons-beanutils.jar:/usr/lib/hadoop/client/commons-cli.jar:/usr/lib/hadoop/client/commons-codec.jar:/usr/lib/hadoop/client/commons-collections.jar:/usr/lib/hadoop/client/commons-compress.jar:/usr/lib/hadoop/client/commons-configuration.jar:/usr/lib/hadoop/client/commons-digester.jar:/usr/lib/hadoop/client/commons-httpclient.jar:/usr/lib/hadoop/client/commons-io.jar:/usr/lib/hadoop/client/commons-lang.jar:/usr/lib/hadoop/client/commons-logging.jar:/usr/lib/hadoop/client/commons-math3.jar:/usr/lib/hadoop/client/commons-net.jar:/usr/lib/hadoop/client/guava.jar:/usr/lib/hadoop/client/hadoop-annotations.jar:/usr/lib/hadoop/client/hadoop-auth.jar:/usr/lib/hadoop/client/hadoop-common.jar:/usr/lib/hadoop/client/hadoop-hdfs.jar:/usr/lib/hadoop/client/hadoop-mapreduce-client-app.jar:/usr/lib/hadoop/client/hadoop-mapreduce-client-common.jar:/usr/lib/hadoop/client/hadoop-mapreduce-client-core.jar:/usr/lib/hadoop/client/hadoop-mapreduce-client-jobclient.jar:/usr/lib/hadoop/client/hadoop-mapreduce-client-shuffle.jar:/usr/lib/hadoop/client/hadoop-yarn-api.jar:/usr/lib/hadoop/client/hadoop-yarn-client.jar:/usr/lib/hadoop/client/hadoop-yarn-common.jar:/usr/lib/hadoop/client/hadoop-yarn-server-common.jar:/usr/lib/hadoop/client/htrace-core4.jar:/usr/lib/hadoop/client/jetty-util.jar:/usr/lib/hadoop/client/jsr305.jar:/usr/lib/hadoop/client/log4j.jar:/usr/lib/hadoop/client/paranamer.jar:/usr/lib/hadoop/client/protobuf-java.jar:/usr/lib/hadoop/client/slf4j-api.jar:/usr/lib/hadoop/client/snappy-java.jar:/usr/lib/hadoop/client/xmlenc.jar:/usr/lib/hadoop/client/xz.jar:/usr/lib/hadoop/client/zookeeper.jar:/home/cloudera/lib/mrunit-0.9.0-incubating-hadoop2.jar:/home/cloudera/lib/junit-4.11.jar:/home/cloudera/lib/hamcrest-all-1.1.jar:/home/cloudera/hbase-0.92.1.jar:/usr/lib/hbase/lib/commons-configuration-1.6.jar:/usr/lib/hbase/lib/commons-lang-2.6.jar:/usr/lib/hbase/lib/commons-logging-1.2.jar:/usr/lib/hbase/lib/log4j-1.2.17.jar:/usr/lib/hbase/lib/slf4j-api-1.7.5.jar:/usr/lib/hbase/lib/slf4j-log4j12.jar:/usr/lib/hbase/lib/zookeeper.jar:/usr/lib/hadoop-0.20-mapreduce/hadoop-core-2.6.0-mr1-cdh5.8.0.jar:/usr/lib/hadoop-0.20-mapreduce/hadoop-core-mr1.jar
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:java.compiler=<NA>
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:os.name=Linux
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:os.arch=amd64
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:os.version=2.6.32-573.el6.x86_64
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:user.name=cloudera
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:user.home=/home/cloudera
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Client environment:user.dir=/home/cloudera/workspace/HbaseJav
16/11/18 16:55:05 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=localhost:2181 sessionTimeout=180000 watcher=hconnection
16/11/18 16:55:05 INFO zookeeper.RecoverableZooKeeper: The identifier of this process is 6315@quickstart.cloudera
16/11/18 16:55:05 INFO zookeeper.ClientCnxn: Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
16/11/18 16:55:05 INFO zookeeper.ClientCnxn: Socket connection established, initiating session, client: /127.0.0.1:41539, server: localhost/127.0.0.1:2181
16/11/18 16:55:05 INFO zookeeper.ClientCnxn: Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x15876ebae4a0138, negotiated timeout = 60000
I have also created the HBase table with the appropriate column family names. I do not know what I am doing wrong. Please help.
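As a quick sanity check, the table and its column families can also be listed from client code, using the same old HBaseAdmin API as the code above. This is only a minimal sketch; the class name CheckTable is for illustration, and the table name 'hl7' is taken from the question:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CheckTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        // Does the 'hl7' table exist on the cluster this client is actually talking to?
        System.out.println("hl7 exists: " + admin.tableExists("hl7"));
        // Print every table and its column families as the client sees them
        for (HTableDescriptor td : admin.listTables()) {
            System.out.print(td.getNameAsString() + ":");
            for (HColumnDescriptor cf : td.getFamilies()) {
                System.out.print(" " + cf.getNameAsString());
            }
            System.out.println();
        }
    }
}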
Solution
==============================
1. If you ran the command create 'hl7', 'pd', 'obr' before running this code, there is no way it would fail to insert the data into the table. The likely causes are wrong configs being passed to the client, or an incompatible combination of the installed HBase and the HBase dependency used by the project.
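To rule out the wrong-configs case, one option is to set the ZooKeeper quorum on the Configuration explicitly instead of relying on whatever hbase-site.xml is (or is not) on the classpath, and then retry a single put. The following is only a sketch against the same old client API as the question's code; the host name quickstart.cloudera and port 2181 are taken from the log above and should be adjusted to the actual cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class ExplicitConfPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Point the client at the cluster explicitly rather than trusting the classpath config
        conf.set("hbase.zookeeper.quorum", "quickstart.cloudera");
        conf.set("hbase.zookeeper.property.clientPort", "2181");

        // Single test write into the 'hl7' table created with: create 'hl7', 'pd', 'obr'
        HTable table = new HTable(conf, "hl7");
        Put put = new Put(Bytes.toBytes("zkb"));
        put.add(Bytes.toBytes("pd"), Bytes.toBytes("dob"), Bytes.toBytes("10121993"));
        table.put(put);
        table.flushCommits();   // make sure the write is not sitting in the client-side buffer
        table.close();
        System.out.println("put ok");
    }
}

As for the incompatible-dependency case: the classpath in the log shows hbase-0.92.1.jar being used against a CDH 5.8.0 cluster (ZooKeeper 3.4.5-cdh5.8.0), which looks like exactly the installation/dependency mismatch mentioned above, so building against the client jars that ship with the installed HBase (under /usr/lib/hbase/lib) is also worth trying.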
from https://stackoverflow.com/questions/40676953/hbase-client-api-not-connecting-to-hbase by cc-by-sa and MIT license