Java-Hive/HBase Communication: HBase

by 이민우 2023. 1. 22.

HBase can be thought of as a NoSQL database that stores its data on Hadoop.

 

In the previous post on Hive, almost everything except the DDL looked much like a conventional RDB. HBase, on the other hand, has its own syntax, and that carries over into the Java application as well. SQL operations (left) map to HBase operations (right) as follows:

 

  • CREATE → CREATE
  • ALTER → ALTER
  • DROP → DROP
  • INSERT → PUT
  • UPDATE → PUT
  • DELETE → DELETE
  • SELECT → GET / SCAN

This post covers communicating with HBase using HBase's own API rather than MyBatis.

 

Also, each column in HBase is addressed by a column family and a column qualifier; in this post I keep things simple with a 1:1 layout, i.e. column = column family = column qualifier.
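
To make the mapping above concrete, below is a minimal, self-contained sketch of how each SQL verb translates into hbase-client calls, using the same column = column family = column qualifier convention. The names used here ("t1", "col01", "row1", HBaseVerbSketch) are placeholders for illustration, not part of the application that follows.

import java.util.Arrays;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseVerbSketch {
	public static void main(String[] args) throws Exception {
		try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
			 Admin admin = conn.getAdmin()) {

			TableName name = TableName.valueOf("t1");

			// CREATE: DDL goes through Admin.
			admin.createTable(TableDescriptorBuilder.newBuilder(name)
					.setColumnFamilies(Arrays.asList(ColumnFamilyDescriptorBuilder.of("col01")))
					.build());

			try (Table table = conn.getTable(name)) {
				// INSERT and UPDATE are the same operation: PUT (a new cell version is written).
				// Note the column family and qualifier are both "col01".
				Put put = new Put(Bytes.toBytes("row1"));
				put.addColumn(Bytes.toBytes("col01"), Bytes.toBytes("col01"), Bytes.toBytes("v1"));
				table.put(put);

				// SELECT: GET for a single row key, SCAN for ranges or filtered reads.
				Result row = table.get(new Get(Bytes.toBytes("row1")));
				System.out.println(row);
				try (ResultScanner rows = table.getScanner(new Scan())) {
					for (Result r : rows) {
						System.out.println(r);
					}
				}

				// DELETE: remove a row by its row key.
				table.delete(new Delete(Bytes.toBytes("row1")));
			}

			// DROP: disable first, then delete.
			admin.disableTable(name);
			admin.deleteTable(name);
		}
	}
}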

 

 

First, add the dependency needed to connect to HBase. As with Hive, note that the client version must match the installed HBase version.

<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-client</artifactId>
    <version>2.4.15</version>
</dependency>

 

 

Next, add the ZooKeeper connection settings to application.yml. You could instead copy in hbase-site.xml from the HBase installation and have the application read that file, but in a test environment only the ZooKeeper IP and port are needed, so I set just those two values directly:

hbase: 
  zookeeper: 
    quorum: 192.168.0.10
    port: 2181
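
If you would rather read hbase-site.xml directly, the bean shown in HBaseConfig.java below could be reduced to something like this sketch; note that the classpath location of hbase-site.xml is an assumption here:

@Bean
@Qualifier("hbaseConfig")
public org.apache.hadoop.conf.Configuration hbaseConfig() {
	org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();
	// HBaseConfiguration.create() already loads hbase-site.xml when it is on the
	// classpath; addResource just makes that dependency explicit.
	config.addResource("hbase-site.xml");
	return config;
}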

 

 

Next, create the class files. I overwrote the Hive application from the previous post, and the project consists of three files: HBaseConfig.java, HBaseDao.java, and HBaseService.java.

As before, explanations for each piece of code are in the comments.

 

HBaseConfig.java

package com.example.demo.config;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.transaction.annotation.EnableTransactionManagement;

/*
 * HBase connection configuration.
 * The connection to HBase goes through ZooKeeper, so the ZK IP and PORT are required.
 * (hbase.zookeeper.quorum : ip or host | hbase.zookeeper.port : port)
 */
@Configuration
@EnableTransactionManagement
public class HBaseConfig {
	@Value("${hbase.zookeeper.quorum}")
	private String hbaseZookeeperQuorum;
	@Value("${hbase.zookeeper.port}")
	private String hbaseZookeeperPort;
	
	@Bean
	@Qualifier("hbaseConfig")
	public org.apache.hadoop.conf.Configuration hbaseConfig() {
		org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();
		
		config.set("hbase.zookeeper.quorum", hbaseZookeeperQuorum);
		config.set("hbase.zookeeper.property.clientPort", hbaseZookeeperPort);
		
		return config;
	}
}

 

HBaseDao.java

package com.example.demo.hbase.dao;

import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ColumnValueFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.CompareOperator;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Repository;

import com.example.demo.Commons;

/*
 * Class that talks directly to HBase to read and write data.
 * Reference : https://www.tutorialspoint.com/hbase/hbase_read_data.htm
 * Reference : https://www.baeldung.com/hbase
 * Reference : https://docs.cloudera.com/runtime/7.2.10/accessing-hbase/topics/hbase-use-the-java-api.html
 */
@Repository
public class HBaseDao {

	@Autowired
	@Qualifier("hbaseConfig")
	private Configuration config;
	
	/**
	 * Common helper : open an HBase connection.
	 */
	private Connection connectHbase() throws IOException {
		return ConnectionFactory.createConnection(config);
	}
	/**
	 * Common helper : get an Admin from the connection (DDL must be executed through Admin).
	 */
	private Admin getAdmin(Connection connection) throws IOException {
		return connection.getAdmin();
	}
	/**
	 * Common helper : close the HBase connection.
	 */
	private void disconnectHbase(Table table, Admin admin, Connection connection) throws IOException {
		if(table != null) table.close();
		if(admin != null) admin.close();
		
		connection.close();
	}
	
	// DDL
	/**
	 * Create a table.
	 * 
	 * @param queryConditions
	 * 	-tableName (String) : name of the table to create
	 *  -columnsInfo (List<String>) : names of the columns to create
	 *  
	 */
	public void createTable(Map<String, Object> queryConditions) throws IOException {
		Connection connection = connectHbase();
		Admin admin = getAdmin(connection);
		
		TableName tableName = TableName.valueOf(queryConditions.get(Commons.TABLE_NAME).toString());
		
		List<String> columns = (List<String>) queryConditions.get(Commons.COLUMNS_INFO);
		List<ColumnFamilyDescriptor> columnDescList = new LinkedList<>();
		
		// Map ColumnFamily : Column 1:1 so ADD/DROP COLUMN can be emulated.
		// This is a test setup, so a 1:1 mapping is fine; there is no need to group columns by data.
		for(String column : columns) {
			columnDescList.add(ColumnFamilyDescriptorBuilder.of(column));
		}
		
		// Attach the created column families to the table.
		TableDescriptor desc = 
				TableDescriptorBuilder.newBuilder(tableName)
				.setColumnFamilies(columnDescList)
				.build();
		
		admin.createTable(desc);
		
		disconnectHbase(null, admin, connection);
	}
	/**
	 * Alter a table.
	 * 
	 * @param queryConditions
	 * 	-tableName (String) : name of the table to alter
	 * 	-addColumnsInfo (List<String>) : columns to add
	 *  -dropColumnsInfo (List<String>) : columns to drop
	 *  
	 */
	public void updateTable(Map<String, Object> queryConditions) throws IOException {
		Connection connection = connectHbase();
		Admin admin = getAdmin(connection);
		
		TableName tableName = TableName.valueOf(queryConditions.get(Commons.TABLE_NAME).toString());
		
		// Being NoSQL, MODIFY COLUMN is left out (there is no fixed column definition to modify in the first place).
		
		List<String> addColumnList = (List<String>) queryConditions.get(Commons.ADD_COLUMNS_INFO);
		List<String> dropColumnlist = (List<String>) queryConditions.get(Commons.DROP_COLUMNS_INFO);
		
		for(String column : addColumnList) {
			admin.addColumnFamily(tableName, ColumnFamilyDescriptorBuilder.of(column));
		}
		for(String column : dropColumnlist) {
			admin.deleteColumnFamily(tableName, column.getBytes());
		}

		disconnectHbase(null, admin, connection);
	}
	/**
	 * Drop a table.
	 * 
	 * @param table
	 * 	name of the table to drop
	 *  
	 */
	public void dropTable(String table) throws IOException {
		Connection connection = connectHbase();
		Admin admin = getAdmin(connection);
		
		TableName tableName = TableName.valueOf(table);
		
		// Without this check, issuing the command against a nonexistent table throws TableNotFoundException.
		// TODO: every method except CREATE (DDL and DML alike), not just DROP, should run this check before touching data; skipped here to keep the write-up short.
		if(admin.tableExists(tableName)) {
			// Disable before drop, otherwise TableNotDisabledException is thrown.
			admin.disableTable(tableName);
			admin.deleteTable(tableName);
		}
		
		disconnectHbase(null, admin, connection);
	}
	
	// DML
	/**
	 * Insert data.
	 * 
	 * @param queryConditions: 
	 * 	-tableName (String) : table name
	 *  -hbasePk (String) : name of the column used as the table's PK (ex: col01)
	 *  -dataInfo (List<Map<String, Object>>) : data to insert
	 *  	-key : column name (ex: col01)
	 *  	-value : value (ex: '001')
	 *  
	 */
	public void insertData(Map<String, Object> queryConditions) throws IOException {
		Connection connection = connectHbase();
		
		TableName tableName = TableName.valueOf(queryConditions.get(Commons.TABLE_NAME).toString());
		
		Table table = connection.getTable(tableName);
		List<Map<String, Object>> insertDataList = (List<Map<String, Object>>) queryConditions.get(Commons.DATA_INFO);
		String pkName = queryConditions.get(Commons.HBASE_PK).toString(); // HBase operations set the PK (row key) first and act on the row holding that PK, so the PK name is kept separately.
		
		for(Map<String, Object> insertData : insertDataList) {
			
			// Set the PK (row key).
			Put put = new Put(insertData.get(pkName).toString().getBytes());
			
			Iterator<String> keySet = insertData.keySet().iterator();
			while(keySet.hasNext()) {
				String key = keySet.next();
				Object value = insertData.get(key);
				
				put.addColumn(key.getBytes(), key.getBytes(), value.toString().getBytes());
			}
			
			table.put(put);
		}

		disconnectHbase(table, null, connection);
	}
	/**
	 * Update data.
	 * 
	 * @param queryConditions: 
	 * 	-tableName (String) : table name
	 * 	-hbasePk (String) : name of the value used as the table's PK
	 * 	-dataInfo (Map<String, Object>) : new data
	 * 		-key : column name (ex: col02)
	 * 		-value : value (ex: '001')
	 * 	-where (Map<String, Object>) : WHERE conditions
	 */
	public void updateData(Map<String, Object> queryConditions) throws IOException {
		Connection connection = connectHbase();
		
		String pkName = queryConditions.get(Commons.HBASE_PK).toString();
		TableName tableName = TableName.valueOf(queryConditions.get(Commons.TABLE_NAME).toString());
		Map<String, Object> updateData = (Map<String, Object>) queryConditions.get(Commons.UPDATE_DATA_INFO);
		Table table = connection.getTable(tableName);

		// Conditional queries use scan | fetching a specific row uses get(rowId).
		// Note : to limit a scan to a specific column family, use scan.addFamily(byte[]).
		Scan scan = new Scan();
		
		if(queryConditions.containsKey(Commons.WHERE) && queryConditions.get(Commons.WHERE) != null) {
			Map<String, Object> where = (Map<String, Object>) queryConditions.get(Commons.WHERE);
			Iterator<String> keys = where.keySet().iterator();
			
			// Apply the WHERE conditions (filters).
			while(keys.hasNext()) {
				String key = keys.next();
				Object value = where.get(key);
				
				// Test code, so only == is handled here.
				// With ColumnValueFilter the result contains only the specified column, like SELECT {column}. ( key.getBytes )

				// If there are multiple conditions, declare an org.apache.hadoop.hbase.filter.FilterList,
				// choose an operator such as FilterList.Operator.MUST_PASS_ALL,
				// addFilter each Filter to the FilterList, and hand it to the scan via setFilter.
				Filter filter = new ColumnValueFilter(
							key.getBytes(),
							key.getBytes(),
							CompareOperator.EQUAL, 
							value.toString().getBytes()
						);
				
				scan.setFilter(filter);
			}
			
			ResultScanner scanner = table.getScanner(scan);
			
			for(Result result : scanner) {
				
				// Run a PUT against the row key found by the scan.
				Put put = new Put(result.getRow());
					
				Iterator<String> keySet = updateData.keySet().iterator();
				while(keySet.hasNext()) {
					String key = keySet.next();
					Object value = updateData.get(key);
					
					put.addColumn(key.getBytes(), key.getBytes(), value.toString().getBytes());
				}
				
				table.put(put);
			}
			
			scanner.close();
		}
		else {
			// No WHERE conditions, so update by PK.
			// Set the PK (row key).
			if(updateData.containsKey(pkName) && updateData.get(pkName) != null) {
				Put put = new Put(updateData.get(pkName).toString().getBytes());
				
				Iterator<String> keySet = updateData.keySet().iterator();
				while(keySet.hasNext()) {
					String key = keySet.next();
					Object value = updateData.get(key);
					
					put.addColumn(key.getBytes(), key.getBytes(), value.toString().getBytes());
				}
				
				table.put(put);
			}
		}
		
		disconnectHbase(table, null, connection);
	}	
	/**
	 * Delete data.
	 * 
	 * @param queryConditions: 
	 * 	-tableName (String) : table name
	 *  -hbasePk (List<String>) : PKs of the rows to delete
	 *  
	 *  For a conditional delete, SCAN as in UPDATE, extract the row ids of the matching rows, and DELETE those.
	 */
	public void deleteData(Map<String, Object> queryConditions) throws IOException {
		Connection connection = connectHbase();

		TableName tableName = TableName.valueOf(queryConditions.get(Commons.TABLE_NAME).toString());
		
		Table table = connection.getTable(tableName);
		List<String> rowPks = (List<String>) queryConditions.get(Commons.HBASE_PK);
		List<Delete> deleteList = new LinkedList<>();
		
		// Build the list of rows to delete.
		for(String rowPk : rowPks) {
			Delete delete = new Delete(rowPk.getBytes());
			deleteList.add(delete);
		}
		
		table.delete(deleteList);

		disconnectHbase(table, null, connection);
	}
	/**
	 * Select data.
	 * 
	 * @param queryConditions:
	 * 	-tableName (String) : table name
	 *  -where (Map<String, Object>) : WHERE conditions
	 *  	-key : column name
	 *  	-value : value to match
	 *  
	 *  Test code, so only = is implemented.
	 */
	public List<String> selectData(Map<String, Object> queryConditions) throws IOException {
		Connection connection = connectHbase();

		TableName tableName = TableName.valueOf(queryConditions.get(Commons.TABLE_NAME).toString());
		
		Table table = connection.getTable(tableName);
		
		// Conditional queries use scan | fetching a specific row uses get(rowId).
		// Note : to limit a scan to a specific column family, use scan.addFamily(byte[]).
		Scan scan = new Scan();
		
		if(queryConditions.containsKey(Commons.WHERE) && queryConditions.get(Commons.WHERE) != null) {
			Map<String, Object> where = (Map<String, Object>) queryConditions.get(Commons.WHERE);
			Iterator<String> keys = where.keySet().iterator();
			
			// Apply the WHERE conditions (filters).
			while(keys.hasNext()) {
				String key = keys.next();
				Object value = where.get(key);
				
				// Test code, so only == is handled here.
				// Where ColumnValueFilter returned only the matched column, this returns everything in the row.
				Filter filter = new SingleColumnValueFilter(
							key.getBytes(),
							key.getBytes(),
							CompareOperator.EQUAL, 
							value.toString().getBytes()
						);
				
				scan.setFilter(filter);
			}
			
		}
		
		List<String> resultList = new LinkedList<>();
		ResultScanner scanner = table.getScanner(scan);
		
		for(Result result : scanner) {
			Iterator<byte[]> columnFamilyNamesItr = table.getDescriptor().getColumnFamilyNames().iterator();
			StringBuffer column = new StringBuffer();
			while(columnFamilyNamesItr.hasNext()) {
				String key = new String(columnFamilyNamesItr.next());
				String value = "";
				try {
					value = new String(result.getValue(key.getBytes(), key.getBytes()));
				}
				catch(NullPointerException e) {
					// No value exists for this column.
					value = "";
				}
				column.append(key).append(":").append(value);
				if(columnFamilyNamesItr.hasNext()) column.append(", ");
			}
			
			resultList.add(column.toString());
		}

		scanner.close();
		disconnectHbase(table, null, connection);
		
		return resultList;
	}
}
  • SingleColumnValueFilter is used the same way as ColumnValueFilter, but returns the whole row instead of just the matched cell.
  • If you want OR semantics, build a FilterList with FilterList.Operator.MUST_PASS_ONE and add each Filter via addFilter.
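
As a sketch of that FilterList usage (the column names and values below are placeholders, and this would slot into the Scan setup inside HBaseDao):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListSketch {
	// Builds a Scan whose filter ANDs two equality conditions.
	// Swap MUST_PASS_ALL for MUST_PASS_ONE to get OR semantics instead.
	public static Scan scanWhere() {
		FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
		filters.addFilter(new SingleColumnValueFilter(
				Bytes.toBytes("col01"), Bytes.toBytes("col01"),
				CompareOperator.EQUAL, Bytes.toBytes("001")));
		filters.addFilter(new SingleColumnValueFilter(
				Bytes.toBytes("col02"), Bytes.toBytes("col02"),
				CompareOperator.EQUAL, Bytes.toBytes("002")));

		Scan scan = new Scan();
		scan.setFilter(filters);
		return scan;
	}
}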

 

HBaseService.java

package com.example.demo.hbase.service;

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.example.demo.Commons;
import com.example.demo.hbase.dao.HBaseDao;

import lombok.extern.slf4j.Slf4j;

/*
 * Service layer: packs the caller's arguments into the queryConditions map
 * and delegates to HBaseDao.
 */
@Slf4j
@Service
public class HBaseService {
	@Autowired
	private HBaseDao hbaseDao;
	
	public void createTable(String tableName, List<String> columnsInfo) {
		Map<String, Object> queryConditions = new HashMap<>();
		
		queryConditions.put(Commons.TABLE_NAME, tableName);
		queryConditions.put(Commons.COLUMNS_INFO, columnsInfo);
		
		try {
			hbaseDao.createTable(queryConditions);
			log.info("@@@ SQL CREATE TABLE DONE");
		}
		catch(Exception e) {
			log.error("@@@ SQL ERROR OCCURED WHILE CREATE TABLE: {}", e.getMessage());
		}
	}
	
	public void updateTable(String tableName, List<String> addColumnsInfo, List<String> dropColumnsInfo) {
		Map<String, Object> queryConditions = new HashMap<>();
		
		queryConditions.put(Commons.TABLE_NAME, tableName);
		queryConditions.put(Commons.ADD_COLUMNS_INFO, addColumnsInfo);
		queryConditions.put(Commons.DROP_COLUMNS_INFO, dropColumnsInfo);
		
		try {
			hbaseDao.updateTable(queryConditions);
			log.info("@@@ SQL UPDATE TABLE DONE");
		}
		catch(Exception e) {
			log.error("@@@ SQL ERROR OCCURED WHILE UPDATE TABLE: {}", e.getMessage());
		}
	}
	
	public void dropTable(String tableName) {
		try {
			hbaseDao.dropTable(tableName);
			log.info("@@@ SQL DROP TABLE DONE");
		}
		catch(Exception e) {
			log.error("@@@ SQL ERROR OCCURED WHILE DROP TABLE : {}", e.getMessage());
		}
	}
	
	public void insertData(String tableName, List<Map<String, Object>> values, String pkName) {
		Map<String, Object> queryConditions = new HashMap<>();
		
		queryConditions.put(Commons.TABLE_NAME, tableName);
		queryConditions.put(Commons.DATA_INFO, values);
		queryConditions.put(Commons.HBASE_PK, pkName);
		
		try {
			hbaseDao.insertData(queryConditions);
			log.info("@@@ SQL INSERT DATA DONE");
		}
		catch(Exception e) {
			log.error("@@@ SQL ERROR OCCURED WHILE INSERT DATA : {}", e.getMessage());
		}
	}
	
	public void updateData(String tableName, Map<String, Object> values, String pkName, Map<String, Object> conditions) {
		Map<String, Object> queryConditions = new HashMap<>();
		
		queryConditions.put(Commons.TABLE_NAME, tableName);
		queryConditions.put(Commons.UPDATE_DATA_INFO, values);
		queryConditions.put(Commons.HBASE_PK, pkName);
		queryConditions.put(Commons.WHERE, conditions);
		
		try {
			hbaseDao.updateData(queryConditions);
			log.info("@@@ SQL UPDATE DATA DONE");
		}
		catch(Exception e) {
			log.error("@@@ SQL ERROR OCCURED WHILE UPDATE DATA : {}", e.getMessage());
		}
	}
	
	public void deleteData(String tableName, List<String> rowPks) {
		Map<String, Object> queryConditions = new HashMap<>();
		
		queryConditions.put(Commons.TABLE_NAME, tableName);
		queryConditions.put(Commons.HBASE_PK, rowPks);
		
		try {
			hbaseDao.deleteData(queryConditions);
			log.info("@@@ SQL DELETE DATA DONE");
		}
		catch(Exception e) {
			log.error("@@@ SQL ERROR OCCURED WHILE DELETE DATA : {}", e.getMessage());
		}
	}
	
	public List<String> selectData(String tableName, Map<String, Object> conditions) {
		Map<String, Object> queryConditions = new HashMap<>();
		
		queryConditions.put(Commons.TABLE_NAME, tableName);
		queryConditions.put(Commons.WHERE, conditions);
		
		try {
			return hbaseDao.selectData(queryConditions);
		}
		catch(Exception e) {
			log.error("@@@ SQL ERROR OCCURED WHILE SELECT DATA : {}", e.getMessage());
			return Collections.emptyList();
		}
	}
}

 

 

Finally, write a test file and run it.

	@Autowired HBaseService hbaseService;
	@Test
	void hbaseTest() {
		String tableName = "test_table";

		log.info("### DROP TABLE STARTED...");
		hbaseService.dropTable(tableName);
		
		log.info("\n");
		
		
		// CREATE TABLE
		log.info("### CREATE TABLE STARTED...");
		List<String> createDDL = new LinkedList<>();
		
		createDDL.add("ID");
		createDDL.add("VALUE01");
		createDDL.add("VALUE02");
		
		hbaseService.createTable(tableName, createDDL);
		
		log.info("\n");
		
		// ALTER TABLE
		log.info("### ALTER TABLE STARTED...");
		List<String> updateAddColumnDDL = new LinkedList<>();
		List<String> updateDelColumnDDL = new LinkedList<>();
		
		updateAddColumnDDL.add("VALUE03");
		updateAddColumnDDL.add("VALUE04");
		
		updateDelColumnDDL.add("VALUE02");
		
		hbaseService.updateTable(tableName, updateAddColumnDDL, updateDelColumnDDL);

		log.info("\n");
		
		// INSERT DATA
		log.info("### INSERT DATA STARTED...");
		List<Map<String, Object>> insertData = new LinkedList<>();
		
		Map<String, Object> data1 = new LinkedHashMap<>();
		data1.put("ID", "ID01");
		data1.put("VALUE01", "1234");
		data1.put("VALUE03", "1235");
		data1.put("VALUE04", "1236");
		
		Map<String, Object> data2 = new LinkedHashMap<>();
		data2.put("ID", "ID02");
		data2.put("VALUE01", "2345");
		data2.put("VALUE03", "2346");
		data2.put("VALUE04", "2347");
		
		Map<String, Object> data3 = new LinkedHashMap<>();
		data3.put("ID", "ID03");
		data3.put("VALUE01", "3456");
		data3.put("VALUE03", "3457");
		data3.put("VALUE04", "3458");
		
		insertData.add(data1);
		insertData.add(data2);
		insertData.add(data3);
		
		hbaseService.insertData(tableName, insertData, "ID");

		log.info("\n");
		
		// SELECT DATA (ALL)
		log.info("### SELECT DATA STARTED...");
		
		log.info("===========================");
		log.info("===========================");
		log.info(hbaseService.selectData(tableName, null).toString());
		// [ID:ID01, VALUE01:1234, VALUE03:1235, VALUE04:1236, ID:ID02, VALUE01:2345, VALUE03:2346, VALUE04:2347, ID:ID03, VALUE01:3456, VALUE03:3457, VALUE04:3458]
		log.info("===========================");
		log.info("===========================");

		log.info("\n");

		// UPDATE DATA
		log.info("### UPDATE DATA STARTED...");
		Map<String, Object> updateValues = new HashMap<>();
		updateValues.put("VALUE01", "3459");
		updateValues.put("VALUE04", "3450");
		
		Map<String, Object> updateWhere = new HashMap<>();
		updateWhere.put("ID", "ID03");
		
		hbaseService.updateData(tableName, updateValues, "ID", updateWhere);
		
		log.info("\n");

		// SELECT DATA (ONE)
		log.info("### SELECT DATA STARTED...");
		Map<String, Object> selectWhere = new HashMap<>();
		selectWhere.put("ID", "ID03");
		
		log.info("===========================");
		log.info("===========================");
		log.info(hbaseService.selectData(tableName, selectWhere).toString());
		// [ID:ID03, VALUE01:3459, VALUE03:3457, VALUE04:3450]
		log.info("===========================");
		log.info("===========================");

		log.info("\n");
		
		// DELETE DATA
		log.info("### DELETE DATA STARTED...");
		List<String> deleteWhere = new LinkedList<>();
		deleteWhere.add("ID03");
		
		hbaseService.deleteData(tableName, deleteWhere);
		
		log.info("\n");
		
		// SELECT DATA (ALL)
		log.info("### SELECT DATA STARTED...");
		
		log.info("===========================");
		log.info("===========================");
		log.info(hbaseService.selectData(tableName, null).toString());
		// [ID:ID01, VALUE01:1234, VALUE03:1235, VALUE04:1236, ID:ID02, VALUE01:2345, VALUE03:2346, VALUE04:2347]
		log.info("===========================");
		log.info("===========================");
		
		log.info("\n");

		// DROP TABLE
		log.info("### DROP TABLE STARTED...");
		hbaseService.dropTable(tableName);
		
		log.info("\n");
		
		// DONE
		log.info("### ALL METHOD DONE SUCCESSFULLY");
	}

 

 

Before running the test, open the Windows hosts file with Notepad run as administrator and add an entry for the hadoopsv server.
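
For reference, the entry would look something like the line below, assuming the HBase host advertises itself as hadoopsv at the same address configured for ZooKeeper in application.yml; the file in question is C:\Windows\System32\drivers\etc\hosts, which is why the editor needs administrator rights.

192.168.0.10    hadoopsv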

 

Note that the HADOOP_HOME-related error printed at startup can safely be ignored.

Running the test produces the following output.

2023-01-22 22:36:33.316  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : Started HadoopTestApplicationTests in 3.115 seconds (JVM running for 5.649)
2023-01-22 22:36:33.866  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### DROP TABLE STARTED...
2023-01-22 22:36:33.977  WARN 35232 --- [           main] org.apache.hadoop.util.NativeCodeLoader  : Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2023-01-22 22:36:36.202  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT
2023-01-22 22:36:36.203  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:host.name=host.docker.internal
2023-01-22 22:36:36.203  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:java.version=1.8.0_265
2023-01-22 22:36:36.203  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:java.vendor=Red Hat, Inc.
2023-01-22 22:36:36.203  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:java.home=C:\Program Files\RedHat\java-1.8.0-openjdk-1.8.0.265-3\jre
2023-01-22 22:36:36.203  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:java.class.path=...
2023-01-22 22:36:36.207  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:java.library.path=..
2023-01-22 22:36:36.207  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:java.io.tmpdir=...
2023-01-22 22:36:36.207  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:java.compiler=<NA>
2023-01-22 22:36:36.207  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:os.name=Windows 10
2023-01-22 22:36:36.207  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:os.arch=amd64
2023-01-22 22:36:36.207  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:os.version=10.0
2023-01-22 22:36:36.208  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:user.name=이민우
2023-01-22 22:36:36.208  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:user.home=...
2023-01-22 22:36:36.208  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Client environment:user.dir...
2023-01-22 22:36:36.211  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:37.839  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:37.845  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:37.878  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f0039, negotiated timeout = 90000
2023-01-22 22:36:42.721  INFO 35232 --- [           main] o.apache.hadoop.hbase.client.HBaseAdmin  : Started disable of test_table
2023-01-22 22:36:43.091  INFO 35232 --- [           main] o.apache.hadoop.hbase.client.HBaseAdmin  : Operation: DISABLE, Table Name: default:test_table, procId: 527 completed
2023-01-22 22:36:43.448  INFO 35232 --- [           main] o.apache.hadoop.hbase.client.HBaseAdmin  : Operation: DELETE, Table Name: default:test_table, procId: 530 completed
2023-01-22 22:36:43.448  INFO 35232 --- [           main] o.a.h.h.client.ConnectionImplementation  : Closing master protocol: MasterService
2023-01-22 22:36:43.451  INFO 35232 --- [           main] c.e.demo.hbase.service.HBaseService      : @@@ SQL DROP TABLE DONE
2023-01-22 22:36:43.452  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:43.452  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### CREATE TABLE STARTED...
2023-01-22 22:36:43.454  INFO 35232 --- [2181@0x413d2cd1] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:43.456  INFO 35232 --- [071-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:43.456  INFO 35232 --- [2181@0x48df4071] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f0039 closed
2023-01-22 22:36:43.459  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:43.461  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:43.477  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f003a, negotiated timeout = 90000
2023-01-22 22:36:44.289  INFO 35232 --- [           main] o.apache.hadoop.hbase.client.HBaseAdmin  : Operation: CREATE, Table Name: default:test_table, procId: 531 completed
2023-01-22 22:36:44.289  INFO 35232 --- [           main] o.a.h.h.client.ConnectionImplementation  : Closing master protocol: MasterService
2023-01-22 22:36:44.290  INFO 35232 --- [           main] c.e.demo.hbase.service.HBaseService      : @@@ SQL CREATE TABLE DONE
2023-01-22 22:36:44.290  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:44.290  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### ALTER TABLE STARTED...
2023-01-22 22:36:44.293  INFO 35232 --- [2181@0x7675c171] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:44.296  INFO 35232 --- [cd1-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:44.296  INFO 35232 --- [2181@0x413d2cd1] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f003a closed
2023-01-22 22:36:44.300  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:44.303  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:44.309  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f003b, negotiated timeout = 90000
2023-01-22 22:36:46.759  INFO 35232 --- [           main] o.a.h.h.client.ConnectionImplementation  : Closing master protocol: MasterService
2023-01-22 22:36:46.760  INFO 35232 --- [           main] c.e.demo.hbase.service.HBaseService      : @@@ SQL UPDATE TABLE DONE
2023-01-22 22:36:46.761  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:46.761  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### INSERT DATA STARTED...
2023-01-22 22:36:46.764  INFO 35232 --- [2181@0x67f9cb52] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:46.768  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:46.769  INFO 35232 --- [2181@0x7675c171] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f003b closed
2023-01-22 22:36:46.769  INFO 35232 --- [171-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:46.774  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:46.782  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f003c, negotiated timeout = 90000
2023-01-22 22:36:46.992  INFO 35232 --- [           main] c.e.demo.hbase.service.HBaseService      : @@@ SQL INSERT DATA DONE
2023-01-22 22:36:46.992  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:46.992  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### SELECT DATA STARTED...
2023-01-22 22:36:46.992  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:46.992  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:46.994  INFO 35232 --- [2181@0x6db328f8] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:47.009  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:47.009  INFO 35232 --- [b52-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:47.009  INFO 35232 --- [2181@0x67f9cb52] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f003c closed
2023-01-22 22:36:47.011  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:47.019  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f003d, negotiated timeout = 90000
2023-01-22 22:36:47.110  INFO 35232 --- [           main] o.a.h.h.client.ConnectionImplementation  : Closing master protocol: MasterService
2023-01-22 22:36:47.111  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : [ID:ID01, VALUE01:1234, VALUE03:1235, VALUE04:1236, ID:ID02, VALUE01:2345, VALUE03:2346, VALUE04:2347, ID:ID03, VALUE01:3456, VALUE03:3457, VALUE04:3458]
2023-01-22 22:36:47.112  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.112  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.112  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:47.112  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### UPDATE DATA STARTED...
2023-01-22 22:36:47.113  INFO 35232 --- [2181@0x6070775e] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:47.117  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:47.117  INFO 35232 --- [2181@0x6db328f8] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f003d closed
2023-01-22 22:36:47.117  INFO 35232 --- [8f8-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:47.118  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:47.126  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f003e, negotiated timeout = 90000
2023-01-22 22:36:47.208  INFO 35232 --- [           main] c.e.demo.hbase.service.HBaseService      : @@@ SQL UPDATE DATA DONE
2023-01-22 22:36:47.208  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:47.208  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### SELECT DATA STARTED...
2023-01-22 22:36:47.208  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.208  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.209  INFO 35232 --- [2181@0x60bf494c] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:47.212  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:47.213  INFO 35232 --- [2181@0x6070775e] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f003e closed
2023-01-22 22:36:47.213  INFO 35232 --- [75e-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:47.214  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:47.220  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f003f, negotiated timeout = 90000
2023-01-22 22:36:47.286  INFO 35232 --- [           main] o.a.h.h.client.ConnectionImplementation  : Closing master protocol: MasterService
2023-01-22 22:36:47.289  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : [ID:ID03, VALUE01:3459, VALUE03:3457, VALUE04:3450]
2023-01-22 22:36:47.289  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.289  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.289  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:47.289  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### DELETE DATA STARTED...
2023-01-22 22:36:47.291  INFO 35232 --- [2181@0x51ff3c4b] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:47.294  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:47.295  INFO 35232 --- [2181@0x60bf494c] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f003f closed
2023-01-22 22:36:47.296  INFO 35232 --- [94c-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:47.296  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:47.305  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f0040, negotiated timeout = 90000
2023-01-22 22:36:47.419  INFO 35232 --- [           main] c.e.demo.hbase.service.HBaseService      : @@@ SQL DELETE DATA DONE
2023-01-22 22:36:47.419  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:47.419  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### SELECT DATA STARTED...
2023-01-22 22:36:47.420  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.420  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.421  INFO 35232 --- [2181@0x450f0235] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:47.425  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:47.425  INFO 35232 --- [2181@0x51ff3c4b] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f0040 closed
2023-01-22 22:36:47.425  INFO 35232 --- [c4b-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:47.427  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:47.433  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f0041, negotiated timeout = 90000
2023-01-22 22:36:47.491  INFO 35232 --- [           main] o.a.h.h.client.ConnectionImplementation  : Closing master protocol: MasterService
2023-01-22 22:36:47.493  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : [ID:ID01, VALUE01:1234, VALUE03:1235, VALUE04:1236, ID:ID02, VALUE01:2345, VALUE03:2346, VALUE04:2347]
2023-01-22 22:36:47.493  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.493  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ===========================
2023-01-22 22:36:47.493  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:47.493  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### DROP TABLE STARTED...
2023-01-22 22:36:47.494  INFO 35232 --- [2181@0x7d2c345d] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=192.168.0.10:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$737/1753936723@5a56294a
2023-01-22 22:36:47.498  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server 192.168.0.10/192.168.0.10:2181. Will not attempt to authenticate using SASL (unknown error)
2023-01-22 22:36:47.500  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to 192.168.0.10/192.168.0.10:2181, initiating session
2023-01-22 22:36:47.502  INFO 35232 --- [2181@0x450f0235] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f0041 closed
2023-01-22 22:36:47.502  INFO 35232 --- [235-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:47.507  INFO 35232 --- [8.0.10:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server 192.168.0.10/192.168.0.10:2181, sessionid = 0x2000004e17f0042, negotiated timeout = 90000
2023-01-22 22:36:47.534  INFO 35232 --- [           main] o.apache.hadoop.hbase.client.HBaseAdmin  : Started disable of test_table
2023-01-22 22:36:48.675  INFO 35232 --- [           main] o.apache.hadoop.hbase.client.HBaseAdmin  : Operation: DISABLE, Table Name: default:test_table, procId: 549 completed
2023-01-22 22:36:49.324  INFO 35232 --- [           main] o.apache.hadoop.hbase.client.HBaseAdmin  : Operation: DELETE, Table Name: default:test_table, procId: 552 completed
2023-01-22 22:36:49.324  INFO 35232 --- [           main] o.a.h.h.client.ConnectionImplementation  : Closing master protocol: MasterService
2023-01-22 22:36:49.326  INFO 35232 --- [           main] c.e.demo.hbase.service.HBaseService      : @@@ SQL DROP TABLE DONE
2023-01-22 22:36:49.326  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : 

2023-01-22 22:36:49.327  INFO 35232 --- [           main] c.e.demo.HadoopTestApplicationTests     : ### ALL METHOD DONE SUCCESSFULLY
2023-01-22 22:36:49.335  INFO 35232 --- [45d-EventThread] org.apache.zookeeper.ClientCnxn          : EventThread shut down
2023-01-22 22:36:49.335  INFO 35232 --- [2181@0x7d2c345d] org.apache.zookeeper.ZooKeeper           : Session: 0x2000004e17f0042 closed