
Examples

JDBC Secondary Development Code Example 1

This code example provides the following functions:

  1. Provides the username and keytab file path in the JDBC URL so that the program can automatically complete the security login and create a Hive connection (a sample URL is sketched after this list).
  2. Runs HQL statements for creating, querying, and deleting tables.
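
In security (Kerberos) mode, the JDBC URL assembled in main() below takes roughly the following form; the hosts, namespace, and principals are environment-specific placeholders, not prescribed values:

jdbc:hive2://192.168.1.1:2181,192.168.1.2:2181,192.168.1.3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2;sasl.qop=auth-conf;auth=KERBEROS;principal=hive/hadoop.hadoop.com@HADOOP.COM;user.principal=user;user.keytab=conf/user.keytab
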
package com.huawei.bigdata.hive.example;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.Properties;


import org.apache.hadoop.conf.Configuration;
import com.huawei.bigdata.security.LoginUtil;

public class JDBCExample {
  private static final String HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
  
  private static final String ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME = "Client";
  private static final String ZOOKEEPER_SERVER_PRINCIPAL_KEY = "zookeeper.server.principal";    
  private static final String ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL = "zookeeper/hadoop.hadoop.com";
  
  private static Configuration CONF = null; 
  private static String KRB5_FILE = null;
  private static String USER_NAME = null;
  private static String USER_KEYTAB_FILE = null;
  
  private static String zkQuorum = null; // List of ZooKeeper node IP addresses and ports
  private static String auth = null;
  private static String sasl_qop = null;
  private static String zooKeeperNamespace = null;
  private static String serviceDiscoveryMode = null;
  private static String principal = null;
  private static String auditAddition = null;

  private static void init() throws IOException {
    CONF = new Configuration();

    Properties clientInfo = null;
    String userdir = System.getProperty("user.dir") + File.separator
        + "conf" + File.separator;
    InputStream fileInputStream = null;
    try {
      clientInfo = new Properties();
      //"hiveclient.properties" is the client configuration file. If the multi-instance feature is used, you need to replace the file with "hiveclient.properties" of the corresponding instance client.
      //"hiveclient.properties" file is stored in the config directory of the decompressed installation package of the corresponding instance client. 
      String hiveclientProp = userdir + "hiveclient.properties" ;
      File propertiesFile = new File(hiveclientProp);
      fileInputStream = new FileInputStream(propertiesFile);
      clientInfo.load(fileInputStream);
    } catch (Exception e) {
      throw new IOException(e);
    } finally {
      if (fileInputStream != null) {
        fileInputStream.close();
        fileInputStream = null;
      }
    }
    // The format of zkQuorum is "xxx.xxx.xxx.xxx:2181,xxx.xxx.xxx.xxx:2181,xxx.xxx.xxx.xxx:2181".
    // "xxx.xxx.xxx.xxx" indicates the IP address of a node where ZooKeeper is located. The default port is 2181.
    zkQuorum = clientInfo.getProperty("zk.quorum");
    auth = clientInfo.getProperty("auth");
    sasl_qop = clientInfo.getProperty("sasl.qop");
    zooKeeperNamespace = clientInfo.getProperty("zooKeeperNamespace");
    serviceDiscoveryMode = clientInfo.getProperty("serviceDiscoveryMode");
    principal = clientInfo.getProperty("principal"); 
    auditAddition = clientInfo.getProperty("auditAddition");
    // Set USER_NAME to the name of the user created previously, where xxx indicates that username. For example, if the created user is "user", set USER_NAME to "user".
    USER_NAME = "xxx";

    if ("KERBEROS".equalsIgnoreCase(auth)) {
      // Set the keytab and krb5 file path on the client.
      USER_KEYTAB_FILE = userdir + "user.keytab";
      KRB5_FILE = userdir + "krb5.conf";
      System.setProperty("java.security.krb5.conf", KRB5_FILE);
      System.setProperty(ZOOKEEPER_SERVER_PRINCIPAL_KEY, ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL);
    }
  }
  
  /**
   * This example shows how to use the Hive JDBC interface to run HQL commands.<br>
   * <br>
   * 
   * @throws ClassNotFoundException
   * @throws IllegalAccessException
   * @throws InstantiationException
   * @throws SQLException
   * @throws IOException 
   */
  public static void main(String[] args) throws InstantiationException,
      IllegalAccessException, ClassNotFoundException, SQLException, IOException{
    // Parameter Initialization
    init();

    // Define the HQL statements. Each must be a single HQL statement and cannot contain a semicolon (";").
    String[] sqls = {"CREATE TABLE IF NOT EXISTS employees_info(id INT,name STRING)",
        "SELECT COUNT(*) FROM employees_info", "DROP TABLE employees_info"};

    // Build JDBC URL
    StringBuilder sBuilder = new StringBuilder(
        "jdbc:hive2://").append(zkQuorum).append("/");

    if ("KERBEROS".equalsIgnoreCase(auth)) {
      sBuilder.append(";serviceDiscoveryMode=") 
              .append(serviceDiscoveryMode)
              .append(";zooKeeperNamespace=")
              .append(zooKeeperNamespace)
              .append(";sasl.qop=")
              .append(sasl_qop)
              .append(";auth=")
              .append(auth)
              .append(";principal=")
              .append(principal)
              .append(";user.principal=")
              .append(USER_NAME)
              .append(";user.keytab=")
              .append(USER_KEYTAB_FILE);
    } else {
      // Normal mode
      sBuilder.append(";serviceDiscoveryMode=") 
              .append(serviceDiscoveryMode)
              .append(";zooKeeperNamespace=")
              .append(zooKeeperNamespace)
              .append(";auth=none");
    }
    if (auditAddition != null && !auditAddition.isEmpty()) {
      sBuilder.append(";auditAddition=").append(auditAddition);
    }
    String url = sBuilder.toString();
    
    // Load the Hive JDBC driver.
    Class.forName(HIVE_DRIVER);

    Connection connection = null;
    try {
      // Obtain the JDBC connection.
      // If the normal mode is used, the second parameter must be set to a valid username; otherwise, the anonymous user is used for the login.
      connection = DriverManager.getConnection(url, "", "");
        
      // Create a table.
      // If data needs to be imported to the table, use a LOAD statement, for example, to import data from HDFS into the table:
      // load data inpath '/tmp/employees.txt' overwrite into table employees_info;
      execDDL(connection,sqls[0]);
      System.out.println("Create table success!");
       
      // Query the table.
      execDML(connection,sqls[1]);
        
      // Delete the table
      execDDL(connection,sqls[2]);
      System.out.println("Delete table success!");
    } catch (Exception e) {
      System.out.println("Failed to execute HQL : " + e.getMessage());
    }
    finally {
      // Close the JDBC connection.
      if (null != connection) {
        connection.close();
      }
    }
  }
  
  public static void execDDL(Connection connection, String sql)
  throws SQLException {
    PreparedStatement statement = null;
    try {
      statement = connection.prepareStatement(sql);
      statement.execute();
    }
    finally {
      if (null != statement) {
        statement.close();
      }
    }
  }


  public static void execDML(Connection connection, String sql) throws SQLException {
    PreparedStatement statement = null;
    ResultSet resultSet = null;
    ResultSetMetaData resultMetaData = null;
    
    try {
      // Execute HQL.
      statement = connection.prepareStatement(sql);
      resultSet = statement.executeQuery();
      
      // Print the column names of the query result to the console.
      resultMetaData = resultSet.getMetaData();
      int columnCount = resultMetaData.getColumnCount();
      for (int i = 1; i <= columnCount; i++) {
        System.out.print(resultMetaData.getColumnLabel(i) + '\t');
      }
      System.out.println();
      
      // Print the query results to the console.
      while (resultSet.next()) {
        for (int i = 1; i <= columnCount; i++) {
          System.out.print(resultSet.getString(i) + '\t');
        }
        System.out.println();
      }
    }
    finally {
      if (null != resultSet) {
        resultSet.close();
      }
      
      if (null != statement) {
        statement.close();
      }
    }
  }

}
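
For reference, a minimal sketch of the "conf/hiveclient.properties" file that init() reads. Every value below is an environment-specific placeholder rather than a prescribed setting:

# Assumed sample values; replace them with the settings of the actual cluster.
zk.quorum=192.168.1.1:2181,192.168.1.2:2181,192.168.1.3:2181
auth=KERBEROS
sasl.qop=auth-conf
zooKeeperNamespace=hiveserver2
serviceDiscoveryMode=zooKeeper
principal=hive/hadoop.hadoop.com@HADOOP.COM
# Optional; appended to the URL only when present.
# auditAddition=...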

JDBC Secondary Development Code Example 2

This code example provides the following functions:

  1. Creates Hive connections without providing the username and keytab file path in the JDBC URL; users perform the security login themselves.
  2. Runs HQL statements for creating, querying, and deleting tables.

When accessing ZooKeeper, the program needs a JAAS configuration file, for example, user.hive.jaas.conf. The details are as follows:

Client {
  com.sun.security.auth.module.Krb5LoginModule required
  useKeyTab=true
  keyTab="D:\\workspace\\jdbc-examples\\conf\\user.keytab"
  principal="xxx@HADOOP.COM"
  useTicketCache=false
  storeKey=true
  debug=true;
};

You need to modify the keyTab path (an absolute path) and the principal in the configuration file based on the actual environment, and set the JVM system property java.security.auth.login.config to the path of this file.
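
The property can be supplied as a JVM option (-Djava.security.auth.login.config=<path>) or set in code before the first connection is created. A minimal sketch, for example placed at the start of init(), assuming the sample path shown above:

// Point JAAS at the configuration file before any ZooKeeper or HiveServer connection is made.
// The path is an assumption matching the sample file above; replace it with the actual absolute path.
System.setProperty("java.security.auth.login.config",
    "D:\\workspace\\jdbc-examples\\conf\\user.hive.jaas.conf");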

package com.huawei.bigdata.hive.example;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.Properties;


import org.apache.hadoop.conf.Configuration;
import com.huawei.bigdata.security.LoginUtil;

public class JDBCExamplePreLogin {
  private static final String HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
  
  private static final String ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME = "Client";
  private static final String ZOOKEEPER_SERVER_PRINCIPAL_KEY = "zookeeper.server.principal";    
  private static final String ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL = "zookeeper/hadoop";
  
  private static Configuration CONF = null; 
  private static String KRB5_FILE = null;
  private static String USER_NAME = null;
  private static String USER_KEYTAB_FILE = null;
  
  private static String zkQuorum = null; // List of ZooKeeper node IP addresses and ports
  private static String auth = null;
  private static String sasl_qop = null;
  private static String zooKeeperNamespace = null;
  private static String serviceDiscoveryMode = null;
  private static String principal = null;
  private static String auditAddition = null;

  private static void init() throws IOException {
    CONF = new Configuration();

    Properties clientInfo = null;
    String userdir = System.getProperty("user.dir") + File.separator
        + "conf" + File.separator;
    InputStream fileInputStream = null;
    try {
      clientInfo = new Properties();
      //"hiveclient.properties" is the client configuration file. If the multi-instance feature is used, you need to replace the file with "hiveclient.properties" of the corresponding instance client.
      //"hiveclient.properties" file is stored in the config directory of the decompressed installation package of the corresponding instance client.
      String hiveclientProp = userdir + "hiveclient.properties" ;
      File propertiesFile = new File(hiveclientProp);
      fileInputStream = new FileInputStream(propertiesFile);
      clientInfo.load(fileInputStream);
    } catch (Exception e) {
      throw new IOException(e);
    } finally {
      if (fileInputStream != null) {
        fileInputStream.close();
        fileInputStream = null;
      }
    }
    // The format of zkQuorum is "xxx.xxx.xxx.xxx:2181,xxx.xxx.xxx.xxx:2181,xxx.xxx.xxx.xxx:2181".
    // "xxx.xxx.xxx.xxx" indicates the IP address of a node where ZooKeeper is located. The default port is 2181.
    zkQuorum = clientInfo.getProperty("zk.quorum");
    auth = clientInfo.getProperty("auth");
    sasl_qop = clientInfo.getProperty("sasl.qop");
    zooKeeperNamespace = clientInfo.getProperty("zooKeeperNamespace");
    serviceDiscoveryMode = clientInfo.getProperty("serviceDiscoveryMode");
    principal = clientInfo.getProperty("principal"); 
    auditAddition = clientInfo.getProperty("auditAddition");
    // Set USER_NAME to the name of the user created previously, where xxx indicates that username. For example, if the created user is "user", set USER_NAME to "user".
    USER_NAME = "xxx";

    if ("KERBEROS".equalsIgnoreCase(auth)) {
      // Set the keytab and krb5 file path on the client.
      USER_KEYTAB_FILE = userdir + "user.keytab";
      KRB5_FILE = userdir + "krb5.conf";

      LoginUtil.setJaasConf(ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME, USER_NAME, USER_KEYTAB_FILE);
      LoginUtil.setZookeeperServerPrincipal(ZOOKEEPER_SERVER_PRINCIPAL_KEY, ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL);

      // Security mode: ZooKeeper login authentication.
      LoginUtil.login(USER_NAME, USER_KEYTAB_FILE, KRB5_FILE, CONF);
    }
  }
  
  /**
   * This example shows how to use the Hive JDBC interface to run HQL commands.<br>
   * <br>
   * 
   * @throws ClassNotFoundException
   * @throws IllegalAccessException
   * @throws InstantiationException
   * @throws SQLException
   * @throws IOException 
   */
  public static void main(String[] args) throws InstantiationException,
      IllegalAccessException, ClassNotFoundException, SQLException, IOException{
    // Parameter Initialization
    init();

    // Define the HQL statements. Each must be a single HQL statement and cannot contain a semicolon (";").
    String[] sqls = {"CREATE TABLE IF NOT EXISTS employees_info(id INT,name STRING)",
        "SELECT COUNT(*) FROM employees_info", "DROP TABLE employees_info"};

    // Build JDBC URL
    StringBuilder sBuilder = new StringBuilder(
        "jdbc:hive2://").append(zkQuorum).append("/");

    if ("KERBEROS".equalsIgnoreCase(auth)) {
      sBuilder.append(";serviceDiscoveryMode=") 
              .append(serviceDiscoveryMode)
              .append(";zooKeeperNamespace=")
              .append(zooKeeperNamespace)
              .append(";sasl.qop=")
              .append(sasl_qop)
              .append(";auth=")
              .append(auth)
              .append(";principal=")
              .append(principal);
    } else {
      // Normal mode
      sBuilder.append(";serviceDiscoveryMode=") 
              .append(serviceDiscoveryMode)
              .append(";zooKeeperNamespace=")
              .append(zooKeeperNamespace)
              .append(";auth=none");
    }
    if (auditAddition != null && !auditAddition.isEmpty()) {
      sBuilder.append(";auditAddition=").append(auditAddition);
    }
    String url = sBuilder.toString();
    
    // Load the Hive JDBC driver.
    Class.forName(HIVE_DRIVER);

    Connection connection = null;
    try {
      // Obtain the JDBC connection.
      // If the normal mode is used, the second parameter must be set to a valid username; otherwise, the anonymous user is used for the login.
      connection = DriverManager.getConnection(url, "", "");

      // Create a table.
      // If data needs to be imported to the table, use a LOAD statement, for example, to import data from HDFS into the table:
      // load data inpath '/tmp/employees.txt' overwrite into table employees_info;
      execDDL(connection,sqls[0]);
      System.out.println("Create table success!");
       
      // Query the table.
      execDML(connection,sqls[1]);
        
      // Delete the table
      execDDL(connection,sqls[2]);
      System.out.println("Delete table success!");
    }
    finally {
      // Close the JDBC connection.
      if (null != connection) {
        connection.close();
      }
    }
  }
  
  public static void execDDL(Connection connection, String sql)
  throws SQLException {
    PreparedStatement statement = null;
    try {
      statement = connection.prepareStatement(sql);
      statement.execute();
    }
    finally {
      if (null != statement) {
        statement.close();
      }
    }
  }


  public static void execDML(Connection connection, String sql) throws SQLException {
    PreparedStatement statement = null;
    ResultSet resultSet = null;
    ResultSetMetaData resultMetaData = null;
    
    try {
      // Execute HQL. 
      statement = connection.prepareStatement(sql);
      resultSet = statement.executeQuery();
      
      // Print the column names of the query result to the console.
      resultMetaData = resultSet.getMetaData();
      int columnCount = resultMetaData.getColumnCount();
      for (int i = 1; i <= columnCount; i++) {
        System.out.print(resultMetaData.getColumnLabel(i) + '\t');
      }
      System.out.println();
      
      // Print the query results to the console.
      while (resultSet.next()) {
        for (int i = 1; i <= columnCount; i++) {
          System.out.print(resultSet.getString(i) + '\t');
        }
        System.out.println();
      }
    }
    finally {
      if (null != resultSet) {
        resultSet.close();
      }
      
      if (null != statement) {
        statement.close();
      }
    }
  }

}

HCatalog Secondary Development Code Example

The following code example demonstrates how to use the HCatInputFormat and HCatOutputFormat interfaces provided by HCatalog to submit MapReduce jobs.

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hive.hcatalog.data.DefaultHCatRecord;
import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.apache.hive.hcatalog.mapreduce.HCatOutputFormat;
import org.apache.hive.hcatalog.mapreduce.OutputJobInfo;

public class HCatalogExample extends Configured implements Tool {

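     // Mapper: reads the first column (age) of each input HCatRecord and emits (age, 1).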
     public static class Map extends 
             Mapper<LongWritable, HCatRecord, IntWritable, IntWritable> { 
         int age; 
         @Override 
         protected void map( 
                 LongWritable key, 
                 HCatRecord value, 
                 org.apache.hadoop.mapreduce.Mapper<LongWritable, HCatRecord, 
                         IntWritable, IntWritable>.Context context) 
                 throws IOException, InterruptedException { 
             age = (Integer) value.get(0); 
             context.write(new IntWritable(age), new IntWritable(1)); 
         } 
     } 

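     // Reducer: counts the records received for each age and writes an HCatRecord (age, count).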
     public static class Reduce extends Reducer<IntWritable, IntWritable, 
     IntWritable, HCatRecord> { 
       @Override 
       protected void reduce( 
               IntWritable key, 
               java.lang.Iterable<IntWritable> values, 
               org.apache.hadoop.mapreduce.Reducer<IntWritable, IntWritable, 
               IntWritable, HCatRecord>.Context context) 
               throws IOException, InterruptedException { 
           int sum = 0; 
           Iterator<IntWritable> iter = values.iterator(); 
           while (iter.hasNext()) { 
               sum++; 
               iter.next(); 
           } 
           HCatRecord record = new DefaultHCatRecord(2); 
           record.set(0, key.get()); 
           record.set(1, sum); 

           context.write(null, record); 
         } 
     } 

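     // Configures the job: HCatInputFormat reads the input Hive table, HCatOutputFormat writes the output Hive table.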
     public int run(String[] args) throws Exception { 
         Configuration conf = getConf(); 
         String[] otherArgs = args; 

         String inputTableName = otherArgs[0]; 
         String outputTableName = otherArgs[1]; 
         String dbName = "default"; 

         @SuppressWarnings("deprecation") 
         Job job = new Job(conf, "GroupByDemo"); 

         HCatInputFormat.setInput(job, dbName, inputTableName); 
         job.setInputFormatClass(HCatInputFormat.class); 
         job.setJarByClass(HCatalogExample.class); 
         job.setMapperClass(Map.class); 
         job.setReducerClass(Reduce.class); 
         job.setMapOutputKeyClass(IntWritable.class); 
         job.setMapOutputValueClass(IntWritable.class); 
         job.setOutputKeyClass(WritableComparable.class); 
         job.setOutputValueClass(DefaultHCatRecord.class); 

         OutputJobInfo outputjobInfo = OutputJobInfo.create(dbName,outputTableName, null); 
         HCatOutputFormat.setOutput(job, outputjobInfo); 
         HCatSchema schema = outputjobInfo.getOutputSchema(); 
         HCatOutputFormat.setSchema(job, schema); 
         job.setOutputFormatClass(HCatOutputFormat.class); 

         return (job.waitForCompletion(true) ? 0 : 1); 
     } 
     public static void main(String[] args) throws Exception { 
         int exitCode = ToolRunner.run(new HCatalogExample(), args); 
         System.exit(exitCode); 
     } 
 }
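
The mapper casts the first column of each input record to Integer, and the reducer writes two-column (age, count) records, so the input and output tables must match these shapes. A minimal sketch of compatible table definitions, using hypothetical table names t1 and t2:

-- Hypothetical input table: the first column must be of type INT.
CREATE TABLE IF NOT EXISTS t1(age INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE;
-- Hypothetical output table: receives the (age, count) records produced by the reducer.
CREATE TABLE IF NOT EXISTS t2(age INT, cnt INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE;

The job is then submitted with the input and output table names as its two arguments (for example, via yarn jar with -libjars pointing at the HCatalog libraries); the exact launch command depends on how the example is packaged.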