Java Code Examples: org.apache.hadoop.hive.metastore.conf.MetastoreConf

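MetastoreConf is the Hive Metastore's configuration helper: each ConfVars entry carries both a new metastore.* key and its deprecated hive.* counterpart, and the static getters and setters read and write typed values on a plain Hadoop Configuration. A minimal sketch of the pattern (the URI is illustrative):

Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:9083");
String uris = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.THRIFT_URIS);
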
Example 1
private HiveConf newHiveConf() {
  HiveConf conf = new HiveConf(SessionState.class);

  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
  // The warehouse dir must be set, and WITH the deprecated key :(
  // Otherwise it will default to /user/hive/warehouse when trying to create a new database
  // (the database location is now sent by the client to the server...)
  HiveConf.setVar(conf, ConfVars.METASTOREWAREHOUSE, whDir);
  conf.set("mapred.job.tracker", "local");
  HiveConf.setVar(conf, ConfVars.SCRATCHDIR, getTempDir("scratch_dir"));
  HiveConf.setVar(conf, ConfVars.LOCALSCRATCHDIR, getTempDir("local_scratch_dir"));
  HiveConf.setVar(conf, ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  HiveConf.setBoolVar(conf, ConfVars.HIVE_CBO_ENABLED, false);

  return conf;
}
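Note the mix of APIs above: MetastoreConf.setVar writes the metastore-side key, while HiveConf.setVar is used for the deprecated warehouse key, for the reason the comment gives. For comparison, the metastore-side form of the same warehouse setting (used in Example 4 below) would be this sketch, reusing the whDir variable from the example:

MetastoreConf.setVar(conf, MetastoreConf.ConfVars.WAREHOUSE, whDir);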
 
Example 2
@Override
StreamingConnection makeStreamingConnection(HiveOptions options, RecordReader reader) throws StreamingException {

    // Ensure the 'hive.metastore.uris' property matches the options.getMetaStoreURI() value (if it is set)
    String userDefinedMetastoreURI = options.getMetaStoreURI();
    if (null != userDefinedMetastoreURI) {
        assertEquals(userDefinedMetastoreURI, options.getHiveConf().get(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName()));
    }

    if (generateConnectFailure) {
        throw new StubConnectionError("Unit Test - Connection Error");
    }

    HiveRecordWriter hiveRecordWriter = new HiveRecordWriter(reader, getLogger());
    if (generatePermissionsFailure) {
        throw new StreamingException("Permission denied");
    }
    MockHiveStreamingConnection hiveConnection = new MockHiveStreamingConnection(options, reader, hiveRecordWriter, schema);
    hiveConnection.setGenerateWriteFailure(generateWriteFailure);
    hiveConnection.setGenerateSerializationError(generateSerializationError);
    hiveConnection.setGenerateCommitFailure(generateCommitFailure);
    return hiveConnection;
}
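ConfVars.getHiveName(), used in the assertion above, returns the deprecated HiveConf key for a metastore property; for THRIFT_URIS that is "hive.metastore.uris". A one-line illustration:

String hiveKey = MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(); // "hive.metastore.uris"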
 
Example 3
private ThriftHiveMetastore.Client getClient() {
  final URI metastoreUri = URI.create(MetastoreConf.getAsString(conf, THRIFT_URIS));
  final int socketTimeOut = (int) MetastoreConf.getTimeVar(conf, CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
  TTransport transport = new TSocket(metastoreUri.getHost(), metastoreUri.getPort(), socketTimeOut);
  try {
    transport.open();
  } catch (TTransportException e) {
    throw new RuntimeException("Failed to open socket for " + metastoreUri + " with timeout " + socketTimeOut + " ms", e);
  }
  return new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
}
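The client returned here wraps an open TSocket, so the caller is responsible for closing the transport. A hypothetical usage sketch, assuming the enclosing code declares throws TException (get_all_databases is one of the generated Thrift client methods):

ThriftHiveMetastore.Client client = getClient();
try {
  List<String> databases = client.get_all_databases();
} finally {
  client.getInputProtocol().getTransport().close();
}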
 
Example 4
private HiveTestDataGenerator(Map<String, String> config) throws Exception {
  String root = getTempDir("");
  this.dbDir = root + "metastore_db";

  // the metastore helper will create the warehouse in a subdirectory
  String whDirBase = root + "warehouse";
  new File(whDirBase).mkdirs();

  Configuration conf = MetastoreConf.newMetastoreConf();

  // Force creating schemas. Note that JDO creates schema on demand
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.SCHEMA_VERIFICATION, false);
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AUTO_CREATE_ALL, true);
  // Disable direct SQL, as it can prevent some schemas from being created
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, false);
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.WAREHOUSE, whDirBase);
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, String.format("jdbc:derby:;databaseName=%s;create=true", dbDir));
  HiveConf.setVar(conf, ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  if (config != null) {
    config.forEach(conf::set);
  }

  this.port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  this.whDir = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.WAREHOUSE);

  this.config = new HashedMap<>();
  this.config.put(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
}
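Once startMetaStoreWithRetry has bound the embedded metastore to a port, client configurations can point at it the same way Example 1 does. A sketch, reusing the port field set above:

Configuration clientConf = MetastoreConf.newMetastoreConf();
MetastoreConf.setVar(clientConf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);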
 
Example 5
private void createStreamingConnection() throws StreamingException {
  final StrictDelimitedInputWriter strictDelimitedInputWriter = StrictDelimitedInputWriter.newBuilder()
    .withFieldDelimiter(',').build();
  HiveConf hiveConf = new HiveConf();
  hiveConf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), metastoreUri);
  // An isolated classloader and a shade prefix are required for reflective instantiation of the output format
  // class when creating the hive streaming connection (the isolated classloader loads different hive versions;
  // the shade prefix addresses HIVE-19494)
  hiveConf.setVar(HiveConf.ConfVars.HIVE_CLASSLOADER_SHADE_PREFIX, "shadehive");
  hiveConf.set(MetastoreConf.ConfVars.CATALOG_DEFAULT.getHiveName(), "hive");

  // HiveStreamingCredentialProvider requires metastore uri and kerberos principal to obtain delegation token to
  // talk to secure metastore. When HiveConf is created above, hive-site.xml is used from classpath. This option
  // is just an override in case the kerberos principal cannot be obtained from hive-site.xml.
  if (metastoreKrbPrincipal != null) {
    hiveConf.set(MetastoreConf.ConfVars.KERBEROS_PRINCIPAL.getHiveName(), metastoreKrbPrincipal);
  }

  LOG.info("Creating hive streaming connection..");
  streamingConnection = HiveStreamingConnection.newBuilder()
    .withDatabase(db)
    .withTable(table)
    .withStaticPartitionValues(partition)
    .withRecordWriter(strictDelimitedInputWriter)
    .withHiveConf(hiveConf)
    .withAgentInfo(jobId + "(" + partitionId + ")")
    .connect();
  streamingConnection.beginTransaction();
  LOG.info("{} created hive streaming connection.", streamingConnection.getAgentInfo());
}
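After beginTransaction(), records are written into the open transaction and the connection is closed when done. A hypothetical continuation using the StreamingConnection API (the record "1,hello" is illustrative, matching the ',' field delimiter configured above):

streamingConnection.write("1,hello".getBytes(StandardCharsets.UTF_8));
streamingConnection.commitTransaction();
streamingConnection.close();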