public class ParquetWriter<T> extends Object implements Closeable
| Modifier and Type | Field and Description |
|---|---|
static int |
DEFAULT_BLOCK_SIZE |
static CompressionCodecName |
DEFAULT_COMPRESSION_CODEC_NAME |
static boolean |
DEFAULT_IS_DICTIONARY_ENABLED |
static boolean |
DEFAULT_IS_VALIDATING_ENABLED |
static int |
DEFAULT_PAGE_SIZE |
static ParquetProperties.WriterVersion |
DEFAULT_WRITER_VERSION |
| Constructor and Description |
|---|
ParquetWriter(org.apache.hadoop.fs.Path file,
org.apache.hadoop.conf.Configuration conf,
WriteSupport<T> writeSupport) |
ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport)
Create a new ParquetWriter.
|
ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize)
Create a new ParquetWriter.
|
ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
boolean enableDictionary,
boolean validating)
Create a new ParquetWriter.
|
ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
int dictionaryPageSize,
boolean enableDictionary,
boolean validating)
Create a new ParquetWriter.
|
ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
int dictionaryPageSize,
boolean enableDictionary,
boolean validating,
ParquetProperties.WriterVersion writerVersion)
Create a new ParquetWriter.
|
ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
int dictionaryPageSize,
boolean enableDictionary,
boolean validating,
ParquetProperties.WriterVersion writerVersion,
org.apache.hadoop.conf.Configuration conf)
Create a new ParquetWriter.
|
public static final int DEFAULT_BLOCK_SIZE
public static final int DEFAULT_PAGE_SIZE
public static final CompressionCodecName DEFAULT_COMPRESSION_CODEC_NAME
public static final boolean DEFAULT_IS_DICTIONARY_ENABLED
public static final boolean DEFAULT_IS_VALIDATING_ENABLED
public static final ParquetProperties.WriterVersion DEFAULT_WRITER_VERSION
public ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize)
throws IOException
Parameters:
file - the file to create
writeSupport - the implementation to write a record to a RecordConsumer
compressionCodecName - the compression codec to use
blockSize - the block size threshold
pageSize - the page size threshold
Throws:
IOException
See Also:
ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, boolean, boolean)

public ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
boolean enableDictionary,
boolean validating)
throws IOException
Parameters:
file - the file to create
writeSupport - the implementation to write a record to a RecordConsumer
compressionCodecName - the compression codec to use
blockSize - the block size threshold
pageSize - the page size threshold (both data and dictionary)
enableDictionary - to turn dictionary encoding on
validating - to turn on validation using the schema
Throws:
IOException
See Also:
ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, int, boolean, boolean)

public ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
int dictionaryPageSize,
boolean enableDictionary,
boolean validating)
throws IOException
Parameters:
file - the file to create
writeSupport - the implementation to write a record to a RecordConsumer
compressionCodecName - the compression codec to use
blockSize - the block size threshold
pageSize - the page size threshold
dictionaryPageSize - the page size threshold for the dictionary pages
enableDictionary - to turn dictionary encoding on
validating - to turn on validation using the schema
Throws:
IOException
See Also:
ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, int, boolean, boolean, WriterVersion)

public ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
int dictionaryPageSize,
boolean enableDictionary,
boolean validating,
ParquetProperties.WriterVersion writerVersion)
throws IOException
Uses the default Configuration, which reads configuration from the
classpath.
Parameters:
file - the file to create
writeSupport - the implementation to write a record to a RecordConsumer
compressionCodecName - the compression codec to use
blockSize - the block size threshold
pageSize - the page size threshold
dictionaryPageSize - the page size threshold for the dictionary pages
enableDictionary - to turn dictionary encoding on
validating - to turn on validation using the schema
writerVersion - version of parquetWriter from ParquetProperties.WriterVersion
Throws:
IOException
See Also:
ParquetWriter(Path, WriteSupport, CompressionCodecName, int, int, int, boolean, boolean, WriterVersion, Configuration)

public ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport,
CompressionCodecName compressionCodecName,
int blockSize,
int pageSize,
int dictionaryPageSize,
boolean enableDictionary,
boolean validating,
ParquetProperties.WriterVersion writerVersion,
org.apache.hadoop.conf.Configuration conf)
throws IOException
Parameters:
file - the file to create
writeSupport - the implementation to write a record to a RecordConsumer
compressionCodecName - the compression codec to use
blockSize - the block size threshold
pageSize - the page size threshold
dictionaryPageSize - the page size threshold for the dictionary pages
enableDictionary - to turn dictionary encoding on
validating - to turn on validation using the schema
writerVersion - version of parquetWriter from ParquetProperties.WriterVersion
conf - Hadoop configuration to use while accessing the filesystem
Throws:
IOException

public ParquetWriter(org.apache.hadoop.fs.Path file,
WriteSupport<T> writeSupport)
throws IOException
Parameters:
file - the file to create
writeSupport - the implementation to write a record to a RecordConsumer
Throws:
IOException

public ParquetWriter(org.apache.hadoop.fs.Path file,
org.apache.hadoop.conf.Configuration conf,
WriteSupport<T> writeSupport)
throws IOException
Throws:
IOException

public void write(T object)
           throws IOException
Throws:
IOException

public void close()
           throws IOException
Specified by:
close in interface Closeable
Specified by:
close in interface AutoCloseable
Throws:
IOException

Copyright © 2014. All Rights Reserved.