Java - Code Snippets for 'Apache Hadoop' - 33 code snippets

Sample 1. Usage of org.apache.hadoop.hdfs.server.namenode.NameNode

NameNode nn = (NameNode)context.getAttribute("name.node");
Configuration conf = new Configuration(
(Configuration)context.getAttribute("name.conf"));
UnixUserGroupInformation.saveToConf(conf,
UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
return DFSClient.createNamenode(nn.getNameNodeAddress(), conf);

Sample 2. Usage of org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair

public BlockCommand(int action, List<BlockTargetPair> blocktargetlist) {
super(action);
blocks = new Block[blocktargetlist.size()];
targets = new DatanodeInfo[blocks.length][];
for(int i = 0; i < blocks.length; i++) {
BlockTargetPair p = blocktargetlist.get(i);
blocks[i] = p.block;
targets[i] = p.targets;
}
}

Sample 3. Usage of org.apache.hadoop.io.ObjectWritable

public void readFields(DataInput in) throws IOException {
methodName = UTF8.readString(in);
parameters = new Object[in.readInt()];
parameterClasses = new Class[parameters.length];
ObjectWritable objectWritable = new ObjectWritable();
for (int i = 0; i < parameters.length; i++) {
parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf);
parameterClasses[i] = objectWritable.getDeclaredClass();
}
}
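
The readFields method above uses ObjectWritable to deserialize RPC call parameters. As a self-contained illustration of the same class, here is a minimal round trip through an in-memory buffer; the class name and values are invented for the example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.ObjectWritable;

public class ObjectWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DataOutputBuffer out = new DataOutputBuffer();
    // writeObject records the declared class followed by the value
    ObjectWritable.writeObject(out, "hello", String.class, conf);
    ObjectWritable.writeObject(out, 42, Integer.TYPE, conf);  // primitives are supported

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    System.out.println(ObjectWritable.readObject(in, conf));  // hello
    System.out.println(ObjectWritable.readObject(in, conf));  // 42
  }
}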

Sample 4. Usage of org.apache.hadoop.conf.Configuration

Configuration conf = new Configuration(
(Configuration)context.getAttribute("name.conf"));
UnixUserGroupInformation.saveToConf(conf,
UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
return DFSClient.createNamenode(nn.getNameNodeAddress(), conf);
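
Configuration is the entry point to most Hadoop APIs: it loads core-default.xml and core-site.xml from the classpath and exposes typed accessors. A minimal standalone sketch; the key names and values below are illustrative, not taken from the snippet:

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();              // loads default resources
    conf.set("fs.default.name", "hdfs://localhost:9000");  // pre-0.21 key for the default FS
    int handlers = conf.getInt("dfs.namenode.handler.count", 10);  // typed read with a default
    System.out.println(conf.get("fs.default.name") + ", handlers=" + handlers);
  }
}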

Sample 5. Usage of org.apache.hadoop.hdfs.server.namenode.FSEditLog.EditLogFileInputStream

EditLogFileInputStream edits =
    new EditLogFileInputStream(getImageFile(sd, NameNodeFile.EDITS));
numEdits = FSEditLog.loadFSEdits(edits);
edits.close();
File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW);
if (editsNew.exists() && editsNew.length() > 0) {
  edits = new EditLogFileInputStream(editsNew);
  numEdits += FSEditLog.loadFSEdits(edits);
  edits.close();
}

Sample 6. Usage of org.apache.hadoop.fs.permission.FsPermission

FsPermission mode = inode.getFsPermission();
if (user.equals(inode.getUserName())) { //user class
if (mode.getUserAction().implies(access)) { return; }
}
else if (groups.contains(inode.getGroupName())) { //group class
if (mode.getGroupAction().implies(access)) { return; }
}
else { //other class
if (mode.getOtherAction().implies(access)) { return; }
}
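
The check above works because FsPermission decomposes a mode into user, group and other FsAction values, and FsAction.implies performs the bit test. A standalone sketch with arbitrarily chosen permissions:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionSketch {
  public static void main(String[] args) {
    FsPermission mode =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ);
    System.out.println(mode);                                           // rwxr-xr--
    System.out.println(mode.getUserAction().implies(FsAction.WRITE));   // true
    System.out.println(mode.getOtherAction().implies(FsAction.WRITE));  // false
  }
}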

Sample 7. Usage of org.apache.hadoop.fs.permission.PermissionStatus

PermissionStatus permissions = fsNamesys.getUpgradePermission();
if (imgVersion <= -11) {
  permissions = PermissionStatus.read(in);
}
if (path.length() == 0) { // it is the root
  if (nsQuota != -1 || dsQuota != -1) {
    fsDir.rootDir.setQuota(nsQuota, dsQuota);
  }
  fsDir.rootDir.setModificationTime(modificationTime);
  fsDir.rootDir.setPermissionStatus(permissions);
  continue; // move on to the next image entry (enclosing load loop omitted in this excerpt)
}

Sample 8. Usage of org.apache.hadoop.metrics.MetricsUtil

MetricsContext metricsContext = MetricsUtil.getContext("dfs");
directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
directoryMetrics.setTag("sessionId", conf.get("session.id"));

Sample 9. Usage of org.apache.hadoop.metrics.MetricsContext

MetricsContext metricsContext = MetricsUtil.getContext("dfs");
directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
directoryMetrics.setTag("sessionId", conf.get("session.id"));
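
Samples 8 and 9 share the same code and the same pre-metrics2 API (org.apache.hadoop.metrics). A hedged sketch of the full record lifecycle, assuming that old API; the record name, tag and metric below are invented:

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;

public class MetricsSketch {
  public static void main(String[] args) {
    // the context name selects a section of hadoop-metrics.properties
    MetricsContext ctx = MetricsUtil.getContext("dfs");
    MetricsRecord rec = MetricsUtil.createRecord(ctx, "exampleRecord");
    rec.setTag("sessionId", "demo");
    rec.setMetric("filesCreated", 1);
    rec.update();  // queue the record for the context's next emit
  }
}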

Sample 10. Usage of MD5MD5CRC32FileChecksum

final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(filename, nnproxy, socketFactory, socketTimeout);
MD5MD5CRC32FileChecksum.write(xml, checksum);

Sample 11. Usage of org.jets3t.service.security.AWSCredentials and S3Credentials

S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
try {
  AWSCredentials awsCredentials =
      new AWSCredentials(s3Credentials.getAccessKey(),
                         s3Credentials.getSecretAccessKey());
  this.s3Service = new RestS3Service(awsCredentials);
} catch (S3ServiceException e) {
  // error handling elided in the original snippet
}

Sample 12. Usage of org.apache.hadoop.util.Daemon

public static void main(String[] argv) throws Exception {
StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
Configuration tconf = new Configuration();
if (argv.length >= 1) {
SecondaryNameNode secondary = new SecondaryNameNode(tconf);
int ret = secondary.processArgs(argv);
System.exit(ret);
}
Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf));
checkpointThread.start();
}
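
Daemon is a thin Thread subclass whose constructors call setDaemon(true), so background loops like the checkpoint thread above cannot keep the JVM alive on their own. A standalone sketch:

import org.apache.hadoop.util.Daemon;

public class DaemonSketch {
  public static void main(String[] args) throws InterruptedException {
    Daemon d = new Daemon(new Runnable() {
      public void run() {
        while (true) {
          try { Thread.sleep(500); } catch (InterruptedException e) { return; }
          System.out.println("tick");
        }
      }
    });
    d.start();
    Thread.sleep(1600);  // when main returns, the daemon thread dies with the JVM
  }
}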

Sample 13. Usage of org.apache.hadoop.hdfs.server.namenode.NameNode

protected ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi
) throws IOException {
ServletContext context = getServletContext();
NameNode nn = (NameNode)context.getAttribute("name.node");
Configuration conf = new Configuration(
(Configuration)context.getAttribute("name.conf"));
UnixUserGroupInformation.saveToConf(conf,
UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
return DFSClient.createNamenode(nn.getNameNodeAddress(), conf);
}

Sample 14. Usage of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

private void doMerge(CheckpointSignature sig) throws IOException {
FSNamesystem namesystem =
new FSNamesystem(checkpointImage, conf);
assert namesystem.dir.fsImage == checkpointImage;
checkpointImage.doMerge(sig);
}

Sample 15. Usage of org.apache.hadoop.fs.FileStatus

public synchronized void setPermission(String src, FsPermission permission
) throws IOException {
checkOwner(src);
dir.setPermission(src, permission);
getEditLog().logSync();
if (auditLog.isInfoEnabled()) {
final FileStatus stat = dir.getFileInfo(src);
logAuditEvent(UserGroupInformation.getCurrentUGI(),
Server.getRemoteIp(),
"setPermission", src, null, stat);
}
}

Sample 16. Usage of org.apache.hadoop.hdfs.protocol.Block

private Block allocateBlock(String src, INode[] inodes) throws IOException {
Block b = null;
do {
b = new Block(FSNamesystem.randBlockId.nextLong(), 0,
getGenerationStamp());
} while (isValidBlock(b));
b = dir.addBlock(src, inodes, b);
NameNode.stateChangeLog.info("BLOCK* NameSystem.allocateBlock: "
+src+ ". "+b);
return b;
}

Sample 17. Usage of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory

public File getFsEditName() throws IOException {
return getEditLog().getFsEditName();
}
File getFsTimeName() {
StorageDirectory sd = null;
for (Iterator<StorageDirectory> it =
dirIterator(); it.hasNext();)
sd = it.next();
return getImageFile(sd, NameNodeFile.TIME);
}

Sample 18. Usage of org.apache.hadoop.hdfs.server.common.UpgradeStatusReport

public String getUpgradeStatusText() {
String statusText = "";
try {
UpgradeStatusReport status =
fsn.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
statusText = (status == null ?
"There are no upgrades in progress." :
status.getStatusText(false));
} catch(IOException e) {
statusText = "Upgrade status unknown.";
}
return statusText;
}

Sample 19. Usage of org.apache.hadoop.util.DataChecksum

private static BlockMetadataHeader readHeader(short version, DataInputStream in)
throws IOException {
DataChecksum checksum = DataChecksum.newDataChecksum(in);
return new BlockMetadataHeader(version, checksum);
}
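
readHeader above rebuilds a DataChecksum from a serialized block-metadata header; the same factory can be driven directly. A sketch, assuming the (type, bytesPerChecksum) overload and the CHECKSUM_CRC32 constant of this era of Hadoop:

import org.apache.hadoop.util.DataChecksum;

public class ChecksumSketch {
  public static void main(String[] args) {
    // one CRC-32 value per 512-byte chunk, as HDFS block metadata uses
    DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
    byte[] data = "hello, hdfs".getBytes();
    sum.update(data, 0, data.length);
    System.out.println("crc=" + sum.getValue()
        + ", checksum width=" + sum.getChecksumSize());  // 4 bytes for CRC-32
  }
}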

Sample 20. Usage of org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException

public DatanodeDescriptor getDatanode(DatanodeID nodeID) throws IOException {
UnregisteredDatanodeException e = null;
DatanodeDescriptor node = datanodeMap.get(nodeID.getStorageID());
if (node == null)
return null;
if (!node.getName().equals(nodeID.getName())) {
e = new UnregisteredDatanodeException(nodeID, node);
NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
+ e.getLocalizedMessage());
throw e;
}
return node;
}

Sample 21. Usage of org.apache.hadoop.hdfs.DistributedFileSystem

public int metaSave(String[] argv, int idx) throws IOException {
String pathname = argv[idx];
DistributedFileSystem dfs = (DistributedFileSystem) fs;
dfs.metaSave(pathname);
System.out.println("Created file " + pathname + " on server " +
dfs.getUri());
return 0;
}

Sample 22. Usage of org.apache.hadoop.io.DataOutputBuffer

private void writeHeader() throws IOException {
out.write(Server.HEADER.array());
out.write(Server.CURRENT_VERSION);
DataOutputBuffer buf = new DataOutputBuffer();
ObjectWritable.writeObject(buf, remoteId.getTicket(),
UserGroupInformation.class, conf);
int bufLen = buf.getLength();
out.writeInt(bufLen);
out.write(buf.getData(), 0, bufLen);
}
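
DataOutputBuffer and its counterpart DataInputBuffer provide reusable in-memory buffers behind the DataOutput/DataInput interfaces, which is how the IPC client above measures and frames its header. A round-trip sketch:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class BufferRoundTrip {
  public static void main(String[] args) throws Exception {
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeInt(42);
    out.writeUTF("hadoop");
    // getData() exposes the backing array; only the first getLength() bytes are valid
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    System.out.println(in.readInt() + " " + in.readUTF());  // 42 hadoop
  }
}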

Sample 23. Usage of org.apache.hadoop.security.UserGroupInformation

public Connection(ConnectionId remoteId) throws IOException {
if (remoteId.getAddress().isUnresolved()) {
throw new UnknownHostException("unknown host: " +
remoteId.getAddress().getHostName());
}
this.remoteId = remoteId;
UserGroupInformation ticket = remoteId.getTicket();
this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " +
remoteId.getAddress().toString() +
" from " + ((ticket==null)?"an unknown user":ticket.getUserName()));
this.setDaemon(true);
}

Sample 24. Usage of org.apache.hadoop.fs.FileSystem

public static long getTimestamp(Configuration conf, URI cache)
throws IOException {
FileSystem fileSystem = FileSystem.get(cache, conf);
Path filePath = new Path(cache.getPath());
return fileSystem.getFileStatus(filePath).getModificationTime();
}
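
The same pattern works against any FileSystem implementation that the URI scheme selects. A runnable sketch against the local filesystem (file:///tmp is just an example path):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TimestampSketch {
  public static void main(String[] args) throws Exception {
    URI uri = URI.create("file:///tmp");
    FileSystem fs = FileSystem.get(uri, new Configuration());
    FileStatus st = fs.getFileStatus(new Path(uri.getPath()));
    System.out.println(st.getPath() + " modified at " + st.getModificationTime());
  }
}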

Sample 25. Usage of org.apache.hadoop.util.Shell.ShellCommandExecutor

public void runScript(List<String> args, File dir) throws IOException {
ShellCommandExecutor shexec =
new ShellCommandExecutor(args.toArray(new String[0]), dir);
shexec.execute();
int exitCode = shexec.getExitCode();
if (exitCode != 0) {
throw new IOException("Task debug script exit with nonzero status of "
+ exitCode + ".");
}
}
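
A standalone sketch of ShellCommandExecutor. Note that execute() itself throws an ExitCodeException (a subclass of IOException) when the command exits nonzero, so the explicit exit-code check in the snippet above is a second line of defense:

import org.apache.hadoop.util.Shell.ShellCommandExecutor;

public class ShellSketch {
  public static void main(String[] args) throws Exception {
    ShellCommandExecutor shexec =
        new ShellCommandExecutor(new String[] {"echo", "hello"});
    shexec.execute();                      // runs the command and waits for it to finish
    System.out.println("exit=" + shexec.getExitCode());
    System.out.print(shexec.getOutput());  // captured stdout: hello
  }
}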

Sample 26. Usage of org.apache.hadoop.fs.FSDataInputStream

private void init() throws IOException {
if (reader == null) {
FSDataInputStream in = fs.open(file);
in.seek(segmentOffset);
reader = new Reader<K, V>(conf, in, segmentLength, codec);
}
}
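
FSDataInputStream layers seek and positioned reads over a plain DataInputStream, which is what lets the reader above jump straight to segmentOffset. A sketch that reads eight bytes at an arbitrary offset of a file named on the command line:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeekSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream in = fs.open(new Path(args[0]));
    try {
      in.seek(16);               // absolute positioning, like the snippet's segmentOffset
      byte[] buf = new byte[8];
      in.readFully(buf);
      System.out.println(new String(buf));
    } finally {
      in.close();
    }
  }
}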

Sample 27. Usage of org.apache.hadoop.fs.BlockLocation

protected int getBlockIndex(BlockLocation[] blkLocations,
long offset) {
for (int i = 0 ; i < blkLocations.length; i++) {
if ((blkLocations[i].getOffset() <= offset) &&
(offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){
return i;
}
}
BlockLocation last = blkLocations[blkLocations.length -1];
long fileLength = last.getOffset() + last.getLength() -1;
throw new IllegalArgumentException("Offset " + offset +
" is outside of file (0.." +
fileLength + ")");
}
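
getBlockIndex above maps a byte offset to the block that contains it; the BlockLocation array itself comes from FileSystem.getFileBlockLocations. A sketch that prints the block layout of a file named on the command line:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLayoutSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus st = fs.getFileStatus(new Path(args[0]));
    for (BlockLocation b : fs.getFileBlockLocations(st, 0, st.getLen())) {
      System.out.println("offset=" + b.getOffset() + " length=" + b.getLength()
          + " hosts=" + Arrays.toString(b.getHosts()));
    }
  }
}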

Sample 28. Usage of org.apache.hadoop.util.MergeSort

public RawKeyValueIterator sort() {
MergeSort m = new MergeSort(this);
int count = super.count;
if (count == 0) return null;
int [] pointers = super.pointers;
int [] pointersCopy = new int[count];
System.arraycopy(pointers, 0, pointersCopy, 0, count);
m.mergeSort(pointers, pointersCopy, 0, count);
return new MRSortResultIterator(super.keyValBuffer, pointersCopy,
super.startOffsets, super.keyLengths, super.valueLengths);
}

Sample 29. Usage of org.apache.hadoop.mapred.TaskTracker.TaskInProgress

public synchronized boolean statusUpdate(TaskAttemptID taskid,
TaskStatus taskStatus)
throws IOException {
TaskInProgress tip = tasks.get(taskid);
if (tip != null) {
tip.reportProgress(taskStatus);
return true;
} else {
LOG.warn("Progress from unknown child task: "+taskid);
return false;
}
}

Sample 30. Usage of org.apache.hadoop.mapred.Counters.Counter

public synchronized void incrAllCounters(Counters other) {
for (Group otherGroup: other) {
Group group = getGroup(otherGroup.getName());
group.displayName = otherGroup.displayName;
for (Counter otherCounter : otherGroup) {
Counter counter = group.getCounterForName(otherCounter.getName());
counter.displayName = otherCounter.displayName;
counter.value += otherCounter.value;
}
}
}
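
incrAllCounters folds one Counters object into another, which is how per-task counters get rolled up into job totals. A standalone sketch using the old org.apache.hadoop.mapred API; the group and counter names are invented:

import org.apache.hadoop.mapred.Counters;

public class CountersSketch {
  public static void main(String[] args) {
    Counters a = new Counters();
    Counters b = new Counters();
    a.incrCounter("records", "seen", 3);   // (group, counter, increment)
    b.incrCounter("records", "seen", 4);
    a.incrAllCounters(b);                  // merge b into a, as in the snippet above
    for (Counters.Group g : a) {
      for (Counters.Counter c : g) {
        System.out.println(g.getName() + ":" + c.getName() + " = " + c.getCounter());
      }
    }
  }
}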

Sample 31. Usage of org.apache.hadoop.mapred.Mapper

public void map(Object key, Object value, OutputCollector output,
Reporter reporter) throws IOException {
Mapper mapper = chain.getFirstMap();
if (mapper != null) {
mapper.map(key, value, chain.getMapperCollector(0, output, reporter),
reporter);
}
}

Sample 32. Usage of org.apache.hadoop.io.serializer.Serialization

public OutputCollector getMapperCollector(int mapperIndex,
OutputCollector output,
Reporter reporter) {
Serialization keySerialization = mappersKeySerialization.get(mapperIndex);
Serialization valueSerialization =
mappersValueSerialization.get(mapperIndex);
return new ChainOutputCollector(mapperIndex, keySerialization,
valueSerialization, output, reporter);
}

Sample 33. Usage of org.apache.hadoop.mapred.lib.KeyFieldHelper.KeyDescription

public void setKeyFieldSpec(int start, int end) {
if (end >= start) {
KeyDescription k = new KeyDescription();
k.beginFieldIdx = start;
k.endFieldIdx = end;
keySpecSeen = true;
allKeySpecs.add(k);
}
}
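
KeyDescription is the parsed form of a Unix sort-style key spec such as -k2,2n, where setKeyFieldSpec records the begin and end field indices. Applications normally reach this machinery through JobConf rather than by calling KeyFieldHelper directly; a hedged sketch of that configuration path, assuming the old mapred API:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.KeyFieldBasedComparator;
import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner;

public class KeySpecSketch {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // sort map output on the second field, numerically; KeyFieldHelper parses
    // "-k2,2n" into a KeyDescription with beginFieldIdx=2 and endFieldIdx=2
    job.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
    job.setKeyFieldComparatorOptions("-k2,2n");
    // partition on the first field only
    job.setPartitionerClass(KeyFieldBasedPartitioner.class);
    job.setKeyFieldPartitionerOptions("-k1,1");
  }
}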