Java — Code Snippets for 'Lists.newArrayList' — 6 code snippets found
|
Sample 1. Initialize a list using Google Guava's Lists factory
|
import java.util.List;
import com.google.common.collect.Lists;
/** Demonstrates creating an empty, growable ArrayList via Guava's Lists factory. */
class GoogleListsTest {
    public static void main(String[] args) {
        // Parameterize the element type: the original used a raw List,
        // which forfeits compile-time type checking.
        List<String> list = Lists.newArrayList();
    }
}
|
|
Like Feedback list google guava Lists Lists.newArrayList com.google.common.collect.Lists |
|
|
Sample 2. Code Sample / Example / Snippet of org.apache.spark.mllib.clustering.PowerIterationClustering | |
|
public static void main(String[] args) {
    // Stand up a Spark context for this example application.
    SparkConf conf = new SparkConf().setAppName("JavaPowerIterationClusteringExample");
    JavaSparkContext context = new JavaSparkContext(conf);

    // Similarity graph as (srcId, dstId, similarity) triples.
    @SuppressWarnings("unchecked")
    JavaRDD<Tuple3<Long, Long, Double>> affinities = context.parallelize(Lists.newArrayList(
            new Tuple3<Long, Long, Double>(0L, 1L, 0.9),
            new Tuple3<Long, Long, Double>(1L, 2L, 0.9),
            new Tuple3<Long, Long, Double>(2L, 3L, 0.9),
            new Tuple3<Long, Long, Double>(3L, 4L, 0.1),
            new Tuple3<Long, Long, Double>(4L, 5L, 0.9)));

    // Cluster the graph into k=2 groups, capped at 10 iterations.
    PowerIterationClustering clustering =
            new PowerIterationClustering().setK(2).setMaxIterations(10);
    PowerIterationClusteringModel fitted = clustering.run(affinities);

    // Print each vertex id alongside its assigned cluster.
    for (PowerIterationClustering.Assignment assignment
            : fitted.assignments().toJavaRDD().collect()) {
        System.out.println(assignment.id() + " -> " + assignment.cluster());
    }
    context.stop();
}
|
|
Like Feedback org.apache.spark.mllib.clustering.PowerIterationClustering |
|
|
Sample 3. Code Sample / Example / Snippet of org.apache.spark.mllib.clustering.PowerIterationClusteringModel | |
|
public static void main(String[] args) {
    // Initialize Spark for the power-iteration-clustering demo.
    SparkConf sparkSettings = new SparkConf().setAppName("JavaPowerIterationClusteringExample");
    JavaSparkContext sparkContext = new JavaSparkContext(sparkSettings);

    // Edge list of the similarity graph; each triple is (from, to, weight).
    @SuppressWarnings("unchecked")
    JavaRDD<Tuple3<Long, Long, Double>> edges = sparkContext.parallelize(Lists.newArrayList(
            new Tuple3<Long, Long, Double>(0L, 1L, 0.9),
            new Tuple3<Long, Long, Double>(1L, 2L, 0.9),
            new Tuple3<Long, Long, Double>(2L, 3L, 0.9),
            new Tuple3<Long, Long, Double>(3L, 4L, 0.1),
            new Tuple3<Long, Long, Double>(4L, 5L, 0.9)));

    // Configure and fit the model (2 clusters, at most 10 iterations).
    PowerIterationClustering algorithm = new PowerIterationClustering();
    algorithm.setK(2);
    algorithm.setMaxIterations(10);
    PowerIterationClusteringModel trained = algorithm.run(edges);

    // Report the cluster assignment for every vertex.
    for (PowerIterationClustering.Assignment entry
            : trained.assignments().toJavaRDD().collect()) {
        System.out.println(entry.id() + " -> " + entry.cluster());
    }
    sparkContext.stop();
}
|
|
Like Feedback org.apache.spark.mllib.clustering.PowerIterationClusteringModel |
|
|
Sample 4. Code Sample / Example / Snippet of org.apache.storm.starter.tools.Rankable | |
|
public Object[][] duplicatesData() {
    // Three rankables sharing the same object ("A") but with distinct counts.
    Rankable first = new RankableObjectWithFields("A", 1);
    Rankable second = new RankableObjectWithFields("A", 2);
    Rankable third = new RankableObjectWithFields("A", 3);
    return new Object[][]{
            { Lists.newArrayList(ANY_RANKABLE, ANY_RANKABLE, ANY_RANKABLE) },
            { Lists.newArrayList(first, second, third) },
    };
}
|
|
Like Feedback org.apache.storm.starter.tools.Rankable |
|
|
|
Sample 5. Code Sample / Example / Snippet of org.apache.calcite.sql.type.SqlTypeFactoryImpl | |
|
public void testLeastRestrictiveWithAny() {
    // ANY should absorb any other type: leastRestrictive(BIGINT, ANY) == ANY.
    SqlTypeFactoryImpl factory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    final RelDataType bigIntType = factory.createSqlType(SqlTypeName.BIGINT);
    final RelDataType anyType = factory.createSqlType(SqlTypeName.ANY);
    final RelDataType widened = factory.leastRestrictive(Lists.newArrayList(bigIntType, anyType));
    assertEquals(widened.getSqlTypeName(), SqlTypeName.ANY);
}
|
|
Like Feedback org.apache.calcite.sql.type.SqlTypeFactoryImpl |
|
|
Sample 6. Get files from a directory
|
/**
 * Collects the entries of {@code dir} matching this instance's {@code glob}
 * pattern (field declared elsewhere in the class).
 *
 * @param dir directory path to scan (non-recursive)
 * @return the matching files; an empty list if the directory cannot be read
 */
private List<File> getFilesFromDirectory(String dir) {
    List<File> files = Lists.newArrayList();
    Path dirPath = Paths.get(dir);
    // try-with-resources: the original never closed the DirectoryStream,
    // leaking a directory handle on every call.
    try (DirectoryStream<Path> dirFiles = Files.newDirectoryStream(dirPath, glob)) {
        for (Path dirFile : dirFiles) {
            files.add(dirFile.toFile());
        }
    } catch (Exception cause) {
        // Best-effort listing is intentional, but report the failure with
        // context and the cause instead of a meaningless message.
        System.out.println("Failed to list directory " + dir + ": " + cause);
    }
    return files;
}
|
|
Like Feedback |
|
|