 
 
// Data generator: writes interleaved hotel/landmark records to a file on HDFS.
// The class/method header of this program was truncated in the excerpt; the
// wrapper and imports below are a minimal reconstruction (class name included).
import java.io.OutputStream;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HotelDataGenerator {

    public static void main(String[] args) throws Exception {
        int len = 40000;   // coordinates are drawn from [0, 40000)
        int hotel = 1000;  // number of hotels
        int LM = 1000;     // landmarks generated per hotel
        Random ran = new Random();
        int type = 0; // 0 = hotel, 1 = landmark
        String hdfs_path = "/tmp/hotel";
        Configuration hadoopconf = new Configuration();
        hadoopconf.set("fs.default.name", "hdfs://m04.ct1.r01.hdp:9000"); // fs.defaultFS on newer Hadoop
        FileSystem myFS = FileSystem.get(hadoopconf);
        OutputStream out = myFS.create(new Path(hdfs_path));

        for (int i = 0; i < hotel; i++) { // two nested loops interleave the hotel/landmark records
            // record layout: type (hotel/landmark) \t id (hotel id / landmark id) \t latitude \t longitude
            int hx = ran.nextInt(len);
            int hy = ran.nextInt(len);
            type = 0;
            StringBuilder str = new StringBuilder();
            str.append(type);          // type (hotel/landmark)
            str.append("\t");
            str.append(i * 10000);     // id (hotel id / landmark id)
            str.append("\t");
            str.append(hx);            // hotel X
            str.append("\t");
            str.append(hy);            // hotel Y
            str.append("\r\n");
            type = 1;
            for (int k = 0; k < LM; k++) {
                int lmx = ran.nextInt(len);
                int lmy = ran.nextInt(len);
                str.append(type);              // type (hotel/landmark)
                str.append("\t");
                str.append(i * 10000 + k + 1); // id (hotel id / landmark id)
                str.append("\t");
                str.append(lmx);               // landmark X
                str.append("\t");
                str.append(lmy);               // landmark Y
                str.append("\r\n");
            }
            byte[] midbytes1 = str.toString().getBytes("UTF8");
            out.write(midbytes1);
            System.out.println(i * 1000); // progress printout
        }
        out.close();
    }
}
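Each run therefore writes tab-separated, CRLF-terminated records of the form type, id, X, Y: hotel i (id i * 10000) followed by its 1000 landmarks (ids i * 10000 + 1 through i * 10000 + 1000). The coordinates below are made-up illustrations, since the generator draws them at random:

    0	0	17325	2210
    1	1	30917	8841
    1	2	4406	39123
    ...
    0	10000	25531	7098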
MapReduce: find all landmarks within 1 km of every hotel
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
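The body of the article's job is cut off right after these imports (the WritableComparable, WritableComparator, and Partitioner imports suggest the original used a composite key with a custom partitioner, i.e. a secondary-sort design). As a stand-in, here is a minimal sketch of one standard way to solve the task with a grid index; it is not the author's implementation. It assumes one coordinate unit is one metre (so the 1 km radius is 1000 units), and the class name NearbyLandmarks and output path /tmp/hotel_out are hypothetical. It reuses the imports above, plus java.util.List.

import java.util.List; // in addition to the imports listed above

public class NearbyLandmarks {

    // Each point falls into a 1000x1000 grid cell. Hotels go to their own
    // cell only; landmarks are replicated to their cell and its 8 neighbours,
    // so every hotel/landmark pair within 1 km meets in exactly one reduce call.
    public static class GridMapper extends Mapper<LongWritable, Text, Text, Text> {
        private static final int CELL = 1000; // cell edge = search radius

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] f = value.toString().trim().split("\t");
            if (f.length < 4) return; // skip malformed lines
            int type = Integer.parseInt(f[0]);
            int cx = Integer.parseInt(f[2]) / CELL;
            int cy = Integer.parseInt(f[3]) / CELL;
            if (type == 0) {
                context.write(new Text(cx + "," + cy), value); // hotel
            } else {
                for (int dx = -1; dx <= 1; dx++)               // landmark
                    for (int dy = -1; dy <= 1; dy++)
                        context.write(new Text((cx + dx) + "," + (cy + dy)), value);
            }
        }
    }

    public static class DistanceReducer extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            List<int[]> hotels = new ArrayList<>();
            List<int[]> landmarks = new ArrayList<>();
            for (Text v : values) { // split the cell's points by type: {id, x, y}
                String[] f = v.toString().split("\t");
                int[] p = { Integer.parseInt(f[1]), Integer.parseInt(f[2]), Integer.parseInt(f[3]) };
                ("0".equals(f[0]) ? hotels : landmarks).add(p);
            }
            for (int[] h : hotels)
                for (int[] l : landmarks) {
                    long dx = h[1] - l[1], dy = h[2] - l[2];
                    if (dx * dx + dy * dy <= 1000L * 1000L) // squared distance, avoids sqrt
                        context.write(new Text("hotel " + h[0]), new Text("landmark " + l[0]));
                }
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "landmarks within 1 km of hotels");
        job.setJarByClass(NearbyLandmarks.class);
        job.setMapperClass(GridMapper.class);
        job.setReducerClass(DistanceReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path("/tmp/hotel"));        // generator output above
        FileOutputFormat.setOutputPath(job, new Path("/tmp/hotel_out")); // hypothetical
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

With 1,000 hotels and 1,000,000 landmarks spread over the 40,000 x 40,000 plane there are 1,600 cells, so each reduce call compares at most a handful of hotels against a few thousand replicated landmarks, which keeps the pairwise distance check cheap.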