
Hadoop single-table and multi-table joins

Here is a supplementary single-table join example:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.google.common.collect.Lists;

public class Single {

    private static class SingleMapper extends
            Mapper<LongWritable, Text, Text, Text> {

        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, Text, Text>.Context context)
                throws IOException, InterruptedException {
            String string = value.toString();
            // Skip the header line ("child parent")
            if (!string.contains("child")) {
                String[] strings = string.split(" ");
                // Tag ":1" marks the record keyed by the child (value is its parent),
                // tag ":2" marks the record keyed by the parent (value is its child)
                context.write(new Text(strings[0]), new Text(strings[1] + ":1"));
                context.write(new Text(strings[1]), new Text(strings[0] + ":2"));
            }
        }
    }

    // reduce is invoked once per key
    private static class SingleReduce extends
            Reducer<Text, Text, Text, Text> {

        @Override
        protected void reduce(Text key, Iterable<Text> values,
                Reducer<Text, Text, Text, Text>.Context context)
                throws IOException, InterruptedException {
            List<String> left = Lists.newArrayList();
            List<String> right = Lists.newArrayList();
            for (Text value : values) {
                String[] strings = value.toString().split(":");
                if (strings[1].equals("1")) {
                    right.add(strings[0]);   // a parent of the current key
                } else {
                    left.add(strings[0]);    // a child of the current key
                }
            }
            // Cartesian product of the two lists yields the grandchild-grandparent pairs
            for (String lef : left) {
                for (String rig : right) {
                    context.write(new Text(lef), new Text(rig));
                }
            }
        }
    }

    public static void main(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {
        Configuration configuration = HadoopConfig.getConfiguration();  // project-specific helper
        Job job = Job.getInstance(configuration, "single-table join");
        job.setJarByClass(Single.class);
        job.setMapperClass(SingleMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setReducerClass(SingleReduce.class);
        FileInputFormat.addInputPath(job, new Path("/data"));
        FileOutputFormat.setOutputPath(job, new Path("/single"));
        job.waitForCompletion(true);
    }
}
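To see how the :1/:2 tags drive the join, assume a hypothetical input file with a "child parent" header and single-space-separated rows such as (the names are made up for illustration):

    child parent
    Tom Lucy
    Lucy Mary

For "Tom Lucy" the mapper emits (Tom, Lucy:1) and (Lucy, Tom:2); for "Lucy Mary" it emits (Lucy, Mary:1) and (Mary, Lucy:2). After the shuffle, the key Lucy carries the values Tom:2 and Mary:1, so the reducer puts Tom into the left list and Mary into the right list, and their Cartesian product yields the pair (Tom, Mary): Tom's grandparent is Mary.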

And a supplementary multi-table join example:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MTjoin {

    public static class Map extends Mapper<LongWritable, Text, Text, Text>{
        private static Text k = new Text();
        private static Text v = new Text();

        protected void map(LongWritable key, Text value, Context context)
                throws java.io.IOException ,InterruptedException {
            String[] splits = value.toString().split("\t");
            if(splits.length != 2){
                return ;
            }

            // Get the file name: a.txt holds (factory name, id), b.txt holds (id, address)
            String fileName = ((FileSplit)context.getInputSplit()).getPath().getName();
            if("a.txt".equals(fileName)){
                k.set(splits[1]);
                v.set("1"+splits[0]);
            }else if("b.txt".equals(fileName)){
                k.set(splits[0]);
                v.set("2"+splits[1]);
            }else{
                return ;
            }
            context.write(k, v);
        }
    }
    public static class Reduce extends Reducer<Text, Text, Text, Text>{
        private static List<String> names = new ArrayList<String>();
        private static List<String> addrs = new ArrayList<String>();
        private static Text name = new Text();
        private static Text addr = new Text();

        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws java.io.IOException ,InterruptedException {
            for (Text value : values) {
                String temp = value.toString();
                if(temp.startsWith("1")){
                    names.add(temp.substring(1));
                }else{
                    addrs.add(temp.substring(1));
                }
            }
            for (String n : names) {
                for (String a : addrs) {
                    name.set(n);
                    addr.set(a);
                    context.write(name, addr);
                }
            }
            names.clear();
            addrs.clear();
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf,args).getRemainingArgs();
        if(otherArgs.length != 2){
            System.err.println("Usage:MTjoin");
            System.exit(2);
        }
        Job job = new Job(conf, "MTjoin");
        job.setJarByClass(MTjoin.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);

    }
}
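As a quick illustration with made-up rows, suppose a.txt and b.txt each contain one line, with the two columns separated by a tab (the mapper splits on "\t" and skips lines that do not have exactly two fields):

    a.txt:  Beijing Red Star    1        (factory name, address id)
    b.txt:  1    Beijing                 (address id, address)

Both records are keyed by the address id 1: the factory name is emitted with the prefix "1" and the address with the prefix "2". In the reducer the prefixes route the values into names and addrs respectively, and the Cartesian product writes the joined pair (Beijing Red Star, Beijing).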

Design approach for the single-table join
Analyzing this example, a single-table join is clearly required: the parent column of the left table is joined to the child column of the right table, and the left and right tables are the same table.
Dropping the two join columns from the join result leaves exactly the desired output. To solve this with MapReduce, first consider how to implement the table's self-join, then how to set up the join keys, and finally how to assemble the result.
Since the MapReduce shuffle brings records with the same key together, the map output key can be set to the column being joined on; rows with the same value in that column are then grouped automatically. Tying this back to the analysis above:

The join is between the left table's parent column and the right table's child column, and the two tables are the same table. So in the map phase, after splitting each input record into child and parent, we first emit the record with parent as the key and child as the value, serving as the left table; then, for the same child-parent pair, we emit it again with child as the key and parent as the value, serving as the right table. To distinguish the two in the map output, a left/right-table tag is added to the value. The join itself is completed during the shuffle. The reducer then receives the joined result: each key's value-list contains the grandchild-grandparent relationships. Parse each key's value-list, put the child fields from the left-table records into one array and the parent fields from the right-table records into another, and the Cartesian product of the two arrays is the final result.
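Concretely, for a hypothetical input row "Tom Lucy" (child Tom, parent Lucy), the map emits two tagged records:

    key = Lucy, value = 1+Tom+Lucy    (left table, keyed by the parent)
    key = Tom,  value = 2+Tom+Lucy    (right table, keyed by the child)

In the reducer, records tagged 1 contribute their child field to the grandchild array and records tagged 2 contribute their parent field to the grandparent array, which is exactly what the code below does.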

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class STjoin {

    public static int time = 0;

    public static class Map extends Mapper<Object, Text, Text, Text> {

        // map implementation
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String childname = new String();
            String parentname = new String();
            String relationtype = new String();   // left/right-table flag

            // Tokenize the input line
            StringTokenizer itr = new StringTokenizer(value.toString());
            String[] values = new String[2];
            int i = 0;
            while (itr.hasMoreTokens()) {
                values[i] = itr.nextToken();
                i++;
            }
            // Skip the header line ("child parent")
            if (values[0].compareTo("child") != 0) {
                childname = values[0];
                parentname = values[1];
                // Emit the left-table record, keyed by the parent
                relationtype = "1";
                context.write(new Text(values[1]),
                        new Text(relationtype + "+" + childname + "+" + parentname));
                // Emit the right-table record, keyed by the child
                relationtype = "2";
                context.write(new Text(values[0]),
                        new Text(relationtype + "+" + childname + "+" + parentname));
            }
        }
    }

public static class Reduce extends Reducer<Text,Text,Text,Text>{
public void reduce(Text key,Iterable<Text> values,Context context)
throws IOException,InterruptedException{
if(0==time){
context.write(new Text("grandchild"),new Text("grandparent"));
time++;
}
int grandchildnum=0;
String [] grandchild=new String [10];
int grandparentnum=0;
String [] grandparent =new String[10];
Iterator ite=values.iterator();
while(ite.hashNext()){
String record =ite.next().toString();
int len=record.length();
int i=2;
if(0==len){
continue;
}
// 取得左右表標識

                char relationtype = record.charAt(0);

                // 定義孩子和父母變數

                String childname = new String();

                String parentname = new String();



                // 獲取value-list中value的child

                while (record.charAt(i) != '+') {

                    childname += record.charAt(i);

                    i++;

                }



                i = i + 1;



                // 獲取value-list中value的parent

                while (i < len) {

                    parentname += record.charAt(i);

                    i++;

                }



                // 左表,取出child放入grandchildren

                if ('1' == relationtype) {

                    grandchild[grandchildnum] = childname;

                    grandchildnum++;

                }



                // 右表,取出parent放入grandparent

                if ('2' == relationtype) {

                    grandparent[grandparentnum] = parentname;

                    grandparentnum++;

                }

            }



            // grandchild和grandparent陣列求笛卡爾兒積

            if (0 != grandchildnum && 0 != grandparentnum) {

                for (int m = 0; m < grandchildnum; m++) {

                    for (int n = 0; n < grandparentnum; n++) {

                        // 輸出結果

                        context.write(new Text(grandchild[m]), new Text(grandparent[n]));

                    }

                }

            }

        }

    }


    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.job.tracker", "192.168.224.100");   // JobTracker address

        String[] ioArgs = new String[] { "STjoin_in", "STjoin_out" };
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Single Table Join <in> <out>");
            System.exit(2);
        }

        Job job = new Job(conf, "Single Table Join");
        job.setJarByClass(STjoin.class);

        // Set the Map and Reduce classes
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        // Set the output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Set the input and output directories
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
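The driver hard-codes the HDFS input and output directories STjoin_in and STjoin_out. Assuming the classes are packaged into a jar (the jar name below is only illustrative), the job can be launched with a command along the lines of:

    hadoop jar stjoin.jar STjoin

Next is a second multi-table join implementation. Unlike the earlier MTjoin, which tells the two source tables apart by file name, this version tags each record by checking whether a token starts with a digit (the address ID).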
import java.io.IOException;
import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MTjoin {

    public static int time = 0;

    /*
     * In map, first determine whether the input line belongs to the left or
     * the right table, then split the two column values: the join column goes
     * into the key, while the remaining column plus the left/right-table flag
     * go into the value, which is then emitted.
     */

    public static class Map extends Mapper<Object, Text, Text, Text> {

        // map implementation
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();        // one input line
            String relationtype = new String();    // left/right-table flag

            // Skip the header line of either input file
            if (line.contains("factoryname") == true
                    || line.contains("addressed") == true) {
                return;
            }

            // Tokenize the input line
            StringTokenizer itr = new StringTokenizer(line);
            String mapkey = new String();
            String mapvalue = new String();
            int i = 0;
            while (itr.hasMoreTokens()) {
                // Read one token
                String token = itr.nextToken();
                // A token starting with a digit is the address ID: use it as the key
                if (token.charAt(0) >= '0' && token.charAt(0) <= '9') {
                    mapkey = token;
                    if (i > 0) {
                        relationtype = "1";   // ID comes after text: factory table (left)
                    } else {
                        relationtype = "2";   // ID comes first: address table (right)
                    }
                    continue;
                }

                // Accumulate the factory name or address text
                mapvalue += token + " ";
                i++;
            }

            // Emit the tagged left/right-table record
            context.write(new Text(mapkey), new Text(relationtype + "+" + mapvalue));
        }
    }



    /*
     * reduce parses the map output, stores the values separately according to
     * their left/right-table flag, then computes the Cartesian product and
     * emits it.
     */
    public static class Reduce extends Reducer<Text, Text, Text, Text> {

        // reduce implementation
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {

            // Write the header row once
            if (0 == time) {
                context.write(new Text("factoryname"), new Text("addressname"));
                time++;
            }

            int factorynum = 0;
            String[] factory = new String[10];
            int addressnum = 0;
            String[] address = new String[10];

            Iterator<Text> ite = values.iterator();
            while (ite.hasNext()) {
                String record = ite.next().toString();
                int len = record.length();
                int i = 2;
                if (0 == len) {
                    continue;
                }

                // Read the left/right-table flag
                char relationtype = record.charAt(0);

                // Left table: factory name
                if ('1' == relationtype) {
                    factory[factorynum] = record.substring(i);
                    factorynum++;
                }

                // Right table: address
                if ('2' == relationtype) {
                    address[addressnum] = record.substring(i);
                    addressnum++;
                }
            }

            // Cartesian product of the factory and address arrays
            if (0 != factorynum && 0 != addressnum) {
                for (int m = 0; m < factorynum; m++) {
                    for (int n = 0; n < addressnum; n++) {
                        // Emit one result pair
                        context.write(new Text(factory[m]), new Text(address[n]));
                    }
                }
            }
        }
    }



    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // This setting is important: it points the job at the JobTracker
        conf.set("mapred.job.tracker", "192.168.1.2:9001");

        String[] ioArgs = new String[] { "MTjoin_in", "MTjoin_out" };
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Multiple Table Join <in> <out>");
            System.exit(2);
        }

        Job job = new Job(conf, "Multiple Table Join");
        job.setJarByClass(MTjoin.class);

        // Set the Map and Reduce classes
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        // Set the output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Set the input and output directories
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
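To see how the digit check separates the two tables, take one hypothetical line from each input file: "Beijing Red Star 1" (factory table: name followed by the address ID) and "1 Beijing" (address table: ID followed by the address). In the first line the ID token appears after the name, so the mapper emits key 1 with the value 1+Beijing Red Star; in the second the ID comes first, so it emits key 1 with the value 2+Beijing. The reducer groups both under key 1, and the Cartesian product produces the row (Beijing Red Star, Beijing) beneath the factoryname/addressname header.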