[HADOOP] FileNotFoundException when using the Hadoop distributed cache

Hopefully somebody will reply this time. I am struggling to run code that uses the distributed cache. I already have the files on HDFS, but when I run this code:

import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.awt.image.Raster;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URISyntaxException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.imageio.ImageIO;
import org.apache.hadoop.filecache.*;
import org.apache.hadoop.fs.FileSystem;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import java.lang.String;
import java.lang.Runtime;
import java.net.URI;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;

public class blur2 {
public static class BlurMapper extends MapReduceBase implements Mapper<Text, BytesWritable, LongWritable, BytesWritable>
{
    OutputCollector<LongWritable, BytesWritable> goutput;

    int IMAGE_HEIGHT = 240;
    int IMAGE_WIDTH = 320;
    public BytesWritable Gmiu;
    public BytesWritable Gsigma;
    public BytesWritable w;
    byte[] bytes = new byte[IMAGE_HEIGHT*IMAGE_WIDTH*3];
    public BytesWritable emit = new BytesWritable(bytes);
    int count = 0;
    int initVar = 125;
    public LongWritable l = new LongWritable(1);
    byte[] byte1 = new byte[IMAGE_HEIGHT*IMAGE_WIDTH];
    byte[] byte2 = new byte[IMAGE_HEIGHT*IMAGE_WIDTH];
    byte[] byte3 = new byte[IMAGE_HEIGHT*IMAGE_WIDTH];
    public void map(Text key, BytesWritable file,OutputCollector<LongWritable, BytesWritable> output, Reporter reporter) throws IOException {
                    goutput = output;
                    BufferedImage img = ImageIO.read(new ByteArrayInputStream(file.getBytes()));
                    Raster ras = img.getData();
                    DataBufferByte db = (DataBufferByte) ras.getDataBuffer();
                    byte[] data = db.getData();


                    if(count==0){

                        for(int i=0;i<IMAGE_HEIGHT*IMAGE_WIDTH;i++)
                         {
                         byte1[i]=20;
                         byte2[i]=125;
                         }
                            Gmiu = new BytesWritable(data);
                            Gsigma = new BytesWritable(byte1);
                            w = new BytesWritable(byte2);
                            count++;
                    }

                        else{
                        byte1 = Gmiu.getBytes();
                        byte2 = Gsigma.getBytes();
                        byte3 = w.getBytes();
                             for(int i=0;i<IMAGE_HEIGHT*IMAGE_WIDTH;i++)
                             {
                                byte pixel = data[i];
                                Double  tempmiu=new Double(0.0);
                                Double  tempsig=new Double(0.0);
                                 double temp1=0.0; double alpha = 0.05;
                                 tempmiu = (1-alpha)*byte1[i] + alpha*pixel;
                                 temp1=temp1+(pixel-byte1[i])*(pixel-byte1[i]);
                                 tempsig=(1-alpha)*byte2[i]+ alpha*temp1;

                                 byte1[i] = tempmiu.byteValue();
                                 byte2[i]= tempsig.byteValue();
                                 Double w1=new Double((1-alpha)*byte3[i]+alpha*100);
                                 byte3[i] = w1.byteValue();
                             }
                             Gmiu.set(byte1,0,IMAGE_HEIGHT*IMAGE_WIDTH);
                             Gsigma.set(byte2,0,IMAGE_HEIGHT*IMAGE_WIDTH);
                             w.set(byte3,0,IMAGE_HEIGHT*IMAGE_WIDTH);
                        }

                        // pack the miu, sigma and w planes into one output buffer
                        byte1 = Gmiu.getBytes();
                        for(int i=0;i<IMAGE_HEIGHT*IMAGE_WIDTH;i++)
                        {
                            bytes[i]=byte1[i];
                        }
                        byte1 = Gsigma.getBytes();
                        for(int i=0;i<IMAGE_HEIGHT*IMAGE_WIDTH;i++)
                        {
                            bytes[IMAGE_HEIGHT*IMAGE_WIDTH+i]=byte1[i];
                        }
                        byte1 = w.getBytes();
                        for(int i=0;i<IMAGE_HEIGHT*IMAGE_WIDTH;i++)
                        {
                            bytes[2*IMAGE_HEIGHT*IMAGE_WIDTH+i]=byte1[i];
                        }
                        emit.set(bytes,0,3*IMAGE_HEIGHT*IMAGE_WIDTH);

        }

        @Override
         public void close(){
            try{
                  goutput.collect(l, emit);
            }
            catch(Exception e){
                e.printStackTrace();
                System.exit(-1);
            }

         }

 }
// end of the first job -- this part is running perfectly
        public static void main(String[] args) throws URISyntaxException {

                if(args.length!=3) {

                        System.err.println("Usage: blurvideo input  output");
                        System.exit(-1);

                 }
                JobClient client = new JobClient();
                JobConf conf = new JobConf(blur2.class);

                conf.setOutputValueClass(BytesWritable.class);
                conf.setInputFormat(SequenceFileInputFormat.class);
                //conf.setNumMapTasks(n)

                SequenceFileInputFormat.addInputPath(conf, new Path(args[0]));
                TextOutputFormat.setOutputPath(conf, new Path(args[1]));
                conf.setMapperClass(BlurMapper.class);
                conf.setNumReduceTasks(0);
                //conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);

                client.setConf(conf);
                try {
                       JobClient.runJob(conf);
                } catch (Exception e) {
                        e.printStackTrace();
                }

             //  exec("jar cf /home/hmobile/hadoop-0.19.2/imag /home/hmobile/hadoop-0.19.2/output");
                JobClient client2 = new JobClient();
                JobConf conf2 = new JobConf(blur2.class);

                conf2.setOutputValueClass(BytesWritable.class);
                conf2.setInputFormat(SequenceFileInputFormat.class);
                //conf.setNumMapTasks(n)

                SequenceFileInputFormat.addInputPath(conf2, new Path(args[0]));
                SequenceFileOutputFormat.setOutputPath(conf2, new Path(args[2]));
                conf2.setMapperClass(BlurMapper2.class);
                conf2.setNumReduceTasks(0);

                DistributedCache.addCacheFile(new URI("~/ayush/output/part-00000"), conf2); // these files are already on the hdfs
                DistributedCache.addCacheFile(new URI("~/ayush/output/part-00001"), conf2);

                client2.setConf(conf2);
                try {
                       JobClient.runJob(conf2);
                } catch (Exception e) {
                        e.printStackTrace();
                }

        }


public static class BlurMapper2 extends MapReduceBase implements Mapper<Text, BytesWritable, LongWritable, BytesWritable>
{

    int IMAGE_HEIGHT = 240;
    int T =60;
    int IMAGE_WIDTH = 320;
    public BytesWritable Gmiu;
    public BytesWritable Gsigma;
    public BytesWritable w;
    byte[] bytes = new byte[IMAGE_HEIGHT*IMAGE_WIDTH];
    public BytesWritable emit = new BytesWritable(bytes);
    int initVar = 125;
    int gg = 0;
    int K = 64;
    int k = 0, k1 = 0, k2 = 0;
    public LongWritable l = new LongWritable(1);
    byte[] Gmiu1 = new byte[IMAGE_HEIGHT*IMAGE_WIDTH*K];
    byte[] Gsigma1 = new byte[IMAGE_HEIGHT*IMAGE_WIDTH*K];
    byte[] w1 = new byte[IMAGE_HEIGHT*IMAGE_WIDTH*K];
    public Path[] localFiles = new Path[2];
    private FileSystem fs;

    @Override
    public void configure(JobConf conf2) {
        try {
            fs = FileSystem.getLocal(new Configuration());
            localFiles = DistributedCache.getLocalCacheFiles(conf2);
            //System.out.println(localFiles[0].getName());
        } catch (IOException ex) {
            Logger.getLogger(blur2.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    public void map(Text key, BytesWritable file, OutputCollector<LongWritable, BytesWritable> output, Reporter reporter) throws IOException
    {
        if (gg == 0) {
            //System.out.println(localFiles[0].getName());
            String wrd;
            String line;
            for (Path f : localFiles) {
                if (!f.getName().endsWith("crc")) {
                    //  FSDataInputStream localFile = fs.open(f);
                    BufferedReader br = null;
                    try {
                        br = new BufferedReader(new InputStreamReader(fs.open(f)));
                        int c = 0;
                        try {
                            while ((line = br.readLine()) != null) {
                                StringTokenizer itr = new StringTokenizer(line, " ");
                                while (itr.hasMoreTokens()) {
                                    wrd = itr.nextToken();
                                    c++;
                                    int i = Integer.parseInt(wrd, 16);
                                    byte b = (byte) i;
                                    if (c < IMAGE_HEIGHT * IMAGE_WIDTH) {
                                        Gmiu1[k] = b;      // first plane: miu
                                        k++;
                                    } else if (c < 2 * IMAGE_HEIGHT * IMAGE_WIDTH) {
                                        Gsigma1[k1] = b;   // second plane: sigma
                                        k1++;
                                    } else {
                                        w1[k2] = b;        // third plane: w
                                        k2++;
                                    }
                                }
                            }
                        } catch (IOException ex) {
                            Logger.getLogger(blur2.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    } catch (FileNotFoundException ex) {
                        Logger.getLogger(blur2.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        try {
                            if (br != null) {
                                br.close();
                            }
                        } catch (IOException ex) {
                            Logger.getLogger(blur2.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
            }
            gg++;
        }
    }
}
}

I have wrestled with this a lot. Can anyone tell me why I am getting this error:

java.io.FileNotFoundException: File does not exist: ~/ayush/output/part-00000
    at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:394)
    at org.apache.hadoop.filecache.DistributedCache.getTimestamp(DistributedCache.java:475)
    at org.apache.hadoop.mapred.JobClient.configureCommandLineOptions(JobClient.java:676)
    at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:774)
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1127)
    at blur2.main(blur2.java:175)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    at java.lang.reflect.Method.invoke(Method.java:597)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:165)
    at org.apache.hadoop.mapred.JobShell.run(JobShell.java:54)
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
    at org.apache.hadoop.mapred.JobShell.main(JobShell.java:68)

Solution

  1. The problem is that you are using the filename "~/ayush/output/part-00000", which relies on a Unix shell (sh, bash, ksh) performing tilde expansion to replace the "~" with the pathname of your home directory.

     Java (and C, C++, and most other programming languages) does not perform tilde expansion. You have to provide the pathname as "/home/ayush/output/part-00000" ... or whatever absolute pathname the tilded form expands to.

     Strictly speaking, the URI should be created as

     new File("/home/ayush/output/part-00000").toURI()

     rather than as

     new URI("/home/ayush/output/part-00000")

     The latter creates a URI without a "protocol" (scheme), and that can be problematic.
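
     For this question, where the part files already live on HDFS (the stack trace shows DistributedFileSystem resolving the path), a minimal sketch of the corrected cache setup might look like the following. The absolute path /user/ayush/output is an assumption here; substitute whatever "hadoop fs -ls" actually reports for the files:

     import java.net.URI;

     import org.apache.hadoop.filecache.DistributedCache;
     import org.apache.hadoop.mapred.JobConf;

     public class CacheSetup {
         public static void main(String[] args) throws Exception {
             JobConf conf2 = new JobConf(CacheSetup.class);
             // Java never expands "~", so spell out the absolute HDFS path.
             // /user/ayush/output is assumed here; verify with: hadoop fs -ls
             DistributedCache.addCacheFile(new URI("/user/ayush/output/part-00000"), conf2);
             DistributedCache.addCacheFile(new URI("/user/ayush/output/part-00001"), conf2);
         }
     }

     A scheme-less URI like these is resolved against the default filesystem (fs.default.name) at job submission time, which is why the stack trace above shows HDFS, not the local disk, failing to find the literal path "~/ayush/output/part-00000".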

  2. From https://stackoverflow.com/questions/3219012/filenotfoundexception-when-using-hadoop-distributed-cache (CC BY-SA; code under the MIT license)