source: proiecte/HadoopJUnit/hadoop-0.20.1/src/c++/librecordio/test/test.cc @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 9.8 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "test.hh"

#include <cstdio>   // printf, sprintf
#include <map>
#include <string>
#include <vector>

int main()
{
  org::apache::hadoop::record::test::RecRecord1 r1;
  org::apache::hadoop::record::test::RecRecord1 r2;
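  // Round-trip r1 through a binary archive on disk, read it back into r2,
  // and compare: the records must be equal field-for-field.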
  {
    hadoop::FileOutStream ostream;
    ostream.open("/tmp/hadooptmp.dat", true);
    hadoop::RecordWriter writer(ostream, hadoop::kBinary);
    r1.setBoolVal(true);
    r1.setByteVal((int8_t)0x66);
    r1.setFloatVal(3.145);
    r1.setDoubleVal(1.5234);
    r1.setIntVal(4567);
    r1.setLongVal(0x5a5a5a5a5a5aLL);
    std::string& s = r1.getStringVal();
    s = "random text";
    writer.write(r1);
    ostream.close();
    hadoop::FileInStream istream;
    istream.open("/tmp/hadooptmp.dat");
    hadoop::RecordReader reader(istream, hadoop::kBinary);
    reader.read(r2);
    if (r1 == r2) {
      printf("Binary archive test passed.\n");
    } else {
      printf("Binary archive test failed.\n");
    }
    istream.close();
  }
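  // Repeat the round-trip with the CSV archive format.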
  {
    hadoop::FileOutStream ostream;
    ostream.open("/tmp/hadooptmp.txt", true);
    hadoop::RecordWriter writer(ostream, hadoop::kCSV);
    r1.setBoolVal(true);
    r1.setByteVal((int8_t)0x66);
    r1.setFloatVal(3.145);
    r1.setDoubleVal(1.5234);
    r1.setIntVal(4567);
    r1.setLongVal(0x5a5a5a5a5a5aLL);
    std::string& s = r1.getStringVal();
    s = "random text";
    writer.write(r1);
    ostream.close();
    hadoop::FileInStream istream;
    istream.open("/tmp/hadooptmp.txt");
    hadoop::RecordReader reader(istream, hadoop::kCSV);
    reader.read(r2);
    if (r1 == r2) {
      printf("CSV archive test passed.\n");
    } else {
      printf("CSV archive test failed.\n");
    }
    istream.close();
  }
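  // Repeat the round-trip with the XML archive format.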
  {
    hadoop::FileOutStream ostream;
    ostream.open("/tmp/hadooptmp.xml", true);
    hadoop::RecordWriter writer(ostream, hadoop::kXML);
    r1.setBoolVal(true);
    r1.setByteVal((int8_t)0x66);
    r1.setFloatVal(3.145);
    r1.setDoubleVal(1.5234);
    r1.setIntVal(4567);
    r1.setLongVal(0x5a5a5a5a5a5aLL);
    std::string& s = r1.getStringVal();
    s = "random text";
    writer.write(r1);
    ostream.close();
    hadoop::FileInStream istream;
    istream.open("/tmp/hadooptmp.xml");
    hadoop::RecordReader reader(istream, hadoop::kXML);
    reader.read(r2);
    if (r1 == r2) {
      printf("XML archive test passed.\n");
    } else {
      printf("XML archive test failed.\n");
    }
    istream.close();
  }

  /*
   * Tests to check for versioning functionality
   */

  // basic test:
  // write out a record and its type info, then read it back using that type info
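  // RecordTypeInfo describes the record's fields and types; writing it to a
  // side file lets a reader reconcile data written by a different version.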
  {
    hadoop::FileOutStream ostream, ortistream;
    ostream.open("/tmp/hadooptmp.dat", true);
    ortistream.open("/tmp/hadooprti.dat", true);
    hadoop::RecordWriter writer(ostream, hadoop::kBinary);
    hadoop::RecordWriter writerRti(ortistream, hadoop::kBinary);
    r1.setBoolVal(true);
    r1.setByteVal((int8_t)0x66);
    r1.setFloatVal(3.145);
    r1.setDoubleVal(1.5234);
    r1.setIntVal(4567);
    r1.setLongVal(0x5a5a5a5a5a5aLL);
    std::string& s = r1.getStringVal();
    s = "random text";
    writer.write(r1);
    ostream.close();
    // write out rti info
    writerRti.write(org::apache::hadoop::record::test::RecRecord1::getTypeInfo());
    ortistream.close();

    // read
    hadoop::FileInStream istream;
    istream.open("/tmp/hadooptmp.dat");
    hadoop::RecordReader reader(istream, hadoop::kBinary);
    hadoop::FileInStream irtistream;
    irtistream.open("/tmp/hadooprti.dat");
    hadoop::RecordReader readerRti(irtistream, hadoop::kBinary);
    hadoop::RecordTypeInfo rti;
    readerRti.read(rti);
    irtistream.close();
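    // Install the type info read from the side file as a filter, so the
    // reader can reconcile the on-disk layout with the record definition.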
    org::apache::hadoop::record::test::RecRecord1::setTypeFilter(rti);
    reader.read(r2);
    if (r1 == r2) {
      printf("Basic versioning test passed.\n");
    } else {
      printf("Basic versioning test failed.\n");
    }
    istream.close();
  }

  // versioning: write out a record and its type info, then read back a
  // similar record using the written record's type info
  {
    hadoop::FileOutStream ostream, ortistream;
    ostream.open("/tmp/hadooptmp.dat", true);
    ortistream.open("/tmp/hadooprti.dat", true);
    hadoop::RecordWriter writer(ostream, hadoop::kBinary);
    hadoop::RecordWriter writerRti(ortistream, hadoop::kBinary);

    // we create an array of records to write
    std::vector<org::apache::hadoop::record::test::RecRecordOld*> recsWrite;
    int i, j, k, l;
    char buf[1000];
    for (i=0; i<5; i++) {
      org::apache::hadoop::record::test::RecRecordOld* ps1Rec =
        new org::apache::hadoop::record::test::RecRecordOld();
      sprintf(buf, "This is record s1: %d", i);
      ps1Rec->getName().assign(buf);

      for (j=0; j<3; j++) {
        ps1Rec->getIvec().push_back((int64_t)(i+j));
      }

      // build each inner vector of RecRecord0s, then attach it to svec
      for (j=0; j<2; j++) {
        std::vector<org::apache::hadoop::record::test::RecRecord0> recVec;
        for (k=0; k<3; k++) {
          org::apache::hadoop::record::test::RecRecord0 sRec;
          sprintf(buf, "This is record s: (%d: %d)", j, k);
          sRec.getStringVal().assign(buf);
          recVec.push_back(sRec);
        }
        ps1Rec->getSvec().push_back(recVec);
      }

      sprintf(buf, "This is record s: %d", i);
      ps1Rec->getInner().getStringVal().assign(buf);

      // build the doubly-nested string vectors, then attach them to strvec
      for (l=0; l<2; l++) {
        std::vector<std::vector<std::string> > strVecVec;
        for (j=0; j<2; j++) {
          std::vector<std::string> strVec;
          for (k=0; k<3; k++) {
            sprintf(buf, "This is a nested string: (%d: %d: %d)", l, j, k);
            strVec.push_back(std::string(buf));
          }
          strVecVec.push_back(strVec);
        }
        ps1Rec->getStrvec().push_back(strVecVec);
      }

      ps1Rec->setI1(100+i);

      ps1Rec->getMap1()[23] = "23";
      ps1Rec->getMap1()[11] = "11";

      std::map<int32_t, int64_t> m1;
      std::map<int32_t, int64_t> m2;
      m1[5] = 5;
      m1[10] = 10;
      m2[15] = 15;
      m2[20] = 20;
      ps1Rec->getMvec1().push_back(m1);
      ps1Rec->getMvec1().push_back(m2);
      ps1Rec->getMvec2().push_back(m1);

      recsWrite.push_back(ps1Rec);
    }

    // write out to file
    for (unsigned int i=0; i<recsWrite.size(); i++) {
      writer.write(*(recsWrite[i]));
    }
    ostream.close();
    // write out rti info
    writerRti.write(org::apache::hadoop::record::test::RecRecordOld::getTypeInfo());
    ortistream.close();

    // read
    hadoop::FileInStream istream;
    istream.open("/tmp/hadooptmp.dat");
    hadoop::RecordReader reader(istream, hadoop::kBinary);
    hadoop::FileInStream irtistream;
    irtistream.open("/tmp/hadooprti.dat");
    hadoop::RecordReader readerRti(irtistream, hadoop::kBinary);
    hadoop::RecordTypeInfo rti;
    readerRti.read(rti);
    irtistream.close();
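    // Here the filter comes from RecRecordOld's serialized type info, but the
    // data is read into RecRecordNew: the reader skips fields that the new
    // version of the record no longer declares.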
    org::apache::hadoop::record::test::RecRecordNew::setTypeFilter(rti);

    // read records
    std::vector<org::apache::hadoop::record::test::RecRecordNew*> recsRead;
    for (unsigned int i=0; i<recsWrite.size(); i++) {
      org::apache::hadoop::record::test::RecRecordNew* ps2Rec =
        new org::apache::hadoop::record::test::RecRecordNew();
      reader.read(*ps2Rec);
      recsRead.push_back(ps2Rec);
    }
    istream.close();
249    // compare
250    bool pass = true;
251    for (unsigned int i=0; i<recsRead.size(); i++) {
252      org::apache::hadoop::record::test::RecRecordNew* ps2In = recsRead[i];
253      org::apache::hadoop::record::test::RecRecordOld* ps1Out = recsWrite[i];
254
255      if (!ps2In->getName2().empty()) {
256        printf("Error in s2: name2\n");
257        pass = false;
258      }
259
260      if (!(ps2In->getInner() == ps1Out->getInner())) {
261        printf("error in s2: s1 struct\n");
262        pass = false;
263      }
264
265      if (0 != ps2In->getIvec().size()) {
266        printf("error in s2: ivec\n");
267        pass = false;
268      }
269
270      if (0 != ps2In->getSvec().size()) {
271        printf("error in s2: svec\n");
272        pass = false;
273      }
274
275      for (unsigned int j=0; j<ps2In->getStrvec().size(); j++) {
276        ::std::vector< ::std::vector< ::std::string > >& ss2Vec = ps2In->getStrvec()[j];
277        ::std::vector< ::std::vector< ::std::string > >& ss1Vec = ps1Out->getStrvec()[j];
278        for (unsigned int k=0; k<ss2Vec.size(); k++) {
279          ::std::vector< ::std::string >& s2Vec = ss2Vec[k];
280          ::std::vector< ::std::string >& s1Vec = ss1Vec[k];
281          for (unsigned int l=0; l<s2Vec.size(); l++) {
282            if (s2Vec[l] != s1Vec[l]) {
283              printf("Error in s2: s2Vec\n");
284              pass = false;
285            }
286          }
287        }
288      }
289
290      if (0 != ps2In->getMap1().size()) {
291        printf("Error in s2: map1\n");
292        pass = false;
293      }
294
295      for (unsigned int j=0; j<ps2In->getMvec2().size(); j++) {
296        if (ps2In->getMvec2()[j] != ps1Out->getMvec2()[j]) {
297          printf("Error in s2: mvec2\n");
298          pass = false;
299        }
300      }
301    }
302 
303    if (pass)   
304      printf("Versioning test passed.\n");
305  }     

  return 0;
}