Example #1
def serializeDataToOCFFile(schemaFile,outputFile,dataToSerialize):
    logging.debug("Parsing in avro schema:"+schemaFile)
    schema=parse_schema(schemaFile)
    logging.debug("Writing avro data to:"+outputFile)
    writer = DataFileWriter(open(outputFile, "w"), DatumWriter(), schema)
    writer.append(dataToSerialize)
    writer.close()
Example #2
File: example1.py Project: 3rwww1/Hadoop
def testAppend(filename):
    fd = open(filename, 'a+b')
    datum_writer = DatumWriter()
    fwriter = DataFileWriter(fd, datum_writer)
    for i in xrange(10, 20):
        fwriter.append(_makeTestPerson(i))
    fwriter.close()
Example #3
  def write(self, format):
    time_start = time.time()

    if format == 'json' or format == 'jsch':
      with open('./output/output.json', 'w') as file:
        for base_person_obj in self._base_person_list:
          file.write(json.dumps(self._get_json_person(base_person_obj), separators=(',', ':')))
        # file.write(json.dumps(self._data_dict, separators=(',', ':')))

    elif format == 'avro':
      writer = DataFileWriter(open('./output/output.avro', 'wb'), DatumWriter(), self._schema_avro)
      for user in self._data_dict:
        writer.append(user)
      writer.close()

    elif format == 'protobuf':
      with open('./output/output.pb', 'wb') as file:
        for base_person_obj in self._base_person_list:
          protobuf_person = self._get_proto_buf_person(base_person_obj)
          file.write(protobuf_person.SerializeToString())

    elif format == 'gzjson':
      with gzip.open('./output/output.jsz', 'wb') as file:
        file.write(json.dumps(self._data_dict, separators=(',', ':')))

    time_end = time.time()

    return time_end - time_start
Example #4
  def _write_lines(self,lines,fname):
    """
    Write the lines to an avro file named fname

    Parameters
    --------------------------------------------------------
    lines - list of strings to write
    fname - the name of the file to write to.
    """
    import avro.io as avio
    from avro.datafile import DataFileReader,DataFileWriter
    from avro import schema

    #recursively make all directories
    dparts=fname.split(os.sep)[:-1]
    for i in range(len(dparts)):
      pdir=os.sep+os.sep.join(dparts[:i+1])
      if not(os.path.exists(pdir)):
        os.mkdir(pdir)


    with file(fname,'w') as hf:
      inschema="""{"type":"string"}"""
      writer=DataFileWriter(hf,avio.DatumWriter(inschema),writers_schema=schema.parse(inschema))

      #encoder = avio.BinaryEncoder(writer)
      #datum_writer = avio.DatumWriter()
      for datum in lines:
        writer.append(datum)

      writer.close()
Example #5
def generate_sample_datasets (host_ips, metric_ids, year, month, day, hour):
    avro_schema = ''
    #load data from hdfs
    cat = subprocess.Popen(['sudo', '-u', 'hdfs', 'hadoop', 'fs', '-cat', '/user/pnda/PNDA_datasets/datasets/.metadata/schema.avsc'], stdout=subprocess.PIPE)
    for line in cat.stdout:
        avro_schema = avro_schema + line
    schema = avro.schema.parse(avro_schema)
    bytes_writer = io.BytesIO()
    encoder = avro.io.BinaryEncoder(bytes_writer)
    #create hdfs folder structure
    dir = create_hdfs_dirs (year, month, day, hour)
    filename = str(uuid.uuid4()) + '.avro'
    filepath = dir + filename
    tmp_file = '/tmp/' + filename
    
    writer = DataFileWriter(open(tmp_file, "w"), DatumWriter(), schema)
    
    start_dt = datetime.datetime(year, month, day, hour, 0, 0) 
    start_ts = int(time.mktime(start_dt.timetuple()))
    end_dt = start_dt.replace(hour=hour+1)
    end_ts = int(time.mktime(end_dt.timetuple()))

    for ts in xrange(start_ts, end_ts, 1):
        #generate random pnda record on per host ip basis
        for host_ip in host_ips:
           record = {}
           record['timestamp'] = (ts * 1000)
           record['src'] = 'test'
           record['host_ip'] = host_ip
           record['rawdata'] = generate_random_metrics(metric_ids)
           #encode avro
           writer.append(record)
    writer.close()
    subprocess.Popen(['sudo', '-u', 'hdfs', 'hadoop', 'fs', '-copyFromLocal', tmp_file, dir])
    return filepath
Example #6
 def _produce_test_input(self):
     schema = avro.schema.parse("""
     {
         "name": "TestQueryTask_record",
         "type": "record",
         "doc": "The description",
         "fields": [
             {"name": "col0", "type": "int", "doc": "The bold"},
             {"name": "col1", "type": {
                 "name": "inner_record",
                 "type": "record",
                 "doc": "This field shall be an inner",
                 "fields": [
                     {"name": "inner", "type": "int", "doc": "A inner field"},
                     {"name": "col0", "type": "int", "doc": "Same name as outer but different doc"},
                     {"name": "col1", "type": ["null", "string"], "default": null, "doc": "Nullable primitive"},
                     {"name": "col2", "type": ["null", {
                         "type": "map",
                         "values": "string"
                     }], "default": null, "doc": "Nullable map"}
                 ]
             }, "doc": "This field shall be an inner"},
             {"name": "col2", "type": "int", "doc": "The beautiful"},
             {"name": "col3", "type": "double"}
         ]
     }""")
     self.addCleanup(os.remove, "tmp.avro")
     writer = DataFileWriter(open("tmp.avro", "wb"), DatumWriter(), schema)
     writer.append({'col0': 1000, 'col1': {'inner': 1234, 'col0': 3000}, 'col2': 1001, 'col3': 1.001})
     writer.close()
     self.gcs_client.put("tmp.avro", self.gcs_dir_url + "/tmp.avro")
Example #7
class AvroRecordWriter(TrivialRecordWriter):
    def __init__(self, simulator, stream):
        super(AvroRecordWriter, self).__init__(simulator, stream)

        self.deserializers = {}
        schema = None
        if self.simulator.avro_output_key_schema:
            self.deserializers['k'] = AvroDeserializer(self.simulator.avro_output_key_schema)
            schema = avro.schema.parse(self.simulator.avro_output_key_schema)

        if self.simulator.avro_output_value_schema:
            self.deserializers['v'] = AvroDeserializer(self.simulator.avro_output_value_schema)
            schema = avro.schema.parse(self.simulator.avro_output_value_schema)

        if self.simulator.avro_output == 'kv':
            schema_k_parsed = avro.schema.parse(self.simulator.avro_output_key_schema)
            schema_v_parsed = avro.schema.parse(self.simulator.avro_output_value_schema)

            schema_k = json.loads(self.simulator.avro_output_key_schema)
            schema_k.pop('namespace', None)
            schema_v = json.loads(self.simulator.avro_output_value_schema)
            schema_v.pop('namespace', None)

            schema = {
                'type': 'record',
                'name': 'kv',
                'fields': [
                   {'name': 'key', 'type': schema_k},
                   {'name': 'value', 'type': schema_v if schema_k_parsed.fullname != schema_v_parsed.fullname
                   else schema_k_parsed.name}
                ]
            }
            schema = avro.schema.parse(json.dumps(schema))

        self.writer = DataFileWriter(self.stream, DatumWriter(), schema)

    def send(self, cmd, *vals):
        if cmd == 'done':
            self.writer.close()
        super(AvroRecordWriter, self).send(cmd, *vals)

    def output(self, key, value):
        if self.simulator.avro_output == 'k':
            obj_to_append = self.deserializers['k'].deserialize(key)
        elif self.simulator.avro_output == 'v':
            obj_to_append = self.deserializers['v'].deserialize(value)
        else:
            obj_to_append = {
                'key': self.deserializers['k'].deserialize(key),
                'value': self.deserializers['v'].deserialize(value)
            }
        self.writer.append(obj_to_append)

    def close(self):
        try:
            self.writer.close()
        except ValueError:  # let's ignore if already closed
            pass
        self.stream.close()
Example #8
def prepare(producer, arr, root, level):
    for it in arr:
        buf = io.BytesIO()
        writer = DataFileWriter(buf, DatumWriter(), sch)
        item = Item(root, it, False)
        writer.append(item.get_dict())
        writer.flush()
        send(buf, level, producer)
Example #9
 def encode(self, raw_data):
     byte_stream = BytesIO()
     writer = DataFileWriter(byte_stream, DatumWriter(), self._schema)
     writer.append(raw_data)
     writer.flush()
     serialized_data = byte_stream.getvalue()
     writer.close()
     return serialized_data
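
Example #9 returns a complete Avro object container file as a byte string. A minimal sketch of the matching decode step, assuming the same avro package and a payload produced by an encode() like the one above (the function name decode is illustrative):

import io
from avro.datafile import DataFileReader
from avro.io import DatumReader

def decode(serialized_data):
    # The container header carries the writer's schema, so no schema argument is needed here.
    byte_stream = io.BytesIO(serialized_data)
    reader = DataFileReader(byte_stream, DatumReader())
    records = list(reader)
    reader.close()
    return records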
Example #10
def gen_avro(filename):
    schema = avro.schema.parse(SCHEMA)
    fo = open(filename, "wb")
    writer = DataFileWriter(fo, DatumWriter(), schema)
    for record in looney_records():
        writer.append(record)
    writer.close()
    fo.close()
Example #11
File: test_script.py Project: 10sr/hue
def gen_avro(filename):
    schema = avro.schema.parse(SCHEMA)
    fo = open(filename, "wb")
    writer = DataFileWriter(fo, DatumWriter(), schema)
    for record in looney_records():
        writer.append(record)
    writer.close()
    fo.close()
Example #12
    def run(self):
        # for normalizing alcohol
        minimum, maximum, average = 100, 0, 0

        with open('raw.csv', 'r') as fd:
            csv_reader = csv.reader(fd, delimiter=',')

            collection = {}
            for i, row in enumerate(csv_reader):
                desc = row[3].lower().replace('.', '').replace(',', '')

                alc = float(row[-1])
                if alc < minimum:
                    minimum = alc
                if alc > maximum:
                    maximum = alc
                average += alc

                # Remove gifts or items without description
                if 'engin' in desc:
                    continue

                if 'gjafa' in desc or 'gjafa' in row[0]:
                    continue

                if 'öskju' in desc or 'öskju' in row[0]:
                    continue

                if 'flöskur m/glasi' in desc or 'kútur' in row[0]:
                    continue

                features = self.parse(desc.split(), row[0])
                features['alcohol'] = alc
                collection[row[0]] = features

        average = average / (i + 1)

        with open('beers.avsc', 'r') as fd:
            schema = avro.schema.Parse(fd.read())

        with open('beers.avro', 'wb') as fd:
            writer = DataFileWriter(fd, DatumWriter(), schema)

            denominator_alc = maximum - minimum

            for k, v in collection.items():
                v['bitterness'] = self.BITTERNESS['class'][
                    v['bitterness']] / self.BITTERNESS['maximum']
                v['color'] = self.COLOR['class'][
                    v['color']] / self.COLOR['maximum']
                v['clarity'] = self.CLARITY['class'][
                    v['clarity']] / self.CLARITY['maximum']
                v['sweetness'] = self.SWEETNESS['class'][
                    v['sweetness']] / self.SWEETNESS['maximum']
                v['alcohol'] = (v['alcohol'] - minimum) / denominator_alc
                v['name'] = k
                writer.append(v)
            writer.close()
Example #13
class Avro_Merger(object):
    _merge_started = False
    _avro_extention = '.avro'
    _avro_stats_record = None


    def __init__(self, path, new_filename):
        try:
            self._avro_files = filter(lambda x: x.endswith(self._avro_extention), iter(os.listdir(path)))
            schema = avro.schema.parse(open(schema_file).read())
            self._writter = DataFileWriter(open(output_file, 'w'), DatumWriter(), schema, 'deflate')
        except Exception as e:
            raise avro.schema.AvroException(e)


    def flog_metadata_handler(func):
        """ This is a decorator that handles avro meta data as well as very last stats record 
            in each file during merging
        """    
        def wrapper(self, avro_records):
            """ Wrapper method for consuming flog avro file
            """
            # Handle meta data
            if self._writter.tell() != 0:  # TODO, need to fix this
                next(avro_records)

            # Handle stats line
            self._avro_stats_record = deque(avro_records, maxlen=1).pop()

            func(self, avro_records)  # func is the undecorated method, so pass self explicitly

        return wrapper
        

    @flog_metadata_handler
    def consume_avro(self, avro_records):
        """ Write the avro data from the butter to file
        """
        map(self._writter.append, iter(self._avro_record))

    
    def merge(self):
        """ Loop through the avros and merge each file
        """
        for file_ in self._avro_files:
            try:
                avro_records = DataFileReader(open(os.path.join(input_dir, file_), "r"), DatumReader())
            except Exception as e:
                raise avro.schema.AvroException(e)

            # Consume the records!
            self.consume_avro(avro_records)

        # Write stats data to the last of the file
        self._writter.append(self._avro_stats_record)
        self._writter.close()
Example #14
    def check_schema(self, data, schema_path):
        schema = avro.schema.Parse(
            open(schema_path, "rb").read().decode("utf-8"))

        writer = DataFileWriter(open('_test.avro', "wb"), DatumWriter(),
                                schema)

        writer.append(data)
        writer.close()
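
Example #14 relies on the fact that appending a datum that does not match the writer's schema raises an error. A sketch of turning that into a pass/fail helper, assuming the same avro package (the helper name is illustrative, and AvroTypeException lives in avro.io on the releases these examples target, avro.errors on newer ones):

import avro.io
import avro.schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter

def conforms_to_schema(data, schema_path):
    # Returns True when `data` can be serialized under the schema at `schema_path`.
    schema = avro.schema.Parse(open(schema_path, "rb").read().decode("utf-8"))
    try:
        with open('_test.avro', "wb") as out:
            writer = DataFileWriter(out, DatumWriter(), schema)
            writer.append(data)  # raises AvroTypeException on a schema mismatch
            writer.close()
        return True
    except avro.io.AvroTypeException:
        return False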
Example #15
def _create_avro_file(schema, items, file_prefix):
    _, result_file_path = tempfile.mkstemp(prefix=file_prefix, suffix='.avro')
    parsed_schema = avro.schema.Parse(schema)
    with open(result_file_path, 'wb') as f:
        writer = DataFileWriter(f, DatumWriter(), parsed_schema)
        for s in items:
            writer.append(s)
        writer.close()
    return result_file_path
Example #16
class AvroFileWriter(Writer):
    def __init__(self, schemaFile, avroFile):
        self.schema = avro.schema.Parse(open(schemaFile, "rb").read())
        self.writer = DataFileWriter(open(avroFile, "wb"), DatumWriter(), self.schema)
    def write(self, obj):
        self.writer.append(obj);

    def close(self):
        self.writer.close()
Example #17
File: example1.py Project: 3rwww1/Hadoop
def testWrite(filename):
    schema_object = avro.schema.parse(TEST_SCHEMA)

    fd = open(filename, 'wb')
    datum_writer = DatumWriter()
    fwriter = DataFileWriter(fd, datum_writer, schema_object)
    for i in xrange(10):
        fwriter.append(_makeTestPerson(i))
    fwriter.close()
Example #18
File: gen-event2.py Project: EricDoug/tomb
def gen_single_day_data(date, schema):
    writer = DataFileWriter(open("events2-{}.avro".format(date), "w"), DatumWriter(), schema)
    N = 10 ** 5
    for i in xrange(0, N):
        tags = ["t{}".format(random.randint(1, 10)) for x in range(0, 4)]
        (tag1, tag2, tag3, tag4) = tags
        cookie = 'CK.{}'.format(random.randint(1, 10 ** 5))
        writer.append({"tag1":tag1, "tag2":tag2, "tag3": tag3, "tag4":tag4, "date":date, "cookie":cookie, "count": 1})
    writer.close()
Example #19
def create_archive(basedir, destdir):
    all_files = []
    all_dirs = []

    # make a snapshot in case the output directory is the bundle source - so we don't recursively bundle the output
    for path, dirs, files in os.walk(basedir):
        for d in dirs:
            dir = os.path.join(path, d)
            all_dirs.append(dir)
        for f in files:
            file = os.path.join(path, f)
            all_files.append(file)

    schema = avro.schema.parse(
        open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "avro-schemas.json")).read())
    fileprefix = time.strftime("%Y%m%d-%H%M%S")
    avrofile = fileprefix + "-part-0001.avro"
    iteration = 1

    fd = open(os.path.join(destdir, avrofile), 'wb')
    datum = avro.io.DatumWriter()
    writer = DataFileWriter(fd, datum, schema, codec='deflate')
    try:
        for d in all_dirs:
            val = makedir(os.path.basename(os.path.normpath(d)),
                          os.path.relpath(d, basedir))
            writer.append(val)

        for f in all_files:
            for sibling, numsiblings, chunk in get_file_chunks(f):
                if (fd.tell() + len(chunk)) > maxfilesize * 1.1:
                    fd, writer, iteration = rotate_avro_file(fd,
                                                             writer,
                                                             iteration,
                                                             fileprefix,
                                                             destdir,
                                                             datum,
                                                             schema)
                file = makefile(os.path.basename(os.path.normpath(f)),
                                os.path.relpath(f, basedir),
                                numsiblings,
                                sibling,
                                chunk)
                writer.append(file)
                writer.flush()
                del file

        for f in all_files:
            os.remove(f)

        for d in all_dirs:
            os.rmdir(d)

    finally:
        writer.close()
        fd.close()
Example #20
def objToBin2():
    file = io.BytesIO()
    datum_writer = DatumWriter()
    fwriter = DataFileWriter(file, datum_writer, sc)
    for d in datum:
        fwriter.append(d)
    fwriter.flush()  # make sure buffered records reach the BytesIO before reading it
    ab = file.getvalue()
    fwriter.close()

    return ab
Example #21
 def serialize_records(records, coin, avro_output=None):
     if avro_output == None:
         avro_output = str(coin) + ".avro"
     transformer = transform_data()
     schema = transformer.parse_schema()
     #avro_output=str(coin) + ".avro"
     with open(avro_output, 'wb') as out:
         writer = DataFileWriter(out, DatumWriter(), schema)
         for record in records:
             writer.append(record)
         writer.close()  # flush buffered records before the with-block closes the file
Example #22
def write_json_to_avro(schema_uri, output_uri, json_str):

    schema = avro.schema.parse(open(schema_uri).read())
    writer = DataFileWriter(open(output_uri, "w"), DatumWriter(), schema)
    json_list = json.loads(json_str)

    for row in json_list:
        writer.append(row)

    writer.close()
Example #23
File: parse.py Project: xsongx/blog-files
def serialize_records(records, outpath="funding.avro"):
    schema = parse_schema()
    # with open(outpath, 'wb') as out:
    out = StringIO()
    writer = DataFileWriter(out, DatumWriter(), schema)
    for record in records:
        record = dict((f, getattr(record, f)) for f in record._fields)
        record['fundedDate'] = record['fundedDate'].strftime('%Y-%m-%dT%H:%M:%S')
        writer.append(record)
    writer.flush()  # push buffered records into the StringIO before returning it
    return out
Example #24
File: log_reader.py Project: namesuqi/zeus
def read_log(topic, log):
    schema = avro.schema.parse(open(os.path.abspath(os.path.dirname(__file__)) + "/avro_schema/" + topic + ".avsc").read())
    print "schema:", schema
    writer = DataFileWriter(open(os.path.abspath(os.path.dirname(__file__)) + "/" + topic + ".avro", "w"), DatumWriter(), schema)
    for i in range(5):
        writer.append(log)
    writer.close()
    reader = DataFileReader(open(os.path.abspath(os.path.dirname(__file__)) + "/" + topic + ".avro", "r"), DatumReader())
    for log in reader:
        print log
Example #25
def serialize_records(records, outpath="funding.avro"):
    schema = parse_schema()
    # with open(outpath, 'wb') as out:
    out = StringIO()
    writer = DataFileWriter(out, DatumWriter(), schema)
    for record in records:
        record = dict((f, getattr(record, f)) for f in record._fields)
        record['fundedDate'] = record['fundedDate'].strftime('%Y-%m-%dT%H:%M:%S')
        writer.append(record)
    writer.flush()  # push buffered records into the StringIO before returning it
    return out
Example #26
def main():
    parser = ArgumentParser(description="Simple AMS example of subscription pull/consume")
    parser.add_argument('--host', type=str, default='messaging-devel.argo.grnet.gr', help='FQDN of AMS Service')
    parser.add_argument('--token', type=str, required=True, help='Given token')
    parser.add_argument('--project', type=str, required=True, help='Project  registered in AMS Service')
    parser.add_argument('--subscription', type=str, required=True, help='Subscription name')
    parser.add_argument('--topic', type=str, required=True, help='Given topic')
    parser.add_argument('--nummsgs', type=int, default=3, help='Number of messages to pull and ack')
    parser.add_argument('--schema', type=str, required=True, help='Avro schema')
    parser.add_argument('--outfile', type=str, required=True, help='Output avro file')
    args = parser.parse_args()

    # initialize service with given token and project
    ams = ArgoMessagingService(endpoint=args.host, token=args.token, project=args.project)

    # ensure that the subscription is created on the first run; messages can be
    # pulled from the subscription only if it already existed for the given
    # topic before messages were published to that topic
    try:
        if not ams.has_sub(args.subscription):
            ams.create_sub(args.subscription, args.topic)
        subscription = ams.get_sub(args.subscription, retobj=True)
    except AmsException as e:
        print(e)
        raise SystemExit(1)

    # try to pull a number of messages from the subscription. The method
    # returns (ackId, AmsMessage) tuples from which the ackIds and message
    # payloads are extracted.
    avro_payloads = list()
    for msg in subscription.pullack(args.nummsgs, retry=5, retrysleep=15, return_immediately=True):
        data = msg.get_data()
        msgid = msg.get_msgid()
        print('msgid={0}'.format(msgid))
        avro_payloads.append(data)

    try:
        schema = load_schema(args.schema)
        if os.path.exists(args.outfile):
            avroFile = open(args.outfile, 'a+')
            writer = DataFileWriter(avroFile, DatumWriter())
        else:
            avroFile = open(args.outfile, 'w+')
            writer = DataFileWriter(avroFile, DatumWriter(), schema)

        for am in avro_payloads:
            msg = avro_deserialize(am, args.schema)
            writer.append(msg)

        writer.close()
        avroFile.close()

    except Exception as e:
        print(e)
        raise SystemExit(1)
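
Example #26 (and Example #28 below) use the same append-or-create pattern: when the output file already exists it is opened for appending and DataFileWriter is built without a schema, so the writer reuses the schema stored in the existing container header; otherwise the file is created and the schema is passed explicitly. A condensed sketch of just that pattern, with an illustrative helper name and binary file modes, assuming the avro package used throughout these examples:

import os
from avro.datafile import DataFileWriter
from avro.io import DatumWriter

def append_records(path, records, schema):
    # `schema` is an already-parsed avro schema object; it is only needed when the file is new.
    if os.path.exists(path):
        avro_file = open(path, 'a+b')
        writer = DataFileWriter(avro_file, DatumWriter())  # schema is read from the existing file
    else:
        avro_file = open(path, 'w+b')
        writer = DataFileWriter(avro_file, DatumWriter(), schema)
    for record in records:
        writer.append(record)
    writer.close()  # also closes avro_file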
Example #27
def main():

    if len(sys.argv) < 3:
        print "Usage:", sys.argv[0]
        print "add [num of events to add] filename"
        print "list filename"
        exit(1)

    command = sys.argv[1]

    if command == 'add':

        noEvents = sys.argv[2]
        filename = sys.argv[3]

        # load existing events

        existingEvents = {}

        try:
            reader = DataFileReader(open(filename, "rb"), DatumReader())
            existingEvents = reader
            reader.close()
        except IOError:
            print filename + ": Could not open file.  Creating a new one."

        # Write back out to disk

        try:

            schema = avro.schema.parse(open("etc/userevent.avsc").read())

            f = open(filename, "w")
            writer = DataFileWriter(f, DatumWriter(), schema)

            # Append new user events

            for i in range(0, int(noEvents)):
                newEvent = createUserEvent()
                print newEvent
                writer.append(newEvent)

            writer.close()

            print "Wrote {0} user events".format(noEvents)
        except IOError:
            print filename + ": Could not save file."

    elif command == 'list':

        listAllUserEvents(sys.argv[2])

    else:
        print "Unregistered command. Exiting"
        sys.exit(1)
Example #28
    def _write_to_avro(self, log, fields):
        msglist = []
        msg, tags = {}, {}

        msg = {'service': fields['serviceType'],
               'timestamp': fields['timestamp'],
               'hostname': fields['hostName'],
               'metric': fields['metricName'],
               'status': fields['metricStatus']}
        msgattrmap = {'detailsData': 'message',
                      'summaryData': 'summary',
                      'nagios_host': 'monitoring_host'}
        for attr in msgattrmap.keys():
            if attr in fields:
                msg[msgattrmap[attr]] = fields[attr]

        tagattrmap = {'ROC': 'roc', 'voName': 'voName', 'voFqan': 'voFqan'}
        for attr in tagattrmap.keys():
            tags[tagattrmap[attr]] = fields.get(attr, None)
        if tags:
            msg['tags'] = tags

        if ',' in fields['serviceType']:
            servtype = fields['serviceType'].split(',')
            msg['service'] = servtype[0].strip()
            msglist.append(msg)
            copymsg = msg.copy()
            copymsg['service'] = servtype[1].strip()
            msglist.append(copymsg)
        else:
            msglist.append(msg)

        sh.thlock.acquire(True)
        try:
            schema = avro.schema.parse(open(self.avroSchema).read())
            if path.exists(log):
                avroFile = open(log, 'a+')
                writer = DataFileWriter(avroFile, DatumWriter())
            else:
                avroFile = open(log, 'w+')
                writer = DataFileWriter(avroFile, DatumWriter(), schema)

            for m in msglist:
                writer.append(m)

            writer.close()
            avroFile.close()

        except (IOError, OSError) as e:
            sh.Logger.error(e)
            raise SystemExit(1)

        finally:
            sh.thlock.release()
Example #29
def testWrite(filename, schema):
    fd = open(filename, 'wb')

    datum = DatumWriter()
    writer = DataFileWriter(fd, datum, schema)

    writer.append(makeObject("Person A", 23))
    writer.append(makeObject("Person B", 31))
    writer.append(makeObject("Person C", 28))

    writer.close()
Example #30
File: pyavro.py Project: yuyiguo/WMArchive
def write(fin, fout, schema):
    "write json to avro"
    schema = avro.schema.parse(open(schema).read())
    data = json.load(open(fin, 'r'))
    writer = DataFileWriter(open(fout, "w"), DatumWriter(), schema)
    if  isinstance(data, list):
        for doc in data:
            writer.append(doc)
    else:
        writer.append(data)
    writer.close()
Example #31
File: test-file.py Project: 3rwww1/Hadoop
def testWrite(filename, schema):
    fd = open(filename, 'wb')

    datum = DatumWriter()
    writer = DataFileWriter(fd, datum, schema)

    writer.append(makeObject("Person A", 23))
    writer.append(makeObject("Person B", 31))
    writer.append(makeObject("Person C", 28))

    writer.close()
Example #32
def write(fin, fout, schema):
    "write json to avro"
    schema = avro.schema.parse(open(schema).read())
    data = json.load(open(fin, 'r'))
    writer = DataFileWriter(open(fout, "w"), DatumWriter(), schema)
    if isinstance(data, list):
        for doc in data:
            writer.append(doc)
    else:
        writer.append(data)
    writer.close()
Example #33
	def make_record_set(self, schema_path: str, items: list) -> bytes:
		if schema_path not in self.schemas:
			with open(schema_path, 'rb') as raw:
				self.schemas[schema_path] = avro.schema.Parse(raw.read())
		out = BytesIO()
		writer = DataFileWriter(out, DatumWriter(), self.schemas[schema_path])
		for item in items:
			writer.append(item)
		writer.flush()

		return out.getvalue()
Example #34
    def writer(self, outputs, stdout, stderr=sys.stderr):
        """Overrides base method for hadoop.JobTask
        """
        schema = avro.schema.parse(json.dumps(self.avro_schema()))

        writer = DataFileWriter(stdout, DatumWriter(), schema)
        
        for output in outputs:
            writer.append(output[1])
        # No need to call close; the luigi job will do that.
        writer.flush()
Example #35
def main(schema_fn, csv_fn, avro_fn):

    with open(schema_fn) as f_in:
        schema = avro.schema.parse(f_in.read())

    with open(csv_fn) as f_in:
        reader = csv.reader(f_in, delimiter=';')
        with open(avro_fn, 'wb') as f_out:
            writer = DataFileWriter(f_out, DatumWriter(), schema)
            for row in reader:
                writer.append(dict(zip(FIELDS, row)))
            writer.close()
Example #36
def objToBinTmp2():
    with tempfile.SpooledTemporaryFile(suffix='.avro') as tmp:
        writer = DataFileWriter(tmp, DatumWriter(), sc)
        for d in datum:
            writer.append(d)
        writer.flush()
        tmp.seek(0)
        ab = tmp.read()
        writer.close()
        tmp.close()

    return ab
Example #37
def serialize_records(msgs, schema) -> bytes:
    with io.BytesIO() as buf:
        writer = DataFileWriter(buf, DatumWriter(),
                                avro.schema.parse(json.dumps(schema)))
        for line_item in msgs:
            #print(f"SERRECORD {line_item}")
            writer.append(line_item)

        writer.flush()
        record = buf.getvalue()

        return record
Example #38
def create_archive(basedir, destdir):
    all_files = []
    all_dirs = []

    # make a snapshot in case the output directory is the bundle source - so we don't recursively bundle the output
    for path, dirs, files in os.walk(basedir):
        for d in dirs:
            dir = os.path.join(path, d)
            all_dirs.append(dir)
        for f in files:
            file = os.path.join(path, f)
            all_files.append(file)

    schema = avro.schema.parse(
        open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         "avro-schemas.json")).read())
    fileprefix = time.strftime("%Y%m%d-%H%M%S")
    avrofile = fileprefix + "-part-0001.avro"
    iteration = 1

    fd = open(os.path.join(destdir, avrofile), 'wb')
    datum = avro.io.DatumWriter()
    writer = DataFileWriter(fd, datum, schema, codec='deflate')
    try:
        for d in all_dirs:
            val = makedir(os.path.basename(os.path.normpath(d)),
                          os.path.relpath(d, basedir))
            writer.append(val)

        for f in all_files:
            for sibling, numsiblings, chunk in get_file_chunks(f):
                if (fd.tell() + len(chunk)) > maxfilesize * 1.1:
                    fd, writer, iteration = rotate_avro_file(
                        fd, writer, iteration, fileprefix, destdir, datum,
                        schema)
                file = makefile(os.path.basename(os.path.normpath(f)),
                                os.path.relpath(f, basedir), numsiblings,
                                sibling, chunk)
                writer.append(file)
                writer.flush()
                del file

        for f in all_files:
            os.remove(f)

        for d in all_dirs:
            os.rmdir(d)

    finally:
        writer.close()
        fd.close()
Example #39
def test_datalake_origin_with_avro(sdc_builder, sdc_executor, azure):
    """Ensure that the origin can properly read Avro document."""
    directory_name = get_random_string(string.ascii_letters, 10)
    file_name = get_random_string(string.ascii_letters, 10)
    file = f'{directory_name}/{file_name}.avro'
    data = {'name': 'Arvind P.'}
    total_records = len(data)

    try:
        # Create Avro file (with temporary location)
        with open(f'{TMP}{file_name}', "wb") as data_file:
            writer = DataFileWriter(data_file, DatumWriter(),
                                    avro.schema.Parse(json.dumps(SCHEMA)))

            # Write data using DatumWriter
            writer.append(data)
            writer.close()

        # And upload it to ADSL
        with open(f'{TMP}{file_name}', 'rb') as fp:
            dl_fs = azure.datalake.file_system
            dl_fs.mkdir(directory_name)
            dl_fs.touch(file)
            dl_fs.write(file,
                        fp.read(),
                        content_type='application/octet-stream')

        # Build the origin pipeline
        builder = sdc_builder.get_pipeline_builder()

        origin = builder.add_stage(name=SOURCE_STAGE_NAME)
        origin.set_attributes(data_format='AVRO',
                              files_directory=f'/{directory_name}',
                              file_name_pattern='*')
        wiretap = builder.add_wiretap()
        origin >> wiretap.destination

        pipeline = builder.build().configure_for_environment(azure)
        sdc_executor.add_pipeline(pipeline)

        # start pipeline and read file in ADLS
        sdc_executor.start_pipeline(
            pipeline).wait_for_pipeline_output_records_count(total_records)
        sdc_executor.stop_pipeline(pipeline)

        assert len(wiretap.output_records) == 1
        assert wiretap.output_records[0].field['name'] == 'Arvind P.'
    finally:
        logger.info(
            'Azure Data Lake directory %s and underlying files will be deleted.',
            directory_name)
        dl_fs.rmdir(directory_name, recursive=True)
Example #40
def convert_file_to_avro():
    schema = avro.schema.parse(open(file_name + ".avsc").read())
    data = read_csv_from_hdfs(schema)
    writer = DataFileWriter(open(result_file_path, "wb"),
                            DatumWriter(),
                            schema,
                            codec='deflate')
    for count, row in enumerate(data):
        try:
            writer.append(row)
        except IndexError:
            print("Something is wrong in {0}".format(row))
    writer.close()
Example #41
def main():
    schema = avro.schema.parse(open("user.avsc", "rb").read())

    writer = DataFileWriter(open("users.avro", "wb"), DatumWriter(), schema)
    writer.append({"name": "Alyssa", "favorite_number": 256})
    writer.append({"name": "Ben", "favorite_number": 7, "favorite_color": "red"})
    writer.close()

    reader = DataFileReader(open("users.avro", "rb"), DatumReader())
    for user in reader:
        print(user)

    reader.close()
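
Example #41 mirrors the getting-started example in the Avro documentation and assumes a user.avsc file on disk. A self-contained variant that parses an equivalent schema inline could look like the sketch below; the field names are pinned by the records appended above, and avro.schema.parse is spelled avro.schema.Parse on some releases:

import json
import avro.schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter

# Inline equivalent of the user.avsc file the example reads from disk.
USER_SCHEMA = avro.schema.parse(json.dumps({
    "namespace": "example.avro",
    "type": "record",
    "name": "User",
    "fields": [
        {"name": "name", "type": "string"},
        {"name": "favorite_number", "type": ["int", "null"]},
        {"name": "favorite_color", "type": ["string", "null"]},
    ],
}))

with open("users.avro", "wb") as fh:
    writer = DataFileWriter(fh, DatumWriter(), USER_SCHEMA)
    writer.append({"name": "Alyssa", "favorite_number": 256})
    writer.append({"name": "Ben", "favorite_number": 7, "favorite_color": "red"})
    writer.close()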
Example #42
def read_and_write_avro_data():
    avsc_string = """{"namespace": "example.avro",
     "type": "record",
     "name": "User",
     "fields": [
         {"name": "name", "type": "string"},
         {"name": "age",  "type": ["int", "null"]},
         {"name": "sal", "type": ["long", "null"]},
         {"name": "xfloat", "type": ["float", "null"]},
         {"name": "xdouble", "type": ["double", "null"]},
         {"name": "xbytes", "type": ["bytes", "null"]},
         {"name": "xbool", "type": ["boolean", "null"]}
     ]
     }
    """

    # generate a avro schema file
    write_to_file(avro_schema, avsc_string)

    schema = avro.schema.Parse(open(avro_schema).read())

    writer = DataFileWriter(open(avro_data, "wb"), DatumWriter(), schema)
    writer.append({
        "name": "Alyssa",
        "age": 256,
        "sal": 30438940839849384,
        "xfloat": 983494.3434,
        "xdouble": 983498934.3434,
        "xbytes": b"52017-",
        "xbool": True
    })
    writer.append({
        "name": "dd5",
        "age": 6,
        "sal": 8940839849384,
        "xfloat": 983494.3434,
        "xbytes": b"dsd2017-",
        "xbool": True
    })
    writer.close()

    # load avro file
    reader = DataFileReader(open(avro_data, "rb"), DatumReader())
    for user in reader:
        print(user)
    reader.close()

    # cleanup
    os.remove(avro_schema)
    os.remove(avro_data)
Example #43
    def encode(self, event: BaseEvent) -> bytes:
        schema = self._schemas[event.name]

        if schema is None:
            raise NameError(
                f"No schema found to encode event with name {event.name}")

        output = BytesIO()
        writer = DataFileWriter(output, DatumWriter(), schema)
        writer.append(event.data)
        writer.flush()
        encoded_event = output.getvalue()
        writer.close()
        return encoded_event
Example #44
def simpleETL(config, rawJsonData):
    print("**********************Simple ET*************************")
    daysOfForecasts = len(rawJsonData["DailyForecasts"])
    logFolder = config["Log"]["Folder"]
    logFile = logFolder + config["Log"]["LogFile"]
    dWHForecastPath = config["ETL"]["Load"]["AvgData"]["DWHForecastPath"]
    days = []
    try:
        # ET
        for dayNumer in range(daysOfForecasts):
            dayDic = {}  # create an empty dictionary
            d = rawJsonData["DailyForecasts"][dayNumer]
            # print str(dayNumer)+'-----------'
            # read accu weather format
            date = d["Date"]
            minTemp = d["Temperature"]["Minimum"]["Value"]
            maxTemp = d["Temperature"]["Maximum"]["Value"]

            # load desire avro format
            dayDic["temperatureMin_C"] = minTemp
            dayDic["temperatureMax_C"] = maxTemp
            dayDic["date"] = date

            # print(date + " " + str(minTemp) + " " + str(maxTemp))
            days.append(dayDic)
        # L
        schemaFile = config["ETL"]["Load"]["Avro"]["SchemaFile"]
        schemaJson = json.load(open(schemaFile, "r"))
        # pp.pprint(schemaJson)
        dayAvroSchemaString = json.dumps(schemaJson)
        schema = avro.schema.Parse(dayAvroSchemaString)

        # create a writer
        dataAvro = dWHForecastPath+"simpleETL.avro"
        writer = DataFileWriter(open(dataAvro, "wb"),
                                DatumWriter(), schema)

        # append each day
        for day in days:
            # pp.pprint(day)
            writer.append(day)
        # close writer
        writer.close()
        print("**********************Simple Check**********************")
        _readAvro(dataAvro)

    except Exception as ex:
        print(ex)
        with open(logFile, "a") as file:
            file.write("{}\n".format(ex))
Example #45
def avro_dumps(data, schema):
    """dump the given data into an avro file with the provided schema"""
    schema = avro.schema.Parse(schema)
    fp = BytesIO()
    writer = DataFileWriter(fp, DatumWriter(), schema)
    if isinstance(data, list):
        for item in data:
            writer.append(item)
    else:
        writer.append(data)
    writer.flush()
    contents = fp.getvalue()
    fp.close()
    return contents
Example #46
def produce_kafka_messages(topic, cluster, message, data_format):
    """Send basic messages to Kafka"""
    producer = cluster.kafka.producer()

    basic_data_formats = [
        'XML', 'CSV', 'SYSLOG', 'NETFLOW', 'COLLECTD', 'BINARY', 'LOG',
        'PROTOBUF', 'JSON', 'TEXT'
    ]

    # Write records into Kafka depending on the data_format.
    if data_format in basic_data_formats:
        producer.send(topic, message)

    elif data_format == 'WITH_KEY':
        producer.send(topic,
                      message,
                      key=get_random_string(string.ascii_letters, 10).encode())

    elif data_format == 'AVRO':
        writer = avro.io.DatumWriter(avro.schema.Parse(json.dumps(SCHEMA)))
        bytes_writer = io.BytesIO()
        encoder = avro.io.BinaryEncoder(bytes_writer)
        writer.write(message, encoder)
        raw_bytes = bytes_writer.getvalue()
        producer.send(topic, raw_bytes)

    elif data_format == 'AVRO_WITHOUT_SCHEMA':
        bytes_writer = io.BytesIO()
        datum_writer = avro.io.DatumWriter(
            avro.schema.Parse(json.dumps(SCHEMA)))
        data_file_writer = DataFileWriter(writer=bytes_writer,
                                          datum_writer=datum_writer,
                                          writer_schema=avro.schema.Parse(
                                              json.dumps(SCHEMA)))
        data_file_writer.append(message)
        data_file_writer.flush()
        raw_bytes = bytes_writer.getvalue()
        data_file_writer.close()
        producer.send(topic, raw_bytes)

    logger.info('Flushing producer')
    producer.flush()

    logger.info('Validating that the message can be seen in Kafka')
    consumer = cluster.kafka.consumer(consumer_timeout_ms=5000,
                                      auto_offset_reset='earliest')
    consumer.subscribe([topic])

    msgs_received = [msg for msg in consumer]
    assert 1 == len(msgs_received)
Example #47
def dict_to_avro(data: Dict):
    # TO avro format file

    avro_schema = schema.Parse(open("rate.avsc", "rb").read())

    # write avro file
    writer = DataFileWriter(open("ratings.avro", "wb"), DatumWriter(), avro_schema)
    writer.append(data)
    writer.close()

    # read avro file
    reader = DataFileReader(open("ratings.avro", "rb"), DatumReader())
    for user in reader:
        pretty_print(json.dumps(user))
    reader.close()
Example #48
  def _write_data(self,
                  directory=None,
                  prefix=tempfile.template,
                  codec='null',
                  count=len(RECORDS)):

    with tempfile.NamedTemporaryFile(
        delete=False, dir=directory, prefix=prefix) as f:
      writer = DataFileWriter(f, DatumWriter(), self.SCHEMA, codec=codec)
      len_records = len(self.RECORDS)
      for i in range(count):
        writer.append(self.RECORDS[i % len_records])
      writer.close()

      self._temp_files.append(f.name)
      return f.name
Example #49
def generate_service_config(args, identifier, name, version, description, parameter, service_dir, service_schema, config_name, input_ports, output_ports):
    #Converting parameter tuples to param array
    params = []
    if parameter is not None:
        for p in parameter:
            param = {}
            param["key"] = p[0]
            param["name"] = p[1]
            param["parameterType"] = int(p[2])
            params.append(param)

    #Avro
    schema = avro.schema.parse(open(service_schema.name).read())
    writer = DataFileWriter(open(os.path.join(service_dir, config_name), "wb"), DatumWriter(), schema)
    writer.append({"id": identifier, "name": name, "version": version, "description": description, "inputPorts": input_ports, "outputPorts": output_ports, "params":params})
    writer.close()
Example #50
def handle_avro_print_to_file(message):

    schema = avro.schema.Parse(open("schema/addressbook.avsc", "rb").read())

    message_buf = io.BytesIO(message)
    reader = avro.datafile.DataFileReader(message_buf, avro.io.DatumReader())

    dataFile = open("schema/addressbook.avro", "wb")

    writer = DataFileWriter(dataFile, DatumWriter(), schema)

    for thing in reader:
        writer.append(thing)
    reader.close()

    writer.close()
Example #51
class AvroAppender(threading.Thread):
    def __init__(self, file):
        threading.Thread.__init__(self)
        self.avro_writer = DataFileWriter(open(file, "w"), DatumWriter(), schema)
        self.queue = Queue.Queue()
        self.should_stop = False
        self.mutex = threading.Lock()
        self.start()


    def log_append(self, user, advertiser, **kwargs):
        if user is not None and advertiser is not None:
            record = dict(user=user, advertiser=advertiser)
            if kwargs["ip"]:
                record["ip"] = kwargs["ip"]
            if kwargs["agent"]:
                record["agent"] = kwargs["agent"]
            if kwargs["time"]:
                record["timestamp"] = float(kwargs["time"])
            else:
                record["timestamp"] = float(time.time())
            if kwargs["keywords"]:
                record["keywords"] = list(set([string.strip() for string in kwargs["keywords"].split(",")]))
            self.queue.put_nowait(record)
        else:
            print "Missing user"


    def close_appender(self):
        self.mutex.acquire()
        self.should_stop = True
        self.mutex.release()

    def run(self):
        while True:
            try:
                record = self.queue.get(False, 1000)
                self.avro_writer.append(record)
            except Queue.Empty:
                self.mutex.acquire()
                stop = self.should_stop
                self.mutex.release()
                if stop:
                    break
        self.avro_writer.close()
Example #52
    def write(self):
        try:
            schema = avro.schema.parse(open(self.schema).read())
            avrofile = open(self.outfile, 'w+')
            datawrite = DataFileWriter(avrofile, DatumWriter(), schema)

            for elem in self.listdata:
                datawrite.append(elem)

            datawrite.close()
            avrofile.close()

        except (avro.schema.SchemaParseException, avro.io.AvroTypeException):
            self.logger.error(" couldn't parse %s" % self.schema)
            raise SystemExit(1)
        except IOError as e:
            self.logger.error(e)
            raise SystemExit(1)
Example #53
File: avro_test.py Project: ivernaloo/avro
def main():
  """Start of execution"""
  #combine the schemas 
  known_schemas = avro.schema.Names()
  types_schema = LoadAvsc("parameter_types.avsc", known_schemas)
  param_schema = LoadAvsc("parameter.avsc", known_schemas)
  print json.dumps(param_schema.to_json(avro.schema.Names()), indent=2) 
  #test the schema works 
  param_file = open("parameters.avro", "w")
  writer = DataFileWriter(param_file, DatumWriter(), param_schema)
  param_1 = {"name": "test", "description":"An Avro test.", "type":"int"}
  param_2 = {"name": "test", "description":"An Avro test.", "type":"boolean"}
  writer.append(param_1)
  writer.append(param_2)
  writer.close()
  reader = DataFileReader(open("parameters.avro", "r"), DatumReader())
  for parameter in reader:
      print parameter
  reader.close()  
Example #54
def readAndWriteAvro():
    """ Unlike java, avro does not let you generate
        code for Tweet in python. So only way to read and write
        data is without using code generation"""

    #Read the schema
    schema = avro.schema.parse(open("tweet.avsc").read())


    #write some data
    writer = DataFileWriter(open("tweets.avro", "w"), DatumWriter(), schema)
    writer.append({"tweetId": 5, "user": "******", "text" : "Tweeting from python as well"})
    writer.close()

    #read the same data
    tweets = DataFileReader(open("tweets.avro", "r"), DatumReader())
    for tweet in tweets:
        print tweet
    tweets.close()
Example #55
def main(argv):
    try:
        schema_fn = argv[1]
        n_users = int(argv[2])
        avro_fn = argv[3]
    except IndexError:
        sys.exit('Usage: %s SCHEMA_FILE N_USERS AVRO_FILE' % argv[0])
    with open(schema_fn) as f_in:
        schema = avro.schema.parse(f_in.read())
    with open(avro_fn, 'wb') as f_out:
        writer = DataFileWriter(f_out, DatumWriter(), schema)
        for i in xrange(n_users):
            writer.append({
                'name': random.choice(NAME_POOL),
                'office': random.choice(OFFICE_POOL),
                'favorite_color': random.choice(COLOR_POOL),
                'favorite_number': i,
            })
        writer.close()
Example #56
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', nargs=1, help='new schema', required=True, metavar='avro schema')
    parser.add_argument('-i', nargs='+', help='avro files', required=True, metavar='avro file')
    parser.add_argument('-ts', action='store_true', help='convert int tag values to str', required=False)
    parser.add_argument('-o', nargs=1, help='output directory', required=True, metavar='output directory')
    args = parser.parse_args()

    for f in args.i:
        out = []

        if args.o[0].startswith('/'):
            dest = args.o[0]
        else:
            dest = os.path.abspath('.') + '/' + args.o[0]

        try:
            os.makedirs(dest)
        except OSError as e:
            if e.args[0] != errno.EEXIST:
                print os.strerror(e.args[0]), e.args[1], args.o[0]
                raise SystemExit(1)

        schema = avro.schema.parse(open(args.s[0]).read())
        writer = DataFileWriter(open(dest + '/' + os.path.basename(f), 'w'), DatumWriter(), schema)
        reader = DataFileReader(open(f, 'r'), DatumReader())

        try:
            for i, entry in enumerate(reader):
                if args.ts:
                    for t in entry['tags']:
                        if isinstance(entry['tags'][t], int):
                            entry['tags'][t] = str(entry['tags'][t])
                writer.append(entry)

            writer.close()

        except UnicodeDecodeError as e:
            pprint.pprint(e)
            print f
Example #57
File: test.py Project: OpenGeoscience/nex
def traditional_avro(N):
    from avro.datafile import DataFileReader, DataFileWriter
    from avro.io import DatumWriter



    writer = DataFileWriter(open("traditional_avro_{}_ints.avro".format(N),
                             "w"), DatumWriter(), schema)
    try:
        INTERVAL=1
        import numpy as np
        t_start = time.time()
        t0 = time.time()
        nums = np.random.random_integers(0, 100, (N, 4))
        print("Generated data ({:.2f})".format(time.time() - t0))



        i = 0
        t0 = time.time()
        for item in nums:
            writer.append(dict(zip((col1, col2, col3, col4), item)))

            if (time.time() - t0) > INTERVAL:
                print_status("Completed {0:.2f}% ({1:.2f})".format(
                    (i / float(N)) * 100,
                    time.time() - t_start))

                t0 = time.time()
            i = i + 1
        print("\n")

        print("Finished ({:.2f})".format(time.time() - t_start))
        return (N, time.time() - t_start)

    except Exception, e:
        raise e
Example #58
 def _produce_test_input(self):
     schema = avro.schema.parse("""
     {
       "type":"record",
       "name":"TrackEntity2",
       "namespace":"com.spotify.entity.schema",
       "doc":"Track entity merged from various sources",
       "fields":[
         {
           "name":"map_record",
           "type":{
             "type":"map",
             "values":{
               "type":"record",
               "name":"MapNestedRecordObj",
               "doc":"Nested Record in a map doc",
               "fields":[
                 {
                   "name":"element1",
                   "type":"string",
                   "doc":"element 1 doc"
                 },
                 {
                   "name":"element2",
                   "type":[
                     "null",
                     "string"
                   ],
                   "doc":"element 2 doc"
                 }
               ]
             }
           },
           "doc":"doc for map"
         },
         {
           "name":"additional",
           "type":{
             "type":"map",
             "values":"string"
           },
           "doc":"doc for second map record"
         },
         {
           "name":"track_gid",
           "type":"string",
           "doc":"Track GID in hexadecimal string"
         },
         {
           "name":"track_uri",
           "type":"string",
           "doc":"Track URI in base62 string"
         },
         {
           "name":"Suit",
           "type":{
             "type":"enum",
             "name":"Suit",
             "doc":"enum documentation broz",
             "symbols":[
               "SPADES",
               "HEARTS",
               "DIAMONDS",
               "CLUBS"
             ]
           }
         },
         {
           "name":"FakeRecord",
           "type":{
             "type":"record",
             "name":"FakeRecord",
             "namespace":"com.spotify.data.types.coolType",
             "doc":"My Fake Record doc",
             "fields":[
               {
                 "name":"coolName",
                 "type":"string",
                 "doc":"Cool Name doc"
               }
             ]
           }
         },
         {
           "name":"master_metadata",
           "type":[
             "null",
             {
               "type":"record",
               "name":"MasterMetadata",
               "namespace":"com.spotify.data.types.metadata",
               "doc":"metadoc",
               "fields":[
                 {
                   "name":"track",
                   "type":[
                     "null",
                     {
                       "type":"record",
                       "name":"Track",
                       "doc":"Sqoop import of track",
                       "fields":[
                         {
                           "name":"id",
                           "type":[
                             "null",
                             "int"
                           ],
                           "doc":"id description field",
                           "default":null,
                           "columnName":"id",
                           "sqlType":"4"
                         },
                         {
                           "name":"name",
                           "type":[
                             "null",
                             "string"
                           ],
                           "doc":"name description field",
                           "default":null,
                           "columnName":"name",
                           "sqlType":"12"
                         }
                       ],
                       "tableName":"track"
                     }
                   ],
                   "default":null
                 }
               ]
             }
           ]
         },
         {
           "name":"children",
           "type":{
             "type":"array",
             "items":{
               "type":"record",
               "name":"Child",
               "doc":"array of children documentation",
               "fields":[
                 {
                   "name":"name",
                   "type":"string",
                   "doc":"my specific child\'s doc"
                 }
               ]
             }
           }
         }
       ]
     }""")
     self.addCleanup(os.remove, "tmp.avro")
     writer = DataFileWriter(open("tmp.avro", "wb"), DatumWriter(), schema)
     writer.append({
         u'track_gid': u'Cool guid',
         u'map_record': {
             u'Cool key': {
                 u'element1': u'element 1 data',
                 u'element2': u'element 2 data'
             }
         },
         u'additional': {
             u'key1': u'value1'
         }, u'master_metadata': {
             u'track': {
                 u'id': 1,
                 u'name': u'Cool Track Name'
             }
         }, u'track_uri': u'Totally a url here',
         u'FakeRecord': {
             u'coolName': u'Cool Fake Record Name'
         },
         u'Suit': u'DIAMONDS',
         u'children': [
             {
                 u'name': u'Bob'
             },
             {
                 u'name': u'Joe'
             }
         ]
     })
     writer.close()
     self.gcs_client.put("tmp.avro", self.gcs_dir_url + "/tmp.avro")
Example #59
def writeFile():
    writer = DataFileWriter(open("part-00000.avro", "w"), DatumWriter(), schema)
    writer.append({"logline": "2016\t30"})
    writer.close()