Example #1
def save_video(config, task, out, clip):
    # write to user defined local file
    if out.get('file'):
        clip.write_videofile(
            out['file'], codec='libx264', audio_codec='aac',
            logger=None)  # logger=None needed or ffmpeg writes to stderr

    # for storage, write to temporary file ( no alternative with moviepy ), then upload
    if out.get('storage', {}).get('file'):
        temporary_file_name = '/tmp/%s_%s' % (uuid.uuid1(),
                                              out['storage']['file'])
        clip.write_videofile(
            temporary_file_name,
            codec='libx264',
            audio_codec='aac',
            logger=None)  # logger=None needed or ffmpeg writes to stderr
        with open(temporary_file_name, 'rb') as temporary_file:
            object_put(config,
                       task['auth'],
                       '%s:%s' %
                       (out['storage']['bucket'], out['storage']['file']),
                       temporary_file,
                       mimetype=mimetypes.guess_type(out['storage']['file'],
                                                     strict=False)[0])
        os.remove(temporary_file_name)

    if out.get('dcm'):
        print('DCM not implemented yet.')

    # for youtube, write to temporary file ( no alternative with moviepy ), then upload
    if out.get('youtube', {}).get('title'):
        temporary_file_name = '/tmp/%s.mp4' % uuid.uuid1()
        clip.write_videofile(
            temporary_file_name,
            codec='libx264',
            audio_codec='aac',
            logger=None)  # logger=None needed or ffmpeg writes to stderr

        body = {
            'snippet': {
                'title': out['youtube']['title'],
                'description': out['youtube']['description'],
                'tags': out['youtube']['tags'],
                'categoryId': out['youtube']['category']
            },
            'status': {
                'privacyStatus': out['youtube']['privacy']
            }
        }
        try:
            API_YouTube(config, task['auth']).videos().insert(
                part=','.join(body.keys()),
                body=body,
                media_body=MediaFileUpload(temporary_file_name,
                                           chunksize=CHUNKSIZE,
                                           resumable=True)).upload()
        finally:
            os.remove(temporary_file_name)
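
# A minimal sketch of the 'out' block save_video consumes, inferred from the keys
# it reads above; all values below are placeholder assumptions, not recipe defaults.
example_out = {
    'file': '/tmp/final.mp4',  # optional local destination
    'storage': {  # optional Cloud Storage destination
        'bucket': 'my-bucket',
        'file': 'final.mp4'
    },
    'youtube': {  # optional YouTube upload
        'title': 'My Video',
        'description': 'Rendered by this recipe.',
        'tags': ['demo'],
        'category': '22',
        'privacy': 'private'
    }
}
# save_video(config, task, example_out, clip)  # clip is a moviepy VideoClip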
Example #2
def bigquery():

    if 'run' in project.task and 'query' in project.task.get('run', {}):
        if project.verbose: print("QUERY", project.task['run']['query'])
        run_query(
            project.task['auth'],
            project.id,
            project.task['run']['query'],
            project.task['run'].get('legacy', True),
            #project.task['run'].get('billing_project_id', None)
        )

    elif 'values' in project.task['from']:
        rows = get_rows(project.task['auth'], project.task['from'])

        rows_to_table(project.task['to'].get('auth', project.task['auth']),
                      project.id, project.task['to']['dataset'],
                      project.task['to']['table'], rows,
                      project.task.get('schema', []), 0)

    elif 'query' in project.task['from']:
        if 'table' in project.task['to']:
            if project.verbose:
                print("QUERY TO TABLE", project.task['to']['table'])

            if 'pre_process_query' in project.task['to']:
                print('executing statement')
                execute_statement(project.task['auth'],
                                  project.id,
                                  project.task['to']['dataset'],
                                  project.task['to']['pre_process_query'],
                                  use_legacy_sql=project.task['from'].get(
                                      'legacy', project.task['from'].get(
                                          'useLegacySql', True)))
            query_to_table(
                project.task['auth'],
                project.id,
                project.task['to']['dataset'],
                project.task['to']['table'],
                query_parameters(project.task['from']['query'],
                                 project.task['from'].get('parameters')),
                disposition=project.task.get('write_disposition',
                                             'WRITE_TRUNCATE'),
                legacy=project.task['from'].get(
                    'legacy', project.task['from'].get(
                        'useLegacySql', True)),  # DEPRECATED: useLegacySql,
                target_project_id=project.task['to'].get(
                    'project_id', project.id))
        # still used, see project/mauriciod/target_winrate.json
        elif 'storage' in project.task['to']:
            if project.verbose:
                print("QUERY TO STORAGE", project.task['to']['storage'])
            local_file_name = '/tmp/%s' % str(uuid.uuid1())
            rows = query_to_rows(project.task['auth'], project.id,
                                 project.task['from']['dataset'],
                                 project.task['from']['query'])

            # csv.writer needs a text-mode file; newline='' avoids blank lines
            with open(local_file_name, 'w', newline='') as f:
                writer = csv.writer(f)
                writer.writerows(rows)

            with open(local_file_name, 'rb') as f:
                object_put(project.task['auth'], project.task['to']['storage'],
                           f)

            os.remove(local_file_name)
        elif 'sheet' in project.task['to']:
            if project.verbose:
                print("QUERY TO SHEET", project.task['to']['sheet'])
            rows = query_to_rows(project.task['auth'],
                                 project.id,
                                 project.task['from']['dataset'],
                                 project.task['from']['query'],
                                 legacy=project.task['from'].get(
                                     'legacy', True))

            # makes sure types are correct in sheet
            rows = rows_to_type(rows)

            sheets_clear(project.task['auth'], project.task['to']['sheet'],
                         project.task['to']['tab'],
                         project.task['to'].get('range', 'A2'))
            sheets_write(project.task['auth'], project.task['to']['sheet'],
                         project.task['to']['tab'],
                         project.task['to'].get('range', 'A2'), rows)
        elif 'sftp' in project.task['to']:
            rows = query_to_rows(project.task['auth'],
                                 project.id,
                                 project.task['from']['dataset'],
                                 project.task['from']['query'],
                                 legacy=project.task['from'].get(
                                     'use_legacy_sql', True))

            if rows:
                if project.verbose: print("QUERY TO SFTP")
                put_rows(project.task['auth'], project.task['to'], rows)
        else:
            if project.verbose:
                print("QUERY TO VIEW", project.task['to']['view'])
            query_to_view(
                project.task['auth'],
                project.id,
                project.task['to']['dataset'],
                project.task['to']['view'],
                query_parameters(project.task['from']['query'],
                                 project.task['from'].get('parameters')),
                project.task['from'].get(
                    'legacy', project.task['from'].get(
                        'useLegacySql', True)),  # DEPRECATED: useLegacySql
                project.task['to'].get('replace', False))
    else:
        if project.verbose:
            print("STORAGE TO TABLE", project.task['to']['table'])
        storage_to_table(
            project.task['auth'], project.id, project.task['to']['dataset'],
            project.task['to']['table'], project.task['from']['bucket'] + ':' +
            project.task['from']['path'], project.task.get('schema', []),
            project.task.get('skip_rows', 1),
            project.task.get('structure', 'CSV'),
            project.task.get('disposition', 'WRITE_TRUNCATE'))
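
# A minimal sketch of one task shape this handler accepts (query to table), built
# only from keys read above; the auth, dataset, table, and query values are placeholders.
example_task = {
    'auth': 'service',
    'from': {
        'query': 'SELECT 1 AS value',
        'legacy': False
    },
    'to': {
        'dataset': 'my_dataset',
        'table': 'my_table'
    }
}
# With project.task set to example_task, bigquery() runs the query and writes the
# result to my_dataset.my_table, defaulting to the WRITE_TRUNCATE disposition.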
Example #3
def put_rows(auth, destination, rows, variant=''):
    """Processes standard write JSON block for dynamic export of data.

  Allows us to quickly write the results of a script to a destination. For
  example, write the results of a DCM report into BigQuery.

  - Will write to multiple destinations if specified.
  - Extensible, add a handler to define a new destination ( be kind, update the
    documentation json ).

  Include the following JSON in a recipe, then in the run.py handler when
  encountering that block pass it to this function and use the returned results.

    from utils.data import put_rows

    var_json = {
      "out":{
        "bigquery":{
          "dataset": [ string ],
          "table": [ string ]
          "schema": [ json - standard bigquery schema json ],
          "skip_rows": [ integer - for removing header ]
          "disposition": [ string - same as BigQuery documentation ]
        },
        "sheets":{
          "sheet":[ string - full URL, suggest using share link ],
          "tab":[ string ],
          "range":[ string - A1:A notation ]
          "delete": [ boolean - if sheet range should be cleared before writing
          ]
        },
        "storage":{
          "bucket": [ string ],
          "path": [ string ]
        },
        "file":[ string - full path to place to write file ]
      }
    }

    put_rows('user', var_json['out'], rows)

  Or you can use it directly with project singleton.

    from util.project import project
    from utils.data import put_rows

    @project.from_parameters
    def something():
      put_rows(project.task['auth'], project.task['out'], rows)

    if __name__ == "__main__":
      something()

  Args:
    auth: (string) The type of authentication to use, user or service.
    destination: (json) A json block resembling var_json described above.
    rows: (list) The data being written as a list object.
    variant: (string) Appended to the destination name to differentiate multiple objects.

  Returns:
    None. Writes the rows to each destination specified in the block.
"""

    if 'bigquery' in destination:

        if destination['bigquery'].get('format', 'CSV') == 'JSON':
            json_to_table(
                destination['bigquery'].get('auth', auth),
                destination['bigquery'].get('project_id', project.id),
                destination['bigquery']['dataset'],
                destination['bigquery']['table'] + variant,
                rows,
                destination['bigquery'].get('schema', []),
                destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
            )

        elif destination['bigquery'].get('is_incremental_load', False):
            incremental_rows_to_table(
                destination['bigquery'].get('auth', auth),
                destination['bigquery'].get('project_id', project.id),
                destination['bigquery']['dataset'],
                destination['bigquery']['table'] + variant,
                rows,
                destination['bigquery'].get('schema', []),
                destination['bigquery'].get(
                    'skip_rows',
                    1),  #0 if 'schema' in destination['bigquery'] else 1),
                destination['bigquery'].get('disposition', 'WRITE_APPEND'),
                billing_project_id=project.id)

        else:
            rows_to_table(
                destination['bigquery'].get('auth', auth),
                destination['bigquery'].get('project_id', project.id),
                destination['bigquery']['dataset'],
                destination['bigquery']['table'] + variant,
                rows,
                destination['bigquery'].get('schema', []),
                destination['bigquery'].get(
                    'skip_rows',
                    1),  #0 if 'schema' in destination['bigquery'] else 1),
                destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
            )

    if 'sheets' in destination:
        if destination['sheets'].get('delete', False):
            sheets_clear(
                auth,
                destination['sheets']['sheet'],
                destination['sheets']['tab'] + variant,
                destination['sheets']['range'],
            )

        sheets_write(auth, destination['sheets']['sheet'],
                     destination['sheets']['tab'] + variant,
                     destination['sheets']['range'], rows)

    if 'file' in destination:
        path_out, file_ext = destination['file'].rsplit('.', 1)
        file_out = path_out + variant + '.' + file_ext
        if project.verbose:
            print('SAVING', file_out)
        makedirs_safe(parse_path(file_out))
        with open(file_out, 'w') as save_file:
            save_file.write(rows_to_csv(rows).read())

    if 'storage' in destination and destination['storage'].get(
            'bucket') and destination['storage'].get('path'):
        # create the bucket
        bucket_create(auth, project.id, destination['storage']['bucket'])

        # put the file
        file_out = destination['storage']['bucket'] + ':' + destination[
            'storage']['path'] + variant
        if project.verbose:
            print('SAVING', file_out)
        object_put(auth, file_out, rows_to_csv(rows))

    if 'sftp' in destination:
        try:
            cnopts = pysftp.CnOpts()
            cnopts.hostkeys = None

            path_out, file_ext = destination['sftp']['file'].rsplit('.', 1)
            file_out = path_out + variant + '.' + file_ext

            sftp = pysftp.Connection(host=destination['sftp']['host'],
                                     username=destination['sftp']['username'],
                                     password=destination['sftp']['password'],
                                     port=destination['sftp']['port'],
                                     cnopts=cnopts)

            if '/' in file_out:
                dir_out, file_out = file_out.rsplit('/', 1)
                sftp.cwd(dir_out)

            sftp.putfo(rows_to_csv(rows), file_out)

        except Exception as e:
            print(str(e))
            traceback.print_exc()
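
# A minimal call sketch for this version of put_rows, following the docstring above;
# the dataset and table names are placeholders and rows would normally come from a
# report or query helper.
example_destination = {
    'bigquery': {
        'dataset': 'my_dataset',
        'table': 'my_table',
        'schema': [],  # standard BigQuery schema json (see docstring)
        'skip_rows': 1,  # drop the header row
        'disposition': 'WRITE_TRUNCATE'
    }
}
example_rows = [['header'], ['value_1'], ['value_2']]
# put_rows('service', example_destination, example_rows)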
Example #4
def put_rows(auth, destination, filename, rows, variant=''):
    """Processes standard write JSON block for dynamic export of data.
  
  Allows us to quickly write the results of a script to a destination. For example,
  write the results of a DCM report into BigQuery.

  - Will write to multiple destinations if specified.
  - Extensible, add a handler to define a new destination ( be kind, update the documentation json ).

  Include the following JSON in a recipe, then in the run.py handler when
  encountering that block pass it to this function and use the returned results.
  
    from utils.data import put_rows
  
    var_json = {
      "out":{
        "bigquery":{
          "dataset": [ string ],
          "table": [ string ]
          "schema": [ json - standard bigquery schema json ],
          "skip_rows": [ integer - for removing header ]
          "disposition": [ string - same as BigQuery documentation ]
        },
        "sheets":{
          "url":[ string - full URL, suggest using share link ],
          "tab":[ string ],
          "range":[ string - A1:A notation ]
          "delete": [ boolean - if sheet range should be cleared before writing ]
        },
        "storage":{
          "bucket": [ string ],
          "path": [ string ]
        },
        "directory":[ string - full path to place to write file ]
      } 
    } 
  
    put_rows('user', var_json['out'], filename, rows)
  
  Or you can use it directly with project singleton.
  
    from util.project import project
    from utils.data import put_rows
  
    @project.from_parameters
    def something():
      put_rows(project.task['auth'], project.task['out'], filename, rows)
  
    if __name__ == "__main__":
      something()
  
  Args:
    auth: (string) The type of authentication to use, user or service.
    destination: (json) A json block resembling var_json described above.
    filename: (string) A unique filename if writing to a medium requiring one, usually generated by the script.
    rows: (list) The data being written as a list object.
    variant: (string) Appended to the destination name to create a variant ( for example when downloading multiple tabs in a sheet ).

  Returns:
    None. Writes the rows to each destination specified in the block.
"""

    if 'bigquery' in destination:

        if destination['bigquery'].get('format', 'CSV') == 'JSON':
            json_to_table(
                destination['bigquery'].get('auth', auth),
                destination['bigquery'].get('project_id', project.id),
                destination['bigquery']['dataset'],
                destination['bigquery']['table'] + variant,
                rows,
                destination['bigquery'].get('schema', []),
                destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
            )

        elif destination['bigquery'].get('is_incremental_load', False):
            incremental_rows_to_table(
                destination['bigquery'].get('auth', auth),
                destination['bigquery'].get('project_id', project.id),
                destination['bigquery']['dataset'],
                destination['bigquery']['table'] + variant,
                rows,
                destination['bigquery'].get('schema', []),
                destination['bigquery'].get(
                    'skip_rows',
                    1),  #0 if 'schema' in destination['bigquery'] else 1),
                destination['bigquery'].get('disposition', 'WRITE_APPEND'),
                billing_project_id=project.id)

        else:
            rows_to_table(
                destination['bigquery'].get('auth', auth),
                destination['bigquery'].get('project_id', project.id),
                destination['bigquery']['dataset'],
                destination['bigquery']['table'] + variant,
                rows,
                destination['bigquery'].get('schema', []),
                destination['bigquery'].get(
                    'skip_rows',
                    1),  #0 if 'schema' in destination['bigquery'] else 1),
                destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
            )

    if 'sheets' in destination:
        if destination['sheets'].get('delete', False):
            sheets_clear(auth, destination['sheets']['sheet'],
                         destination['sheets']['tab'] + variant,
                         destination['sheets']['range'])
        sheets_write(auth, destination['sheets']['sheet'],
                     destination['sheets']['tab'] + variant,
                     destination['sheets']['range'], rows)

    if 'directory' in destination:
        file_out = destination['directory'] + variant + filename
        if project.verbose: print('SAVING', file_out)
        makedirs_safe(parse_path(file_out))
        with open(file_out, 'w') as save_file:
            save_file.write(rows_to_csv(rows).read())

    if 'storage' in destination and destination['storage'].get(
            'bucket') and destination['storage'].get('path'):
        # create the bucket
        bucket_create(auth, project.id, destination['storage']['bucket'])

        # put the file
        file_out = destination['storage']['bucket'] + ':' + destination[
            'storage']['path'] + variant + filename
        if project.verbose: print('SAVING', file_out)
        object_put(auth, file_out, rows_to_csv(rows))

    # deprecated do not use
    if 'trix' in destination:
        trix_update(auth, destination['trix']['sheet_id'],
                    destination['trix']['sheet_range'], rows_to_csv(rows),
                    destination['trix']['clear'])

    if 'email' in destination:
        pass

    if 'sftp' in destination:
        try:
            sys.stderr = StringIO()

            cnopts = pysftp.CnOpts()
            cnopts.hostkeys = None

            file_prefix = 'report'
            if 'file_prefix' in destination['sftp']:
                file_prefix = destination['sftp'].get('file_prefix')
                del destination['sftp']['file_prefix']

            #sftp_configs = destination['sftp']
            #sftp_configs['cnopts'] = cnopts
            #sftp = pysftp.Connection(**sftp_configs)

            sftp = pysftp.Connection(host=destination['sftp']['host'],
                                     username=destination['sftp']['username'],
                                     password=destination['sftp']['password'],
                                     port=destination['sftp']['port'],
                                     cnopts=cnopts)

            if 'directory' in destination['sftp']:
                sftp.cwd(destination['sftp']['directory'])

            tmp_file_name = '/tmp/%s_%s.csv' % (
                file_prefix,
                datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S'))

            with open(tmp_file_name, 'w') as tmp_file:
                tmp_file.write(rows_to_csv(rows).read())

            sftp.put(tmp_file_name)

            os.remove(tmp_file_name)

            sys.stderr = sys.__stderr__
        except Exception as e:
            print(str(e))
            traceback.print_exc()
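
# A minimal call sketch for this older signature, which also takes a filename; the
# directory path and the filename are placeholders.
example_destination = {'directory': '/tmp/exports/'}
example_rows = [['header'], ['value_1'], ['value_2']]
# put_rows('user', example_destination, 'report.csv', example_rows)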
Example #5
def put_rows(config, auth, destination, rows, schema=None, variant=''):
  """Processes standard write JSON block for dynamic export of data.

  Allows us to quickly write the results of a script to a destination. For
  example, write the results of a DCM report into BigQuery.

  - Will write to multiple destinations if specified.
  - Extensible, add a handler to define a new destination ( be kind, update the
    documentation json ).

  Include the following JSON in a recipe, then in the run.py handler when
  encountering that block pass it to this function and use the returned results.

    from utils.data import put_rows

    var_json = {
      "out":{
        "bigquery":{
          "auth":"[ user or service ]",
          "dataset": [ string ],
          "table": [ string ]
          "schema": [ json - standard bigquery schema json ],
          "header": [ boolean - true if header exists in rows ]
          "disposition": [ string - same as BigQuery documentation ]
        },
        "sheets":{
          "auth":"[ user or service ]",
          "sheet":[ string - full URL, suggest using share link ],
          "tab":[ string ],
          "range":[ string - A1:A notation ]
          "append": [ boolean - if sheet range should be appended to ]
          "delete": [ boolean - if sheet range should be cleared before writing ]
          ]
        },
        "storage":{
          "auth":"[ user or service ]",
          "bucket": [ string ],
          "path": [ string ]
        },
        "file":[ string - full path to place to write file ]
      }
    }

    put_rows(config, 'user', var_json['out'], rows)

  Args:
    config: (object) Configuration object exposing .project and .verbose, as used above.
    auth: (string) The type of authentication to use, user or service.
    destination: (json) A json block resembling var_json described above.
    rows: (iterator) The rows to be written; if None no action is performed.
    schema: (json) A BigQuery schema definition.
    variant: (string) Appended to the destination name to differentiate multiple objects.

  Returns:
    None. Writes the rows to each destination specified in the block.
"""

  if rows is None:
    if config.verbose:
      print('PUT ROWS: Rows is None, ignoring write.')
    return

  if 'bigquery' in destination:

    if not schema:
      schema = destination['bigquery'].get('schema')

    skip_rows = 1 if destination['bigquery'].get('header') and schema else 0

    if destination['bigquery'].get('format', 'CSV') == 'JSON':
      json_to_table(
          config,
          destination['bigquery'].get('auth', auth),
          destination['bigquery'].get('project_id', config.project),
          destination['bigquery']['dataset'],
          destination['bigquery']['table'] + variant,
          rows,
          schema,
          destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

    elif destination['bigquery'].get('is_incremental_load', False):
      incremental_rows_to_table(
          config,
          destination['bigquery'].get('auth', auth),
          destination['bigquery'].get('project_id', config.project),
          destination['bigquery']['dataset'],
          destination['bigquery']['table'] + variant,
          rows,
          schema,
          destination['bigquery'].get('skip_rows', skip_rows),
          destination['bigquery'].get('disposition', 'WRITE_APPEND'),
          billing_project_id=config.project)

    else:

      rows_to_table(
          config,
          destination['bigquery'].get('auth', auth),
          destination['bigquery'].get('project_id', config.project),
          destination['bigquery']['dataset'],
          destination['bigquery']['table'] + variant,
          rows,
          schema,
          destination['bigquery'].get('skip_rows', skip_rows),
          destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

  if 'sheets' in destination:
    if destination['sheets'].get('delete', False):
      sheets_clear(
        config,
        destination['sheets'].get('auth', auth),
        destination['sheets']['sheet'],
        destination['sheets']['tab'] + variant,
        destination['sheets']['range'],
      )

    sheets_write(
      config,
      destination['sheets'].get('auth', auth),
      destination['sheets']['sheet'],
      destination['sheets']['tab'] + variant,
      destination['sheets']['range'],
      rows_to_type(rows),
      destination['sheets'].get('append', False),
    )

  if 'file' in destination:
    path_out, file_ext = destination['file'].rsplit('.', 1)
    file_out = path_out + variant + '.' + file_ext
    if config.verbose:
      print('SAVING', file_out)
    makedirs_safe(parse_path(file_out))
    with open(file_out, 'w') as save_file:
      save_file.write(rows_to_csv(rows).read())

  if 'storage' in destination and destination['storage'].get(
      'bucket') and destination['storage'].get('path'):
    bucket_create(
      config,
      destination['storage'].get('auth', auth),
      config.project,
      destination['storage']['bucket']
    )

    # put the file
    file_out = destination['storage']['bucket'] + ':' + destination['storage'][
        'path'] + variant
    if config.verbose:
      print('SAVING', file_out)
    object_put(config, auth, file_out, rows_to_csv(rows))

  if 'sftp' in destination:
    try:
      cnopts = pysftp.CnOpts()
      cnopts.hostkeys = None

      path_out, file_ext = destination['sftp']['file'].rsplit('.', 1)
      file_out = path_out + variant + '.' + file_ext

      sftp = pysftp.Connection(
          host=destination['sftp']['host'],
          username=destination['sftp']['username'],
          password=destination['sftp']['password'],
          port=destination['sftp']['port'],
          cnopts=cnopts)

      if '/' in file_out:
        dir_out, file_out = file_out.rsplit('/', 1)
        sftp.cwd(dir_out)

      sftp.putfo(rows_to_csv(rows), file_out)

    except Exception as e:
      print(str(e))
      traceback.print_exc()
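
# A minimal call sketch for the config-based signature; the sheet URL, tab, and range
# are placeholders, and config is assumed to expose .project and .verbose as used above.
example_destination = {
    'sheets': {
        'auth': 'user',
        'sheet': 'https://docs.google.com/spreadsheets/d/SHEET_ID/edit',
        'tab': 'Data',
        'range': 'A1',
        'delete': True  # clear the range before writing
    }
}
# put_rows(config, 'user', example_destination, [['a', 1], ['b', 2]])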