Example #1
 def test_media_inmemory_upload_json_roundtrip(self):
     media = MediaInMemoryUpload(os.urandom(64), "text/plain", chunksize=10, resumable=True)
     data = media.to_json()
     newmedia = MediaInMemoryUpload.new_from_json(data)
     self.assertEqual(media._body, newmedia._body)
     self.assertEqual(media._chunksize, newmedia._chunksize)
     self.assertEqual(media._resumable, newmedia._resumable)
     self.assertEqual(media._mimetype, newmedia._mimetype)
Example #2
 def test_media_inmemory_upload_json_roundtrip(self):
     media = MediaInMemoryUpload(os.urandom(64),
                                 'text/plain',
                                 chunksize=10,
                                 resumable=True)
     data = media.to_json()
     newmedia = MediaInMemoryUpload.new_from_json(data)
     self.assertEqual(media._body, newmedia._body)
     self.assertEqual(media._chunksize, newmedia._chunksize)
     self.assertEqual(media._resumable, newmedia._resumable)
     self.assertEqual(media._mimetype, newmedia._mimetype)
Example #3
    def update_file_content(self, file_id, content):
        """Update the file's contents of the given id on google drive."""
        item = self.cache.get(file_id, None)
        if not item or item.metadata is None or item.expired:
            try:
                metadata = self.service.files().get(fileId=file_id).execute()
            except errors.HttpError as e:
                raise OperationFailedError(opname='update_file_content',
                                           msg=e.resp.reason)
            except Exception:
                return self._retry_operation(self.update_file_content,
                                             file_id, content)
            self.cache.set(metadata['id'], metadata)
        else:
            metadata = item.metadata

        media_body = MediaInMemoryUpload(content)
        try:
            updated_file = self.service.files().update(fileId=file_id,
                                                       body=metadata,
                                                       media_body=media_body
                                                       ).execute()
        except errors.HttpError as e:
            raise OperationFailedError(opname='update_file_content',
                                       msg=e.resp.reason)
        except TypeError as e:
            raise ResourceInvalidError("update_file_content %r" % e)
        except Exception:
            return self._retry_operation(self.update_file_content, file_id,
                                         content)
        self.cache.pop(file_id, None)
        self._add_to_cache_from_dict(updated_file, updated_file['parents'])
        return updated_file
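A hypothetical usage sketch for update_file_content() above; `fs` stands in for an authenticated instance of the surrounding filesystem class, and the file ID is a placeholder.

# Hypothetical usage of update_file_content() above; `fs` is an authenticated
# instance of the surrounding filesystem class, the file ID a placeholder.
updated = fs.update_file_content('0B_placeholder_id', b'new file body')
print(updated['id'])  # metadata of the updated file, as returned by Drive v2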
Example #4
    def new_block(self, idx, data=None):

        with self.lock:

            if idx >= self.block_count or idx < 0:
                raise ValueError("Index out of bound")
            if self.mapping[idx] is not None:
                raise ValueError("None empty mapping @ {0}".format(idx))
            if data is not None:
                assert len(data) == self.block_size
            else:
                data = "\0" * self.block_size

            body = {
                'title': self.idx_to_name(idx),
                'mimeType': self.BLOCK_MIMETYPE,
                'parents': [{
                    'id': self.data_dir
                }],
            }
            media_body = MediaInMemoryUpload(data,
                                             mimetype=self.BLOCK_MIMETYPE,
                                             resumable=False)

            result = self.drive.files().insert(
                body=body, media_body=media_body).execute()
            self.mapping[idx] = result['id']
            return result
Example #5
    def init_data_dir(self):

        logger.info("Initializing data dir")

        if 'default_block_size' in self.config:
            block_size = int(self.config['default_block_size'])
        else:
            block_size = int(raw_input("Desired block size: "))
        if 'default_total_size' in self.config:
            total_size = int(self.config['default_total_size'])
        else:
            total_size = int(raw_input("Total size: "))
        if total_size < block_size:
            raise ValueError("block_size should not be bigger than total_size.")

        used_size = total_size // block_size * block_size
        if used_size != total_size:
            logger.info("Only using {0} bytes instead of {1}".format(used_size, total_size))

        self.bd_attr = {
            'version': Metadata['version'],
            'block_size': block_size,
            'block_count': used_size // block_size,
        }
        body = {
            'title': 'config',
            'description': 'config file for gbd',
            'mimeType': 'application/json',
            'parents': [{'id': self.data_dir}],
        }
        media_body = MediaInMemoryUpload(json.dumps(self.bd_attr), mimetype='application/json', resumable=False)

        self.drive.files().insert(body=body, media_body=media_body).execute()
Example #6
def upload_file(file_data, file_name, mime='application/octet-stream'):
    try:
        open(token_file)
    except IOError:
        # The token file is missing: create it before authorizing.
        http = authorize(token_file, create_token_file(token_file))
    else:
        http = authorize(token_file, None)
    drive_service = build('drive', 'v2', http=http)
    file_data = base64.b64decode(file_data)
    media_body = MediaInMemoryUpload(file_data, mimetype=mime, resumable=True)
    body = {
        'title': file_name,
        'description': 'File from OpenERP',
        'mimeType': mime,
    }
    permissions = {
        'role': 'reader',
        'type': 'anyone',
        'value': None,
        'withLink': True
    }
    # Upload the file
    file = drive_service.files().insert(body=body,
                                        media_body=media_body).execute()
    # Set its permissions
    drive_service.permissions().insert(fileId=file['id'],
                                       body=permissions).execute()
    # Fetch the file instance and its download link
    file = drive_service.files().get(fileId=file['id']).execute()
    download_url = file.get('webContentLink')
    return download_url
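A usage sketch for upload_file() above; note the function expects its payload base64-encoded (as OpenERP sends it) and returns the public webContentLink.

# Hypothetical usage of upload_file() above; the payload must be
# base64-encoded, matching what the function b64decode()s internally.
import base64
payload = base64.b64encode(b'hello drive').decode('ascii')
url = upload_file(payload, 'hello.txt', mime='text/plain')
print(url)  # public download link (webContentLink)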
Example #7
def shatner(request):
  da = DriveAuth(request)
  creds = da.get_session_credentials()
  if creds is None:
    return http.HttpResponseForbidden('Login Again')
    
  task = request.POST.get('task', '')
  if task in ('open', 'save'):
    service = CreateService('drive', 'v2', creds)
    
    if service is None:
      return http.HttpResponseServerError('Something broke')
      
    if task == 'open':
      file_id = request.POST.get('file_id', '')
      if file_id:
        try:
          f = service.files().get(fileId=file_id).execute()
          
        except AccessTokenRefreshError:
          return http.HttpResponseForbidden('Login Again')
          
        downloadUrl = f.get('downloadUrl')
        f['content'] = ''
        if downloadUrl:
          resp, f['content'] = service._http.request(downloadUrl)
          
        return http.HttpResponse(
          json.dumps({'status': 'ok', 'file': f}),
          content_type='application/json'
        )
        
    elif task == 'save':
      mt = 'text/plain'
      name = request.POST.get('name')
      content = request.POST.get('content', '')
      file_id = request.POST.get('file_id', '')
        
      resource = {'title': name, 'mimeType': mt}
      
      file = MediaInMemoryUpload(content.encode('utf-8'), mt)
      try:
        google = service.files().update(
          fileId=file_id,
          newRevision=True,
          body=resource,
          media_body=file,
          useContentAsIndexableText=True,
        ).execute()
        
      except AccessTokenRefreshError:
        return http.HttpResponseForbidden('Login Again')
        
      return http.HttpResponse(
        json.dumps({'status': 'ok'}),
        content_type='application/json'
      )
      
  return http.HttpResponseBadRequest('Bad Request')
    
Example #8
 def createSpreadsheet(self,
                       sheetName,
                       description=None,
                       parentId=None,
                       csv=None,
                       rows=1000,
                       cols=26):
     '''
     sheetName: file name for Google Drive
     description: description for Google Drive (default same as sheetName)
     parentId: create into parent folder (default 'root')
     csv: string (default None: empty sheet)
     rows: int (default 1000)
     cols: int (default 26)
     '''
     body = {
         'title': sheetName,
         'mimeType': 'text/csv',  # to be converted
         'description': description if description else sheetName
     }
     if parentId is None: parentId = 'root'
     body['parents'] = [{'id': parentId}]
     mbody = MediaInMemoryUpload(csv if csv else '\n'.join([',' * cols] *
                                                           rows),
                                 mimetype='text/csv',
                                 chunksize=256 * 1024,
                                 resumable=False)
     req = self.service.files().insert(body=body, media_body=mbody)
     req.uri += '&convert=true'
     fileObj = req.execute()
     if fileObj is None: return (None, None)
     self.sheetId = fileObj['id']
     self.set_activesheet()
     return (self.sheetId, fileObj)
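A usage sketch for createSpreadsheet() above, assuming `gs` is an authenticated instance of the wrapping class; passing CSV text makes Drive convert it into a native sheet via the appended convert=true flag.

# Hypothetical usage of createSpreadsheet() above; `gs` is an authenticated
# instance of the class that defines it.
sheet_id, file_obj = gs.createSpreadsheet(
    'monthly-report',             # title shown in Google Drive
    description='Report sheet',   # defaults to the title when omitted
    parentId=None,                # None is treated as 'root'
    csv='a,b,c\r\n1,2,3\r\n')     # initial content; None creates an empty grid
if sheet_id is not None:
    print(sheet_id)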
Example #9
 def write_block(self, idx, data):
     assert len(data) == self.gbd.block_size
     blkid = self.gbd.block_id(idx)
     if blkid is None:
         return self.gbd.new_block(idx, data)
     else:
         media_body = MediaInMemoryUpload(data, mimetype=self.gbd.BLOCK_MIMETYPE, resumable=False)
         return self.drive.files().update(fileId=blkid, media_body=media_body).execute()
Example #10
 def test_media_inmemory_upload(self):
   media = MediaInMemoryUpload('abcdef', mimetype='text/plain', chunksize=10,
                               resumable=True)
   self.assertEqual('text/plain', media.mimetype())
   self.assertEqual(10, media.chunksize())
   self.assertTrue(media.resumable())
   self.assertEqual('bc', media.getbytes(1, 2))
   self.assertEqual(6, media.size())
Example #11
 def upload_string(self, account_id, profile_id, string, data_source_id):
     """
     Upload to custom data sources - example function
     https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtReference/management/uploads/uploadData
     """
     analytics = self.get_service_object(name='management')
     media = MediaInMemoryUpload(string, mimetype='application/octet-stream', resumable=False)
     analytics.management().uploads().uploadData(
         accountId=account_id,
         webPropertyId=profile_id,
         customDataSourceId=data_source_id,
         media_body=media).execute()
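A usage sketch for upload_string() above; all IDs are placeholders, and the payload is sent as-is to the custom data source.

# Hypothetical usage of upload_string() above; `client` is an instance of the
# class providing get_service_object(), and all IDs are placeholders.
csv_payload = 'ga:date,ga:cost\n20240101,12.5\n'
client.upload_string(account_id='12345678',
                     profile_id='UA-12345678-1',
                     string=csv_payload.encode('utf-8'),  # bytes for the upload body
                     data_source_id='abcdEFGHij')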
Example #12
 def test_media_inmemory_upload(self):
     media = MediaInMemoryUpload("abcdef", "text/plain", chunksize=10, resumable=True)
     self.assertEqual("text/plain", media.mimetype())
     self.assertEqual(10, media.chunksize())
     self.assertTrue(media.resumable())
     self.assertEqual("bc", media.getbytes(1, 2))
     self.assertEqual(6, media.size())
Example #13
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')

    parser = gdrive.get_argparser(description=__doc__.strip())

    parser.add_argument('filename', help="Beancount input file")
    parser.add_argument('docid', help="Document ID")

    parser.add_argument(
        '-o',
        '--output',
        action='store',
        default=datetime.date.today().strftime('beancount.%Y-%m-%d.pdf'),
        help="Where to write out the collated PDF file")

    args = parser.parse_args()

    # Load the file.
    entries, unused_errors, options_map = loader.load_file(
        args.filename, log_timings=logging.info, log_errors=sys.stderr)

    # Generate a report.
    holdings_list = holdings_reports.report_holdings(
        None,
        False,
        entries,
        options_map,
        aggregation_key=lambda holding: holding.currency)

    oss = io.StringIO()
    table.table_to_csv(holdings_list, file=oss)
    csv_contents = oss.getvalue()

    # Connect, with authentication.
    # Check https://developers.google.com/drive/scopes for all available scopes.
    scopes = [
        'https://www.googleapis.com/auth/drive',
    ]
    http = gdrive.get_authenticated_http(" ".join(scopes), args)

    # Access the drive API.
    drive = gdrive.discovery.build('drive', 'v2', http=http)

    # Get the document and replace it.
    metadata = drive.files().get(fileId=args.docid).execute()
    upload = MediaInMemoryUpload(csv_contents.encode('utf-8'),
                                 mimetype=metadata['mimeType'])
    metadata = drive.files().update(fileId=args.docid,
                                    media_body=upload).execute()
    pprint(metadata)
Example #14
 def test_media_inmemory_upload(self):
   media = MediaInMemoryUpload('abcdef', mimetype='text/plain', chunksize=10,
                               resumable=True)
   self.assertEqual('text/plain', media.mimetype())
   self.assertEqual(10, media.chunksize())
   self.assertTrue(media.resumable())
   self.assertEqual('bc', media.getbytes(1, 2))
   self.assertEqual(6, media.size())
Example #15
 def insert(self, rows):
     '''
     rows is a list or tuple of (refId, id, similarity) triples
     '''
     rowsstr = '\r\n'.join(','.join(str(v) for v in row) for row in rows)
     media = MediaInMemoryUpload(body=rowsstr.encode('UTF-8'),
                                 mimetype='application/octet-stream',
                                 resumable=True)
     return self.service.table().importRows(tableId=self.table_id,
                                            media_body=media,
                                            encoding='UTF-8').execute()
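A usage sketch for insert() above: `table` is a hypothetical authenticated wrapper instance, and each row is a (refId, id, similarity) triple that gets stringified into CRLF-separated CSV before upload.

# Hypothetical usage of insert() above; `table` is an authenticated wrapper
# instance of the surrounding class.
rows = [(1, 42, 0.97),
        (1, 43, 0.65)]
response = table.insert(rows)  # returns the importRows() API response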
Example #16
def download_images(urls, tag, photo_info):

    # Get the service, then create a new folder for the tag
    service = get_credentials()

    folder_id = create_folder(tag)  # makes sure path exists

    # Loop through the URLs, creating each image in Drive
    for url in urls:
        image_name = url.split("/")[-1]
        file_metadata = {'name': image_name, 'parents': [folder_id]}

        # Fetch the image data and buffer it in memory
        response = urllib.request.urlopen(url)
        image_file = io.BytesIO(response.read())

        # Create the Drive media object, then upload it
        media = MediaInMemoryUpload(image_file.read(),
                                    mimetype='image/jpeg',
                                    resumable=True)
        file = service.files().create(body=file_metadata,
                                      media_body=media,
                                      fields='id').execute()

    # Create a CSV with the information for all images with the given tag
    with open('coordinates.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_NONE)

        # Write the field names, then add each photo's info to the rows
        writer.writerow([
            'Latitude', 'Longitude', "PhotoID", "Description", "Title",
            "Taken", "Real Name", "User Name"
        ])

        for field in photo_info:
            writer.writerow([
                field[0], field[1], field[2], field[3], field[4], field[5],
                field[6], field[7]
            ])

    csv_file_metadata = {'name': tag, 'parents': [folder_id]}

    # Create a media object for the CSV and upload it
    media = MediaFileUpload('coordinates.csv',
                            mimetype='text/csv',
                            resumable=True)
    file = service.files().create(body=csv_file_metadata,
                                  media_body=media,
                                  fields='id').execute()
Example #17
def replace_gdocs_document(http, docid, title, contents):
    """Upload new contents for a Google Doc for a plain/text file.

    Args:
      http: An http connection object with drive credentials.
      docid: A string, the ID of the document.
      title: A string, the title of the document.
      contents: A string, the body of the document.
    """
    service = discovery.build('drive', 'v3', http=http)
    media = MediaInMemoryUpload(contents.encode('utf8'),
                                mimetype='text/plain',
                                resumable=True)
    return service.files().update(fileId=docid,
                                  body=dict(name=title),
                                  media_body=media).execute()
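A minimal usage sketch for replace_gdocs_document() above; `http` must already carry Drive credentials, and the document ID is a placeholder.

# Hypothetical usage of replace_gdocs_document() above; `http` is an
# authorized httplib2.Http object and the docid is a placeholder.
result = replace_gdocs_document(http,
                                docid='1AbCdEfGhIj',
                                title='Weekly Notes',
                                contents='Replaced body text.\n')
print(result.get('name'))  # Drive v3 returns the updated file's metadata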
Example #18
    def uploadFromMemory(self, buff, title=None, mimetype=None):
        #media_body = MediaFileUpload(file_path_or_url, mimetype='text/plain', resumable=True)
        #media_body = MediaIoBaseUpload(open(file_path_or_url), mimetype='image/plain', resumable=True)
        media_body = MediaInMemoryUpload(buff,
                                         mimetype=mimetype,
                                         resumable=True)
        #im = cStringIO.StringIO(page.read())
        #media_body = MediaIoBaseUpload(im, mimetype=page.headers['Content-Type'], resumable=True)
        body = {'title': title, 'mimeType': mimetype}

        rv = None
        try:
            rv = self.getDriveService().files().insert(
                body=body, media_body=media_body).execute()
        except errors.HttpError:
            # Swallow the upload error; rv stays None on failure.
            pass
        return rv
Example #19
    def put(self, path, content):
        """Upload the file.

        Args:
          path: string
          content: string, size <= 5MB

        Returns:
          success: boolean

        Raises:
          apiclient.errors.HttpError if the response was not a 2xx.
          httplib2.HttpLib2Error if a transport error has occurred.
        """
        uri = GoogleAPI.UPLOAD_URL + '/files?uploadType=multipart&alt=json'
        path = util.format_path(path)
        name = os.path.basename(path)
        folder = os.path.dirname(path)

        parent_id = None
        beg = time.time()
        if folder != '/':
            parent = self._path_to_metadata(folder, isfolder=True)
            if not parent:
                # if the parent folder doesn't exist, then create one
                self.putdir(folder)
                parent = self._path_to_metadata(folder, isfolder=True)
            parent_id = parent['id']
        end = time.time()
        dbg.google_time("get parent: %s" % (end - beg))
        beg = time.time()
        media_body = MediaInMemoryUpload(content)
        body = {'title': name, 'mimeType': 'application/octet-stream'}
        if parent_id:
            # if not at root folder
            body['parents'] = [{'id': parent_id}]

        try:
            drive_file = self.service.files().insert(
                body=body, media_body=media_body).execute()
            self._cache_metadata(path, drive_file)
            end = time.time()
            dbg.google_time("insert finish %s" % (end - beg))
            return True
        except errors.HttpError, error:
            print 'An error occurred: %s' % error
            return False
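A usage sketch for put() above, assuming `api` is an authenticated instance of the surrounding class; the content string must stay within the documented 5 MB limit.

# Hypothetical usage of put() above; `api` is an authenticated instance of
# the surrounding class. Missing parent folders are created via putdir().
if api.put('/backups/notes.txt', 'plain-text payload under 5 MB'):
    print('upload succeeded')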
Example #20
 def put_file(self, parent_id, title, content, description=None):
     """Add a file to folder of the given id."""
     media_body = MediaInMemoryUpload(content)
     body = {
         'title': title,
         'description': description,
         'parents': [{'id': parent_id}]
     }
     try:
         metadata = self.service.files().insert(body=body,
                                                media_body=media_body
                                                ).execute()
     except errors.HttpError as e:
         raise OperationFailedError(opname='put_file', msg=e.resp.reason)
     except TypeError as e:
         raise ResourceInvalidError("put_file %r" % e)
     except Exception:
         return self._retry_operation(self.put_file, parent_id, title,
                                      content, description)
     self.cache.set(metadata['id'], metadata, parents=[parent_id])
     return metadata
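A usage sketch for put_file() above; `fs` is a hypothetical instance of the filesystem wrapper and the parent ID is a placeholder.

# Hypothetical usage of put_file() above; `fs` is an instance of the
# surrounding filesystem class and the parent ID is a placeholder.
metadata = fs.put_file(parent_id='0B_parent_placeholder',
                       title='notes.txt',
                       content=b'file body',
                       description='created via put_file')
print(metadata['id'])  # Drive-assigned ID; put_file also caches the metadata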
Example #21
    def _subir_archivos(cls, service, archivos=[]):
        parent = cls._get_parent(service)

        faltantes = cls._filtrar_existentes(service, parent, archivos)
        logging.debug(f'{len(faltantes)} files left to upload')

        res = []
        for datos in faltantes:
            path = datos['path']
            archivo = datos['archivo']

            meta = {'name': path, 'parents': [parent]}
            logging.debug(f"subiendo archivo : {path}")
            """
            this code uploads the file using the local disk as an intermediary

            path_ = obtener_path_completo_local(datos['path'])
            with open(path_, 'wb') as f:
                contenido = datos['archivo'].contenido
                contenido_binario = base64.b64encode(contenido).decode('utf8')
                f.write(contenido_binario)

            media = MediaFileUpload(path_,
                        mimetype=datos['archivo'].tipo,
                        resumable=True)
            """

            contenido = archivo.contenido
            contenido_binario = base64.b64decode(contenido.encode())
            media = MediaInMemoryUpload(contenido_binario,
                                        mimetype=archivo.tipo,
                                        resumable=True)
            r = service.files().create(body=meta, media_body=media).execute()
            logging.debug(f"respuesta : {r}")
            res.append(r)
        return res
Example #22
    def commit(self):
        '''
        Write the queued data to the DB.
        return: row count
        '''
        print '[debug] QUEUE COUNT: %d' % (len(self.queue))
        if len(self.queue) == 0:
            return 0
        rows = ''
        # RssId, WordList, NumberList, NewsDate, pubDate
        for q in self.queue:
            row = u'%s,"%s","%s","%s","%s"' % (
                q['rss_id'],
                ' '.join(q['news'].noun()),
                ' '.join(q['news'].meta().values()),
                q['newsDatetime'],
                q['pubDate'])
            rows += row + '\r\n'

        media = MediaInMemoryUpload(body=rows.encode('UTF-8'),
                                    mimetype='application/octet-stream',
                                    resumable=True)
        response = self.service.table().importRows(tableId=self.table_id,
                                                   media_body=media,
                                                   encoding='UTF-8').execute()
        self.queue = []
        return response[u'numRowsReceived']
Example #23
def main(argv):
    # Declare command-line flags.
    # addHelp=False here because it's added downstream in the sample_init
    argparser = argparse.ArgumentParser(add_help=False)

    argparser.add_argument(
        '-m',
        '--mailbox',
        help='The mailbox file containing the messages to be migrated.',
        required=True)
    argparser.add_argument(
        '-g',
        '--group',
        help='The email address of the group to receive the migrated messages.',
        required=True)
    argparser.add_argument(
        '-f',
        '--failed',
        help=
        'The mailbox file containing the messages that failed to be migrated.',
        required=False)
    argparser.add_argument('-v',
                           '--verbose',
                           help='Show progress',
                           action='store_true')
    argparser.add_argument('-a',
                           '--after',
                           help='Only import after date',
                           required=False)
    argparser.add_argument('-b',
                           '--before',
                           help='Only import before date',
                           required=False)
    argparser.add_argument('-n',
                           '--dryrun',
                           help='Dry-run',
                           action='store_true')
    argparser.add_argument('-r',
                           '--resume',
                           help='resume with message #',
                           required=False)
    argparser.add_argument('-l',
                           '--label',
                           help='import matching label (X-Gmail-Labels)',
                           required=False)

    # Authenticate and construct service
    scope = ("https://www.googleapis.com/auth/apps.groups.migration")

    service, flags = sample_tools.init(argv,
                                       'groupsmigration',
                                       'v1',
                                       __doc__,
                                       __file__,
                                       parents=[argparser],
                                       scope=scope)

    if (flags.after):
        flags.after = pytz.timezone('US/Pacific').localize(
            dateutil.parser.parse(flags.after))
        logging.info("only migrating messages after date: %s" % flags.after)
    if (flags.before):
        flags.before = pytz.timezone('US/Pacific').localize(
            dateutil.parser.parse(flags.before))
        logging.info("only migrating messages before date: %s" % flags.before)
    if (flags.resume):
        flags.resume = int(flags.resume)
        logging.info("resuming with id# %s" % flags.resume)
    if (flags.label):
        logging.info("only importing messags with label \"%s\"" % flags.label)

    mbox = mailbox.mbox(flags.mailbox, create=False)
    i, mboxLen = 0, len(mbox)
    print "mailbox size: %d messages" % mboxLen
    # create a new mbox with failed messages, to make reruns shorter
    if flags.failed:
        failbox = mailbox.mbox(flags.failed, create=True)
        failbox.lock()
    try:
        for message in mbox:
            i += 1
            message.x_date = None
            message.x_labels = []
            try:
                message.x_date = dateutil.parser.parse(message.get('Date'))
                try:
                    message.x_labels = map(
                        lambda s: s.strip(),
                        message.get('X-Gmail-Labels').split(','))
                    logging.debug("labels:  %s" % message.x_labels)
                    if flags.label and len(message.x_labels) > 0 and (
                            flags.label not in message.x_labels):
                        logging.debug(
                            "skipping: \"%s\" does not contain label \"%s\"" %
                            (message.x_labels, flags.label))
                        continue
                except:
                    pass
                if (flags.after and message.x_date
                        and (message.x_date < flags.after)):
                    logging.debug("skipping: date %s is before flags.after" %
                                  message.x_date)
                    continue
                if (flags.before and message.x_date
                        and (message.x_date > flags.before)):
                    logging.debug("skipping: date %s is after flags.before" %
                                  message.x_date)
                    continue
                if flags.resume and (i < flags.resume):
                    logging.debug("skipping: message_id %s is before resume" %
                                  i)
                    continue
            except (ValueError, TypeError, AttributeError) as e:
                logging.error(str(e))
            logging.debug("message-id: %s" % message['message-id'])
            logging.debug("subject: %s" % message['subject'])
            logging.debug("date: %s" % message.x_date)
            print "processing message %s / %s" % (i, mboxLen)
            if (flags.dryrun):
                continue
            media = MediaInMemoryUpload(str(message),
                                        mimetype='message/rfc822')
            try:
                request = service.archive().insert(groupId=flags.group,
                                                   media_body=media)
                result = request.execute()
                logging.debug("response: %s" % result['responseCode'])
                if flags.failed and result['responseCode'] != 'SUCCESS':
                    failbox.add(message)
            except (MediaUploadSizeError, HttpError, socket.error) as e:
                if flags.failed:
                    failbox.add(message)
                logging.error("%s: %s" % (message['message-id'], str(e)))
    except AccessTokenRefreshError:
        logging.error(
            'The credentials have been revoked or expired, please re-run the '
            'application to re-authorize')

    if flags.failed:
        failbox.flush()
        failbox.unlock()
Example #24
def convert_raw_document(raw_document, user=None):
    """ Upload a raw document to google drive and get a Note back"""
    fp_file = raw_document.get_file()

    # extract some properties from the document metadata
    filename = raw_document.name
    mimetype = raw_document.mimetype
    print "mimetype of the document to check: %s" % mimetype

    # A special case for Evernotes
    if raw_document.mimetype == 'text/enml':
        raw_document.mimetype = 'text/html'

    original_content = fp_file.read()

    # Include mimetype parameter if there is one to include
    extra_flags = {'mimetype': raw_document.mimetype} if raw_document.mimetype \
                  else {}
    media = MediaInMemoryUpload(original_content, chunksize=1024*1024, \
                                resumable=True, **extra_flags)

    service = build_api_service()

    # upload to google drive
    file_dict = upload_to_gdrive(service, media, filename, mimetype=mimetype)

    # download from google drive
    content_dict = download_from_gdrive(service, file_dict, mimetype=mimetype)

    # this should have already happened; let's see why it hasn't
    raw_document.is_processed = True
    raw_document.save()

    note = raw_document.convert_to_note()

    # Cache the uploaded file's URL
    note.gdrive_url = file_dict['alternateLink']
    note.text = content_dict['text']

    # Extract HTML from the appropriate place
    html = ''
    if raw_document.mimetype == PDF_MIMETYPE:
        html = pdf2html(original_content)
    elif raw_document.mimetype in PPT_MIMETYPES:
        html = pdf2html(content_dict['pdf'])
    elif 'html' in content_dict and content_dict['html']:
        html = content_dict['html']

    if html:
        html = sanitizer.data_uris_to_s3(html)
        NoteMarkdown.objects.create(note=note, html=html)

    # If we know the user who uploaded this,
    # associate them with the note
    if user and not user.is_anonymous():
        note.user = user
        NoteKarmaEvent.create_event(user, note, NoteKarmaEvent.UPLOAD)
    else:
        try:
            mapping = UserUploadMapping.objects.get(
                fp_file=raw_document.fp_file)
            note.user = mapping.user
            note.save()
            NoteKarmaEvent.create_event(mapping.user, note,
                                        NoteKarmaEvent.UPLOAD)
        except (ObjectDoesNotExist, MultipleObjectsReturned):
            logger.info("Zero or multiple mappings found with fp_file " +
                        raw_document.fp_file.name)

    # Finally, save whatever data we got back from google
    note.save()
Example #25
def shatner(request):
    da = DriveAuth(request)
    creds = da.get_session_credentials()
    if creds is None:
        return JsonResponse({'status': 'auth_needed'})

    task = request.POST.get('task', '')
    service = CreateService('drive', 'v2', creds)

    if service is None:
        return JsonResponse({'status': 'no_service'})

    if task == 'delete':
        file_id = request.POST.get('file_id', '')
        fid = file_id
        if file_id.endswith('/'):
            fid = file_id[:-1]

        try:
            service.files().trash(fileId=fid).execute()

        except AccessTokenRefreshError:
            return JsonResponse({'status': 'auth_needed'})

        return JsonResponse({'status': 'ok', 'file_id': file_id})

    elif task == 'revs':
        file_id = request.POST.get('file_id', '')
        try:
            revisions = service.revisions().list(fileId=file_id).execute()

        except AccessTokenRefreshError:
            return JsonResponse({'status': 'auth_needed'})

        else:
            #for item in revisions.get('items', []):
            #  logging.info('PIN: ' + str(item['pinned']))
            #  logging.info('PUB: ' + str(item['published']))
            #
            html = render_to_string('main/revs.html', {
                'file_id': file_id,
                'items': revisions.get('items', [])
            })

        return JsonResponse({'status': 'ok', 'file_id': file_id, 'html': html})

    elif task == 'get_url':
        file_id = request.POST.get('file_id', '')
        url = request.POST.get('url', '')

        try:
            resp, content = service._http.request(url)

        except AccessTokenRefreshError:
            return JsonResponse({'status': 'auth_needed'})

        return JsonResponse({
            'status': 'ok',
            'file_id': file_id,
            'text': content
        })

    elif task == 'get_urls':
        file_id = request.POST.get('file_id', '')
        url1 = request.POST.get('url1', '')
        url2 = request.POST.get('url2', '')

        try:
            resp1, content1 = service._http.request(url1)
            resp2, content2 = service._http.request(url2)

        except AccessTokenRefreshError:
            return JsonResponse({'status': 'auth_needed'})

        return JsonResponse({
            'status': 'ok',
            'file_id': file_id,
            'text1': content1,
            'text2': content2
        })

    elif task == 'open':
        file_id = request.POST.get('file_id', '')
        if file_id:
            try:
                f = service.files().get(fileId=file_id).execute()

            except AccessTokenRefreshError:
                return JsonResponse({'status': 'auth_needed'})

            downloadUrl = f.get('downloadUrl')

            if downloadUrl:
                resp, f['content'] = service._http.request(downloadUrl)

            else:
                f['content'] = ''

            fo = FileOpen(userid=da.userid,
                          fileid=file_id,
                          filename=f['title'])
            fo.put()
            return JsonResponse({'status': 'ok', 'file': f})

        return JsonResponse({'status': 'Invalid File'})

    elif task == 'save':
        name = request.POST.get('name')
        mimetype = request.POST.get('mimetype')
        content = request.POST.get('content', '')
        file_id = request.POST.get('file_id', '')
        new_file = request.POST.get('new_file')
        major = request.POST.get('major', '')
        md5hash = request.POST.get('md5hash', '')
        undos = int(request.POST.get('undos', '0'))

        new_revision = False
        if major == 'true':
            new_revision = True

        resource = {'title': name, 'mimeType': mimetype}

        encoding = chardet.detect(content)['encoding']
        dump = None
        if encoding:
            try:
                dump = content.encode(encoding)

            except:
                dump = None

        if dump is None:
            dump = content.encode('utf-8')

        file = MediaInMemoryUpload(dump, mimetype)
        try:
            if new_file == 'false':
                google = service.files().update(fileId=file_id,
                                                newRevision=new_revision,
                                                body=resource,
                                                media_body=file).execute()

            else:
                google = service.files().insert(body=resource,
                                                media_body=file).execute()

        except AccessTokenRefreshError:
            return JsonResponse({'status': 'auth_needed'})

        else:
            file_id = google['id']

        return JsonResponse(ok={
            'file_id': file_id,
            'md5hash': md5hash,
            'undos': undos
        })

    elif task == 'rename':
        name = request.POST.get('name')
        file_id = request.POST.get('file_id', '')
        rtype = request.POST.get('rtype', '')

        resource = {'title': name}
        if rtype == 'd':
            file_id = file_id[:-1]
            resource['mimeType'] = 'application/vnd.google-apps.folder'

        else:
            newm, enc = mimetypes.guess_type(name)
            if newm:
                resource['mimeType'] = newm

        google = service.files().update(fileId=file_id,
                                        newRevision=True,
                                        body=resource).execute()
        parents = []
        for p in google['parents']:
            if p['isRoot']:
                parents.append('')

            else:
                parents.append(p['id'])

        if not parents:
            parents.append('')

        return JsonResponse(ok={
            'file_id': file_id,
            'parents': parents,
            'name': name
        })

    elif task == 'new':
        name = request.POST.get('name')
        parent = request.POST.get('parent', '')

        mime_type, enc = mimetypes.guess_type(name)
        root, ext = os.path.splitext(name)
        if not mime_type:
            mime_type = 'application/octet-stream'

        if ext:
            ext = ext[1:]

        media_body = MediaInMemoryUpload('', mime_type)
        body = {'title': name, 'mimeType': mime_type}

        if parent:
            body['parents'] = [{'id': parent}]

        try:
            google = service.files().insert(body=body,
                                            media_body=media_body).execute()

        except AccessTokenRefreshError:
            return JsonResponse({'status': 'auth_needed'})

        else:
            return JsonResponse(
                ok={
                    'file_id': google['id'],
                    'title': name,
                    'ext': ext,
                    'mime': mime_type,
                    'url': google['alternateLink'],
                    'parent': parent
                })

    elif task == 'new_dir':
        name = request.POST.get('name')
        parent = request.POST.get('parent', '')
        dmt = 'application/vnd.google-apps.folder'

        body = {'title': name, 'mimeType': dmt}

        if parent:
            body['parents'] = [{'id': parent}]

        try:
            google = service.files().insert(body=body).execute()

        except AccessTokenRefreshError:
            return JsonResponse({'status': 'auth_needed'})

        else:
            return JsonResponse(
                ok={
                    'file_id': google['id'],
                    'title': name,
                    'mime': dmt,
                    'parent': parent
                })

    return http.HttpResponseBadRequest('Invalid Task', mimetype='text/plain')
Example #26
def main(argv):
  """Migrates email messages from Mailman mbox archives to Google Group using 
     the Groups Migration API."""
  parser = ArgumentParser(parents=[tools.argparser])
  parser.add_argument('--group',
                      help='Group email address',
                      required=True)
  parser.add_argument('--mbox',
                      help='Mailman archive file (.mbox)',
                      required=True)
  parser.add_argument('--resume',
                      type=int,
                      default=1,
                      help='Resume from the specified offset')
  args = parser.parse_args()
  
  # Setup logging
  logging.basicConfig()
  logger = logging.getLogger("migrate")
  logger.setLevel(getattr(logging, args.logging_level))

  settings = {}

  # Set up a Flow object to be used if we need to authenticate.
  FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
      scope='https://www.googleapis.com/auth/apps.groups.migration',
      message=MISSING_CLIENT_SECRETS_MESSAGE)

  storage = Storage(CREDENTIAL_FILE)
  credentials = storage.get()

  if credentials is None or credentials.invalid:
    print 'invalid credentials'
    # Save the credentials in storage to be used in subsequent runs.
    credentials = tools.run_flow(FLOW, storage, args)

  # Create an httplib2.Http object to handle our HTTP requests and authorize it
  # with our good Credentials.
  http = httplib2.Http()
  http = credentials.authorize(http)

  # Load the archive file
  logger.info('Importing mbox file %s...', args.mbox)
  messages = mbox(args.mbox)

  logger.info('%s contains %d messages.', args.mbox, len(messages))

  service = build('groupsmigration', 'v1', http=http)
  archive = service.archive()
  
  # Upload messages, manually skipping everything before the resume point.
  with click.progressbar(messages,
                         label='Migrating %s' % os.path.basename(args.mbox),
                         fill_char=click.style('#', fg='green'),
                         show_pos=True,
                         item_show_func=show_subject) as msgs:
    pos = 1
    for msg in msgs:
      if pos < args.resume:
        pos = pos + 1  # keep counting, or the skip never advances past resume
        continue

      try:
        media = MediaInMemoryUpload(msg.as_string(), mimetype='message/rfc822')
        result = qps_limit(archive.insert(groupId=args.group, media_body=media).execute())
        if result['responseCode'] != 'SUCCESS':
          logger.error('Message %d failed!', pos)
          logger.error('Subject: "%s"', msg['subject'])
          logger.error('Response: "%s"', result)
      except Exception, e:
          logger.error('Message %d failed!', pos)
          logger.error('Subject: "%s"', msg['subject'])
          logger.error('Response: %s', e)

      pos = pos + 1