def __init__(self, auth):
    self.drive = GoogleDrive(auth)
def auth():
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()

    return GoogleDrive(gauth)
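
A minimal usage sketch for the helper above, assuming PyDrive is installed and a valid client_secrets.json sits next to the script; the query uses PyDrive's standard ListFile syntax:

drive = auth()
# list non-trashed files in the Drive root
for f in drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList():
    print(f['title'], f['id'])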
Example #3
async def gdrive_stuff(client, message):
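    # NOTE: `gauth` is assumed to be a module-level GoogleAuth() instance
    # created elsewhere in this plugin; it is not defined in this function.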
    gauth.LoadCredentialsFile("nana/session/drive")
    if gauth.credentials is None:
        if HEROKU_API and gdrive_credentials:
            with open("client_secrets.json", "w") as file:
                file.write(gdrive_credentials)
        await message.edit(
            "You are not logged in to your Google Drive account!\nYour assistant bot can help you log in to Google "
            "Drive; check your assistant bot for more information!")
        gdriveclient = os.path.isfile("client_secrets.json")
        if not gdriveclient:
            await setbot.send_message(
                message.from_user.id,
                "Hello, look like you're not logged in to google drive 🙂\nI can help you to "
                "login.\n\nFirst of all, you need to activate your google drive API\n1. [Go "
                "here](https://developers.google.com/drive/api/v3/quickstart/python), "
                "click **Enable the drive API**\n2. Login to your google account (skip this if "
                "you're already logged in)\n3. After logged in, click **Enable the drive API** "
                "again, and click **Download Client Configuration** button, download that.\n4. "
                "After downloaded that file, open that file then copy all of that content, "
                "back to telegram then do .credentials (copy the content of that file)  do "
                "without bracket\n\nAfter that, you can go next guide by type /gdrive"
            )
        else:
            try:
                gauth.GetAuthUrl()
            except Exception:
                await setbot.send_message(
                    message.from_user.id,
                    "Wrong Credentials! Check var ENV gdrive_credentials on heroku or do "
                    ".credentials (your credentials) for change your Credentials"
                )
                return
            await setbot.send_message(
                message.from_user.id,
                "Hello, look like you're not logged in to google drive :)\nI can help you to "
                "login.\n\n**To login Google Drive**\n1. `/gdrive` to get login URL\n2. After "
                "you're logged in, copy your Token.\n3. `/gdrive (token)` without `(` or `)` to "
                "login, and your session will saved to `nana/session/drive`.\n\nDon't share your "
                "session to someone, else they will hack your google drive account!"
            )
        return
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()

    drive = GoogleDrive(gauth)
    drive_dir = await get_drivedir(drive)

    if len(message.text.split()) == 3 and message.text.split()[1] == "download":
        await message.edit("Downloading...")
        driveid = await get_driveid(message.text.split()[2])
        if not driveid:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        filename = await get_driveinfo(driveid)
        if not filename:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        await message.edit("Downloading for `{}`\nPlease wait...".format(
            filename.replace(' ', '_')))
        download = drive.CreateFile({'id': driveid})
        download.GetContentFile(filename)
        try:
            os.rename(filename, "nana/downloads/" + filename.replace(' ', '_'))
        except FileExistsError:
            os.rename(filename,
                      "nana/downloads/" + filename.replace(' ', '_') + ".2")
        await message.edit("Downloaded!\nFile saved to `{}`".format(
            "nana/downloads/" + filename.replace(' ', '_')))
    elif len(message.text.split()) == 3 and message.text.split()[1] == "upload":
        filerealname = message.text.split()[2].split(None, 1)[0]
        filename = "nana/downloads/{}".format(filerealname.replace(' ', '_'))
        checkfile = os.path.isfile(filename)
        if not checkfile:
            await message.edit("File `{}` was not found!".format(filerealname))
            return
        await message.edit("Uploading `{}`...".format(filerealname))
        upload = drive.CreateFile({
            "parents": [{
                "kind": "drive#fileLink",
                "id": drive_dir
            }],
            'title': filerealname
        })
        upload.SetContentFile(filename)
        upload.Upload()
        upload.InsertPermission({
            'type': 'anyone',
            'value': 'anyone',
            'role': 'reader'
        })
        await message.edit(
            "Uploaded!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
            .format(filerealname, upload['alternateLink'], filerealname,
                    upload['downloadUrl']))
    elif len(message.text.split()) == 3 and message.text.split()[1] == "mirror":
        await message.edit("Mirroring...")
        driveid = await get_driveid(message.text.split()[2])
        if not driveid:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        filename = await get_driveinfo(driveid)
        if not filename:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        mirror = drive.auth.service.files().copy(
            fileId=driveid,
            body={
                "parents": [{
                    "kind": "drive#fileLink",
                    "id": drive_dir
                }],
                'title': filename
            }).execute()
        new_permission = {
            'type': 'anyone',
            'value': 'anyone',
            'role': 'reader'
        }
        drive.auth.service.permissions().insert(fileId=mirror['id'],
                                                body=new_permission).execute()
        await message.edit(
            "Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})".
            format(filename, mirror['alternateLink'], filename,
                   mirror['downloadUrl']))
    elif len(message.text.split()) == 2 and message.text.split()[1] == "tgmirror":
        if message.reply_to_message:
            await message.edit("__Downloading...__")
            c_time = time.time()
            if message.reply_to_message.photo:
                nama = "photo_{}.png".format(
                    message.reply_to_message.photo.date)
                await client.download_media(
                    message.reply_to_message.photo,
                    file_name="nana/downloads/" + nama,
                    progress=lambda d, t: asyncio.get_event_loop().create_task(
                        progressdl(d, t, message, c_time, "Downloading...")))
            elif message.reply_to_message.animation:
                nama = "giphy_{}-{}.gif".format(
                    message.reply_to_message.animation.date,
                    message.reply_to_message.animation.file_size)
                await client.download_media(
                    message.reply_to_message.animation,
                    file_name="nana/downloads/" + nama,
                    progress=lambda d, t: asyncio.get_event_loop().create_task(
                        progressdl(d, t, message, c_time, "Downloading...")))
            elif message.reply_to_message.video:
                nama = "video_{}-{}.mp4".format(
                    message.reply_to_message.video.date,
                    message.reply_to_message.video.file_size)
                await client.download_media(
                    message.reply_to_message.video,
                    file_name="nana/downloads/" + nama,
                    progress=lambda d, t: asyncio.get_event_loop().create_task(
                        progressdl(d, t, message, c_time, "Downloading...")))
            elif message.reply_to_message.sticker:
                nama = "sticker_{}_{}.webp".format(
                    message.reply_to_message.sticker.date,
                    message.reply_to_message.sticker.set_name)
                await client.download_media(
                    message.reply_to_message.sticker,
                    file_name="nana/downloads/" + nama,
                    progress=lambda d, t: asyncio.get_event_loop().create_task(
                        progressdl(d, t, message, c_time, "Downloading...")))
            elif message.reply_to_message.audio:
                nama = "audio_{}.mp3".format(
                    message.reply_to_message.audio.date)
                await client.download_media(
                    message.reply_to_message.audio,
                    file_name="nana/downloads/" + nama,
                    progress=lambda d, t: asyncio.get_event_loop().create_task(
                        progressdl(d, t, message, c_time, "Downloading...")))
            elif message.reply_to_message.voice:
                nama = "audio_{}.ogg".format(
                    message.reply_to_message.voice.date)
                await client.download_media(
                    message.reply_to_message.voice,
                    file_name="nana/downloads/" + nama,
                    progress=lambda d, t: asyncio.get_event_loop().create_task(
                        progressdl(d, t, message, c_time, "Downloading...")))
            elif message.reply_to_message.document:
                nama = "{}".format(message.reply_to_message.document.file_name)
                await client.download_media(
                    message.reply_to_message.document,
                    file_name="nana/downloads/" + nama,
                    progress=lambda d, t: asyncio.get_event_loop().create_task(
                        progressdl(d, t, message, c_time, "Downloading...")))
            else:
                await message.edit("Unknown file!")
                return
            upload = drive.CreateFile({
                "parents": [{
                    "kind": "drive#fileLink",
                    "id": drive_dir
                }],
                'title': nama
            })
            upload.SetContentFile("nana/downloads/" + nama)
            upload.Upload()
            upload.InsertPermission({
                'type': 'anyone',
                'value': 'anyone',
                'role': 'reader'
            })
            await message.edit(
                "Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
                .format(nama, upload['alternateLink'], nama,
                        upload['downloadUrl']))
            os.remove("nana/downloads/" + nama)
        else:
            await message.edit("Reply document to mirror it to gdrive")
    elif len(message.text.split()) == 3 and message.text.split()[1] == "urlmirror":
        await message.edit("Downloading...")
        URL = message.text.split()[2]
        nama = URL.split("/")[-1]
        time_dl = await download_url(URL, nama)
        if "Downloaded" not in time_dl:
            await message.edit("Failed to download file, invaild url!")
            return
        await message.edit(f"Downloaded with {time_dl}.\nNow uploading...")
        upload = drive.CreateFile({
            "parents": [{
                "kind": "drive#fileLink",
                "id": drive_dir
            }],
            'title': nama
        })
        upload.SetContentFile("nana/downloads/" + nama)
        upload.Upload()
        upload.InsertPermission({
            'type': 'anyone',
            'value': 'anyone',
            'role': 'reader'
        })
        await message.edit(
            "Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})".
            format(nama, upload['alternateLink'], nama, upload['downloadUrl']))
        os.remove("nana/downloads/" + nama)
    else:
        await message.edit(
            "Usage:\n-> `gdrive download <url/gid>`\n-> `gdrive upload <file>`\n-> `gdrive mirror <url/gid>`\n-> "
            "`gdrive tgmirror` (reply to a file)\n-> `gdrive urlmirror <url>`\n\nFor "
            "more information about this, go to your assistant.")
Example #4
def __init__(self):
    # 1. Authenticate and create the PyDrive client.
    auth.authenticate_user()
    gauth = GoogleAuth()
    gauth.credentials = GoogleCredentials.get_application_default()
    self.drive = GoogleDrive(gauth)
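
Assumed imports for the snippet above: from google.colab import auth and from oauth2client.client import GoogleCredentials, plus the usual PyDrive imports.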
Example #5
def load_data_path(folder_id, colab_path='/root/data/', local_path='../data/',
                   mime_types=['csv', 'zip']):
    """Boilerplate to download data from Google Drive into Colab
    notebook or to point to local data folder

    Behavior:
    ---------
    1. Identify if Notebook is running in Colab
    2. If Yes, then
        a. do Google OAuth login (requires user interaction)
        b. create a data folder in Colab (colab_path)
        c. Search the Google Drive folder for files whose titles match
           the given extensions (mime_types)
        d. Copy all matching files from Google Drive into the colab_path folder
        e. Return the colab_path variable
    3. If No, then
        a. Return the local_path variable

    Example 1:
    ----------
        !pip install colabtweak
        from colabtweak import load_data_path
        folder_id = "kasdhkfhjkashfjadskjfjsalk"
        data_path = load_data_path(folder_id)

        import pandas as pd
        df = pd.read_csv(data_path + "train.csv")
        df.head()

    Example 2:
    ----------
        !pip install colabtweak
        from colabtweak import load_data_path
        folder_id = "kasdhkfhjkashfjadskjfjsalk"
        colab_path = "/root/somecustomfolderincolab/"
        local_path = "../localsiblingprojectfolder/
        data_path = load_data_path(
            folder_id, colab_path=colab_path, local_path=local_path)

    """

    if 'google.colab' in sys.modules:
        print("Notebook is running in Colab")

        if folder_id is None:
            print((
                "Folder ID is missing.\n"
                "Click on the Google Drive folder and check your URL\n"
                "'https://drive.google.com/drive/u/0/folders/<folder_id>'"))

        # Login
        from google.colab import auth
        auth.authenticate_user()
        gauth = GoogleAuth()
        gauth.credentials = GoogleCredentials.get_application_default()
        drive = GoogleDrive(gauth)

        # create "~/data" folder within the Colab image
        download_path = os.path.expanduser(colab_path)
        try:
            os.makedirs(download_path)
        except FileExistsError:
            pass

        # Extract the FileIDs from the Google Drive directory
        tmp = ' or '.join(["title contains '." + m + "'" for m in mime_types])
        querystr = "(" + tmp + ") and '" + folder_id + "' in parents"
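        # e.g. "(title contains '.csv' or title contains '.zip') and '<folder_id>' in parents"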
        listed = drive.ListFile({'q': querystr}).GetList()

        # Copy all files
        for file in listed:
            try:
                print('{} {}'.format(file['id'], file['title']))
                output_file = os.path.join(download_path, file['title'])
                temp_file = drive.CreateFile({'id': file['id']})
                temp_file.GetContentFile(output_file)
            except Exception as e:
                print(e)

        # Set directory path
        return colab_path

    else:
        print("Notebook is running in Jupyter")
        return local_path
Example #6
def pydrive_load(args):
    gauth = GoogleAuth()

    code = gauth.CommandLineAuth()
    if code is not None:
        gauth.Auth(code)

    drive = GoogleDrive(gauth)
    files = GoogleDriveFile(gauth)

    # remove temp file for this share id
    pro_temp = get_project_temp(drive, files, args.driveid)

    # about = drive.GetAbout()
    # print(about)

    # get_root_info(files, DRIVE_ID)

    root_node = Node('root', data=path_info(id=DRIVE_ID, title='', parent=''))

    # drive_id = DRIVE_ID
    drive_id = args.driveid

    file_infos = []
    get_file_list(root_node, file_infos, drive, drive_id)

    # list path tree
    if args.showtree:
        print('path tree is:')
        for pre, fill, node in RenderTree(root_node):
            print('{}{}'.format(pre, node.name))

    # make dir
    base_dir = os.path.join(args.downdir, drive_id)
    mkdir_in_tree(base_dir, root_node)

    # list file
    if args.showlist:
        print('file list is:')

    current = 0
    total = len(file_infos)
    for i in file_infos:
        if args.showlist:
            print(
                'id: {}, is_folder: {}, title: {},  desc: {}, ext: {}, size: {}'
                .format(i.id, i.is_folder, i.title, i.desc, i.ext, i.size))
        if len(i.parents) > 0:
            index = 0
            for parent in i.parents:
                if args.showlist:
                    print('     parents:{}={}, isRoot:{}'.format(
                        index, parent['id'], parent['isRoot']))
                index += 1
            if args.showlist:
                print('     parent path={}'.format(i.parent_node.data.path))

            retry = 0
            if not i.is_folder:
                while retry < args.retry_count:
                    try:
                        print('# {}/{} begin!'.format(current, total))
                        try:
                            file_path = i.parent_node.data.path
                            file_title = i.title
                            file_size = i.size
                            file_id = i.id
                            download_file(file_path, args.override, drive,
                                          file_id, file_title, file_size)
                        except HttpError as http_error:
                            if (http_error.resp.status == 403
                                    and 'The download quota for this file has been exceeded'
                                    in str(http_error.content)):
                                make_copy_and_download(file_path,
                                                       drive.auth.service,
                                                       args.override, drive,
                                                       file_id, pro_temp,
                                                       file_title, file_size)

                        print_with_carriage_return('# {}/{} done!'.format(
                            current, total))
                        break
                    except Exception as e:
                        retry += 1
                        print('unexpected error={}, retry={}'.format(e, retry))

                current += 1

    # remove temp
    print('job done! remove project temp folder...')
    get_project_temp(drive, files, args.driveid, False)
Example #7
def login():
    global gauth, drive
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth() # Creates local webserver and auto handles authentication
    drive = GoogleDrive(gauth) # Create GoogleDrive instance with authenticated GoogleAuth instance
Example #8
def main(argv):

	if len(argv) < 5:
		print('Usage: python main.py {number of producers} {number of consumers} {number of files per producer} {size of each file}')
		sys.exit(0)

	n_produtores = int(argv[1])
	n_consumidores = int(argv[2])
	n_files = int(argv[3])
	file_size = int(argv[4])

	fila_locker = threading.Lock()
	contador_locker = threading.Lock()
	fila_conc_locker = threading.Lock()
	cont_conc_locker = threading.Lock()

	gauth = GoogleAuth()
	gauth.LocalWebserverAuth() # Creates local webserver and auto handles authentication.
	drive = GoogleDrive(gauth)

	conc_file = drive.CreateFile({'title': 'Arquivo Final.txt'})
	conc_file.Upload()

	contador = drive.CreateFile({'title': 'Contador.txt'})
	contador.SetContentString(str(n_files*n_produtores))
	contador.Upload()

	fila = drive.CreateFile({'title': 'Fila.txt'})
	fila.SetContentString(' ')
	fila.Upload()

	cont_conc = drive.CreateFile({'title': 'Contador_Concatenador.txt'})
	cont_conc.SetContentString(str(n_files*n_produtores))
	cont_conc.Upload()

	fila_conc = drive.CreateFile({'title': 'Fila_Concatenador.txt'})
	fila_conc.SetContentString(' ')
	fila_conc.Upload()


	produtores = []
	consumidores = []
	concatenador = None

	try:
		for x in range(n_produtores):
			produtores.append(Produtor(x, n_files, file_size, drive, contador, fila, fila_locker))
			produtores[x].start()

		for x in range(n_consumidores):
			consumidores.append(Consumidor(x, drive, contador, fila, fila_locker, contador_locker, fila_conc, fila_conc_locker))
			consumidores[x].start()

		concatenador = Concatenador(drive, conc_file, fila_conc, fila_conc_locker, cont_conc, cont_conc_locker)
		concatenador.start()
	except Exception:
		print('Unable to start Thread')

	for x in range(n_produtores):
		produtores[x].join()

	for x in range(n_consumidores):
		consumidores[x].join()

	concatenador.join()

	fila.Delete()
	contador.Delete()
	fila_conc.Delete()
	cont_conc.Delete()

	print('END')
Example #9
def auth(self):
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()  # client_secrets.json needs to be in the same directory as the script
    drive = GoogleDrive(gauth)
    return drive
Example #10
def __init__(self):
    self.gauth = GoogleAuth()
    self.gauth.LocalWebserverAuth()
    self.drive = GoogleDrive(self.gauth)
Example #11
class GoogleDriveAPIHelper:
    """This class is supposed to help handling the GoogleDrive API connections.
       It is assumed that the relevant Google Drive folder structure does not change during runtime, due to the synchronous nature of the script.
       Thus queries only have to be done once and are cached to improve performance.
    """
    # Static variables
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()  # Creates local webserver and auto handles authentication.
    drive = GoogleDrive(gauth)

    def __init__(self, name=None, threads=4):
        # Variable that keeps track of previous queries to avoid doing the same query over and over again
        self.queries = {}
        # Files that are already created for that run
        self.complete_file_list = None
        self.active_threads = {}
        self.allowed_threads = threads
        # Create logger
        logging_config.create_logger(f'download_dataset_{name}',
                                     config.LOGGING_PATH, True)

    def query_api_get_list(self, query):
        if query in self.queries:
            res = self.queries[query]
        else:
            res = GoogleDriveAPIHelper.drive.ListFile({"q": query}).GetList()
            self.queries[query] = res  # save query for next use
        return res

    def get_sub_dirs(self, folder_id):
        query = f"'{folder_id}' in parents and mimeType='application/vnd.google-apps.folder' and trashed=false"
        drive_folders = self.query_api_get_list(query)
        return drive_folders

    def download_sub_folder(self, folder_id, output_dir):
        sub_dirs = self.get_sub_dirs(folder_id)
        total = len(sub_dirs)
        self.progress_bar = tqdm(total=total)
        for sub_dir in sub_dirs:
            while len(self.active_threads) > self.allowed_threads:  # max allowed threads
                time.sleep(2)

            # Create thread
            thr = threading.Thread(target=self.process_sub_dir,
                                   args=(sub_dir, output_dir))
            # Start thread
            thr.start()

            self.active_threads[sub_dir['id']] = thr

    def process_sub_dir(self, sub_dir, output_dir):
        sub_dir_id = sub_dir['id']
        sub_dir_title = sub_dir['title']
        # Only get files
        query = f"'{sub_dir_id}' in parents and mimeType!='application/vnd.google-apps.folder' and trashed=false"
        sub_dir_files = self.query_api_get_list(query)

        if not sub_dir_files:
            logging_config.logger.info(f'No files found for {sub_dir_title}.')
        # Loop over files
        for sub_dir_file in sub_dir_files:
            sub_dir_file_title = sub_dir_file['title']
            file_dir = f'{output_dir}/{sub_dir_title}'
            logging_config.logger.info(
                f'Processing file {sub_dir_file_title}.')
            # Check if path exists
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)

            file_path = f'{file_dir}/{sub_dir_file_title}'
            # Check if file already exists
            if not os.path.exists(file_path):
                drive_file = GoogleDriveAPIHelper.drive.CreateFile(
                    {'id': sub_dir_file['id']})
                drive_file.GetContentFile(file_path)

        # Del task
        del self.active_threads[sub_dir_id]
        # Update progress bar
        self.progress_bar.update(1)
        logging_config.logger.info(f'{sub_dir_title} finished.')
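
A hypothetical usage sketch for this helper; the folder ID and output directory are placeholders, and a client_secrets.json is assumed to be available for the class-level LocalWebserverAuth call:

helper = GoogleDriveAPIHelper(name='my_dataset', threads=4)
# download every file in each sub-folder of the given Drive folder
helper.download_sub_folder('<drive_folder_id>', './dataset')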
Example #12
def uploadData(oe):
    filename = oe.finaldataPath + '\\PWVout_%s.fits' % oe.night

    def ListFolder(parent):
        filelist = []
        file_list = drive.ListFile({
            'q': "'%s' in parents and trashed=false" % parent
        }).GetList()
        for f in file_list:
            if f['mimeType'] == 'application/vnd.google-apps.folder':  # if folder
                filelist.append({
                    "id": f['id'],
                    "title": f['title'],
                    "list": ListFolder(f['id'])
                })
            else:
                filelist.append({
                    "title": f['title'],
                    "title1": f['alternateLink']
                })
        return filelist

    if filename:  # why was there an indent here?
        gauth = GoogleAuth()
        auth_url = gauth.GetAuthUrl()  # Create authentication url user needs to visit
        gauth.LocalWebserverAuth()  # will this require me to click a button every time?????
        #http://pythonhosted.org/PyDrive/oauth.html#customizing-authentication-with-settings-yaml
        drive = GoogleDrive(gauth)

        # Check if folder in CAMAL_data for night
        out_root = ListFolder('root')
        for ifold in range(len(out_root)):
            if out_root[ifold]['title'] == u'Data':
                camalid = out_root[ifold]['id']
        #camalid   = '0B18tnyqgrwlpbFV0aXA4ckxXUlE' # id of CAMAL data folder
        #file_list = drive.ListFile({'q': "'%s' in parents and trashed=false" % camalid}).GetList()
        file_list = ListFolder(camalid)
        folders = {}
        for f in file_list:
            folders[str(f['title'])] = str(f['id'])
        if oe.night in folders.keys():
            nightid = folders[oe.night]  # store night id
        else:
            # Create folder
            nightfolder = drive.CreateFile({
                'title': oe.night,
                "parents": [{"id": camalid}],
                "mimeType": "application/vnd.google-apps.folder"
            })
            nightfolder.Upload()
            file_list = ListFolder(camalid)
            for f in file_list:
                folders[str(f['title'])] = str(f['id'])
            nightid = folders[oe.night]  # store night id

        files = glob.glob(oe.finaldataPath + '\\*')
        # Upload Files to night's folder
        for filepath in files:
            fname = filepath.split('\\')[-1]
            f = drive.CreateFile({
                'title': fname,
                "parents": [{"kind": "drive#fileLink", "id": nightid}]
            })
            f.SetContentFile(filepath)
            f.Upload()
Example #13
def train(hyp, opt, device, tb_writer=None):
    logger.info('')
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank, task_name, timestamp = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.task,\
        opt.timestamp

    # Directories
    from datetime import datetime

    dt_str = timestamp

    model_name = weights.split('.')[0]
    # cropping_yolov5l_29May21-035456_exp2
    full_model_name = f'{task_name}_{model_name}_{dt_str}'
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / f'{full_model_name}_last.pt'
    best = wdir / f'{full_model_name}_best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.safe_load(f)  # data dict
    is_coco = opt.data.endswith('coco.yaml')

    # Logging- Doing this before checking the dataset. Might update data_dict
    loggers = {'wandb': None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
        wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
        loggers['wandb'] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming

    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Results
        if ckpt.get('training_results') is not None:
            results_file.write_text(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers,
                                       pad=0.5, prefix=colorstr('val: '))[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
                    # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
                    find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    if tb_writer:
                        tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])  # add model graph
                        # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                elif plots and ni == 10 and wandb_logger.wandb:
                    wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
                                                  save_dir.glob('train*.jpg') if x.exists()]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                wandb_logger.current_epoch = epoch + 1
                results, maps, times = test.test(data_dict,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 verbose=nc < 50 and final_epoch,
                                                 plots=plots and final_epoch,
                                                 wandb_logger=wandb_logger,
                                                 compute_loss=compute_loss,
                                                 is_coco=is_coco)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb_logger.wandb:
                    wandb_logger.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            wandb_logger.end_epoch(best_result=best_fitness == fi)

            # Save model
            if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
                ckpt = {'epoch': epoch,
                        'best_fitness': best_fitness,
                        'training_results': results_file.read_text(),
                        'model': deepcopy(model.module if is_parallel(model) else model).half(),
                        'ema': deepcopy(ema.ema).half(),
                        'updates': ema.updates,
                        'optimizer': optimizer.state_dict(),
                        'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if wandb_logger.wandb:
                    if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
                        wandb_logger.log_model(
                            last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                del ckpt

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb_logger.wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
                                              if (save_dir / f).exists()]})
        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            for m in (last, best) if best.exists() else (last,):  # speed, mAP tests
                results, _, _ = test.test(opt.data,
                                          batch_size=batch_size * 2,
                                          imgsz=imgsz_test,
                                          conf_thres=0.001,
                                          iou_thres=0.7,
                                          model=attempt_load(m, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=True,
                                          plots=False,
                                          is_coco=is_coco)

        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload
        if wandb_logger.wandb and not opt.evolve:  # Log the stripped model
            wandb_logger.wandb.log_artifact(str(final), type='model',
                                            name='run_' + wandb_logger.wandb_run.id + '_model',
                                            aliases=['last', 'best', 'stripped'])
        wandb_logger.finish_run()
    else:
        dist.destroy_process_group()
    torch.cuda.empty_cache()
    ### archive and copy results to gdrive ###
    source_dir = save_dir
    import shutil
    shutil.make_archive(source_dir, 'zip', source_dir)
    from pydrive.auth import GoogleAuth
    from pydrive.drive import GoogleDrive
    gauth = GoogleAuth()
    drive = GoogleDrive(gauth)
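    # NOTE: as written, nothing is actually copied to Drive -- `drive` is created
    # but never used; an upload step (e.g. CreateFile/SetContentFile/Upload on
    # the zip archive) would still be needed here.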
    return results
Example #14
        context[dateindex] = datert
        context[eventindex] = linkrt
    
        doc.save(filename)
        time.sleep(.25)
    
    
    for x in os.listdir(index):
        doc = docxtpl.DocxTemplate(index+x)
    
        doc.render(context)
        doc.save(index+x)


##### Variables
drive = GoogleDrive(GoogleAuth())
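# NOTE: no explicit auth flow (e.g. LocalWebserverAuth) is run here; this relies
# on saved credentials or a settings.yaml default, otherwise the first API call
# will fail to authenticate.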
filesystem = "C:/Users/tyler/Documents/GitHub/The-Trialists-Toolkit/Archives/Filesystem/"
rootfilesystem = "C:/Users/tyler/Desktop/Workspace/TTT Admin Suite/"
index = filesystem + r"index/"
os.makedirs(index, exist_ok=True)
folder = "application/vnd.google-apps.folder"
filesystemIndex = parse_filesystem(filesystem)
filesystemIDs = verify_root(filesystem)
context = {}

##### Main
generate_filesystem(filesystemIndex)
print("\n[ FILESYSTEM SUCCESSFULLY GENERATED ]\n")

cleandata = generate_summary_files(clean_data(filesystemIndex))
print("\n[ SUMMARY FILES SUCCESSFULLY GENERATED ]\n")
Example #15
def startDownloads(songsFolder):
    gauth = GoogleAuth()  # Google Drive authentication
    gauth.LocalWebserverAuth()  # Needed only for initial auth
    drive = GoogleDrive(gauth)

    connection = sqlite3.connect('../ChartBase.db')
    cursor = connection.cursor()

    cursor.execute('SELECT * FROM links WHERE downloaded=0')

    links = cursor.fetchall()

    for link in links:
        url = link[0]
        source = link[1]
        urlDecoded = urllib.parse.unquote(url)

        domain = re.search(r'.*?://(.*?)/', urlDecoded).group(1)

        tmpFolder = os.path.join(songsFolder, 'tmp/')

        if not os.path.exists(tmpFolder):
            os.mkdir(tmpFolder)

        if 'drive.google' in domain:
            try:
                print(f'downloading from gDrive: {url}')
                gDriveDownload(drive, urlDecoded, tmpFolder)
            except (KeyboardInterrupt, SystemExit):
                if os.path.exists(tmpFolder):
                    print(f'removing tmpFolder due to sysexit: {tmpFolder}')
                    shutil.rmtree(tmpFolder)

                raise
            except Exception:
                # parameterized query avoids quoting/injection issues with URLs
                cursor.execute(
                    'UPDATE links SET downloaded=-1 WHERE url=?', (url,))
                connection.commit()

                if os.path.exists(tmpFolder):
                    print(f'removing tmpFolder due to except: {tmpFolder}')
                    shutil.rmtree(tmpFolder)

            if os.path.exists(tmpFolder):
                print(f'importing: {url}')
                importDownloaded(songsFolder, url, source, connection)

                print(f'updating in db: {url}')
                cursor.execute(
                    'UPDATE links SET downloaded=1 WHERE url=?', (url,))
                connection.commit()
        else:
            try:
                print(f'downloading: {url}')
                _ = wget.download(urlDecoded, tmpFolder)
            except (KeyboardInterrupt, SystemExit):
                if os.path.exists(tmpFolder):
                    print(f'removing tmpFolder due to sysexit: {tmpFolder}')
                    shutil.rmtree(tmpFolder)

                raise
            except Exception:
                cursor.execute(
                    'UPDATE links SET downloaded=-1 WHERE url=?', (url,))
                connection.commit()

                if os.path.exists(tmpFolder):
                    print(f'removing tmpFolder due to except: {tmpFolder}')
                    shutil.rmtree(tmpFolder)

            if os.path.exists(tmpFolder):
                print(f'importing: {url}')
                importDownloaded(songsFolder, url, source, connection)

                print(f'updating in db: {url}')
                cursor.execute(
                    'UPDATE links SET downloaded=1 WHERE url=?', (url,))
                connection.commit()
Example #16
def get_link(bot, update):
    TRChatBase(update.from_user.id, update.text, "gofile")
    if str(update.from_user.id) in Config.BANNED_USERS:
        bot.send_message(chat_id=update.chat.id,
                         text=Translation.ABUSIVE_USERS,
                         reply_to_message_id=update.message_id,
                         disable_web_page_preview=True,
                         parse_mode=pyrogram.ParseMode.HTML)
        return
    logger.info(update.from_user)
    if update.reply_to_message is not None:
        reply_message = update.reply_to_message
        download_location = Config.DOWNLOAD_LOCATION + "/"
        start = datetime.now()
        a = bot.send_message(chat_id=update.chat.id,
                             text=Translation.DOWNLOAD_START,
                             reply_to_message_id=update.message_id)
        c_time = time.time()
        after_download_file_name = bot.download_media(
            message=reply_message,
            file_name=download_location,
            progress=progress_for_pyrogram,
            progress_args=(Translation.DOWNLOAD_START, a.message_id,
                           update.chat.id, c_time))
        download_extension = after_download_file_name.rsplit(".", 1)[-1]
        upload_name = after_download_file_name.rsplit("/", 1)[-1]
        upload_name = upload_name.replace(" ", "_")
        bot.edit_message_text(text=Translation.SAVED_RECVD_DOC_FILE,
                              chat_id=update.chat.id,
                              message_id=a.message_id)
        end_one = datetime.now()
        if str(update.from_user.id) in Config.G_DRIVE_AUTH_DRQ:
            gauth = Config.G_DRIVE_AUTH_DRQ[str(update.from_user.id)]
            # Create GoogleDrive instance with authenticated GoogleAuth instance.
            drive = GoogleDrive(gauth)
            file_instance = drive.CreateFile()
            # Read file and set it as a content of this instance.
            file_instance.SetContentFile(after_download_file_name)
            file_instance.Upload()  # Upload the file.
            end_two = datetime.now()
            time_taken_for_upload = (end_two - end_one).seconds
            logger.info(file_instance)
            # file metadata is dict-like; webContentLink is a key, not an attribute
            adfulurl = file_instance['webContentLink']
            t_response_arry = adfulurl  # reused in the final message below
            max_days = 0
        else:
            url = "https://srv-file5.gofile.io/upload"
            max_days = 5
            timeseconds = int(time.time())
            timesecondsplusexpiry = int(
                time.time()) + (max_days * 24 * 60 * 60)
            command_to_exec = [
                "curl", "-F", "filesUploaded=@" + after_download_file_name,
                "-F", "expire=" + str(timesecondsplusexpiry), "-F",
                "category=file", "-F", "comments=0", url
            ]

            bot.edit_message_text(text=Translation.UPLOAD_START,
                                  chat_id=update.chat.id,
                                  message_id=a.message_id)
            try:
                logger.info(command_to_exec)
                t_response = subprocess.check_output(command_to_exec,
                                                     stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as exc:
                logger.info("Status : FAIL %s %s", exc.returncode, exc.output)
                bot.edit_message_text(chat_id=update.chat.id,
                                      text=exc.output.decode("UTF-8"),
                                      message_id=a.message_id)
                return False
            else:
                logger.info(t_response)
                print(t_response)
                t_response_arry = "https://gofile.io/?c=" + json.loads(
                    t_response.decode("UTF-8").split("\n")
                    [-1].strip())['data']['code']

                #shorten_api_url = "http://ouo.io/api/{}?s={}".format(Config.OUO_IO_API_KEY, t_response_arry)
                #adfulurl = requests.get(shorten_api_url).text
        bot.edit_message_text(chat_id=update.chat.id,
                              text=Translation.AFTER_GET_DL_LINK.format(
                                  t_response_arry, max_days),
                              parse_mode=pyrogram.ParseMode.HTML,
                              message_id=a.message_id,
                              disable_web_page_preview=True)
        try:
            os.remove(after_download_file_name)
        except:
            pass
    else:
        bot.send_message(chat_id=update.chat.id,
                         text=Translation.REPLY_TO_DOC_GET_LINK,
                         reply_to_message_id=update.message_id)
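
The curl subprocess above shells out just to POST a multipart form. A sketch of the same upload done in-process with the requests library (an assumption: requests is not imported by the original, and the endpoint and form fields are copied from the command above, not from current gofile documentation):

import time

import requests


def gofile_upload(file_path, max_days=5):
    # Same endpoint and form fields as the curl command above.
    url = "https://srv-file5.gofile.io/upload"
    expire = int(time.time()) + max_days * 24 * 60 * 60
    with open(file_path, "rb") as fh:
        response = requests.post(
            url,
            data={"expire": str(expire), "category": "file", "comments": "0"},
            files={"filesUploaded": fh},
        )
    response.raise_for_status()
    # The curl variant parses the last line of stdout; requests exposes the JSON body directly.
    return "https://gofile.io/?c=" + response.json()["data"]["code"]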
Example #17
    def handle_action_tag(self, ttype, data):
        logging.debug("Open : %s", data)
        gauth = GoogleAuth()
        gauth.LoadCredentialsFile("mycreds.txt")
        check = os.stat("mod/hashtags").st_size

        ######---find FOLDER ID
        #file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
        #for file1 in file_list:
        #	print('title: %s, id: %s' % (file1['title'], file1['id']))

        ######---authentication
        if gauth.credentials is None:
            gauth.LocalWebserverAuth()
        elif gauth.access_token_expired:
            gauth.Refresh()
        else:
            gauth.Authorize()
        gauth.SaveCredentialsFile("mycreds.txt")
        # Create the Drive client only after authentication has completed.
        drive = GoogleDrive(gauth)

        if (data == 'DOCS'):

            ### upload achat and depense
            file1 = drive.CreateFile({
                'parents': [{
                    "id":
                    '0B8mDDuHeuNHDfmM0OXlWTndpdkczNHBBY3VJaXJ2ZlNqVVBoWWk3UDZnc0NvMS1Gd1JtWU0'
                }]
            })
            file1.SetContentFile('mod/achat.txt')
            file1.Upload()

            file2 = drive.CreateFile({
                'parents': [{
                    "id":
                    '0B8mDDuHeuNHDfmM0OXlWTndpdkczNHBBY3VJaXJ2ZlNqVVBoWWk3UDZnc0NvMS1Gd1JtWU0'
                }]
            })
            file2.SetContentFile('mod/depense.txt')
            file2.Upload()
            logging.debug("Upload done.")

        if (data == 'PICS'):
            file = open('mod/filepath', 'r')
            path = file.readlines()
            pic = path[0]

            if (check == 0):
                ### list every photo
                onlyfiles = [
                    f for f in listdir('/home/pi/images/')
                    if isfile(join('/home/pi/images/', f))
                ]

                ### upload pic
                file1 = drive.CreateFile({
                    'parents': [{
                        "id":
                        '0B8mDDuHeuNHDfmM0OXlWTndpdkczNHBBY3VJaXJ2ZlNqVVBoWWk3UDZnc0NvMS1Gd1JtWU0'
                    }]
                })
                file1.SetContentFile(pic)
                file1.Upload()
                logging.debug("Upload done.")

                file = open('mod/filepath', 'w').close()
            else:
                file = open('mod/hashtags', 'r')
                hash = file.readlines()
                hashtag = hash[0]
                hashtag = hashtag[1:-1]

                file_list = drive.ListFile({
                    'q':
                    "'root' in parents and trashed=false"
                }).GetList()
                for file1 in file_list:
                    print('title: %s, id: %s' % (file1['title'], file1['id']))
                    print(hashtag)
                    print(file1['title'])
                    if (file1['title'] == hashtag):
                        file1 = drive.CreateFile(
                            {'parents': [{
                                "id": file1['id']
                            }]})
                        file1.SetContentFile(pic)
                        file1.Upload()
                        logging.debug("Upload done.")

                file = open('mod/filepath', 'w').close()
                file = open('mod/hashtags', 'w').close()
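
The three upload blocks above repeat the same CreateFile/SetContentFile/Upload sequence against one hard-coded folder ID. A small helper, sketched from only the pydrive calls already used in this example, would remove the duplication:

def upload_to_folder(drive, folder_id, local_path):
    # Create a file whose parent is the given Drive folder and upload local_path as its content.
    f = drive.CreateFile({'parents': [{'id': folder_id}]})
    f.SetContentFile(local_path)
    f.Upload()
    return f

Each block then collapses to a single call such as upload_to_folder(drive, FOLDER_ID, 'mod/achat.txt'), with FOLDER_ID holding the long ID string once.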
Example #18
def callback(bot, update):
    conn = sqlite3.connect('data/DMI_DB.db')
    keyboard2 = [[]]
    icona = ""
    number_row = 0
    number_array = 0

    update.callback_query.data = update.callback_query.data.replace("Drive_", "")
    # print('Callback query data: ' + str(update.callback_query.data))
    if len(update.callback_query.data) < 13:
        #conn.execute("DELETE FROM 'Chat_id_List'")
        array_value = update['callback_query']['message']['text'].split(" ")
        try:
            if len(array_value) == 4:
                array_value.insert(0, "None")

            if len(array_value) == 5:
                # Parameterized query instead of string concatenation.
                conn.execute("INSERT INTO 'Chat_id_List' VALUES (?,?,?,?,?)",
                             (update.callback_query.data, array_value[4], array_value[1], array_value[2], array_value[3]))
                bot.sendMessage(chat_id=update.callback_query.data, text="🔓 La tua richiesta è stata accettata. Leggi il file README")
                bot.sendDocument(chat_id=update.callback_query.data, document=open('data/README.pdf', 'rb'))

                request_elimination_text = "Richiesta di " + str(array_value[1]) + " " + str(array_value[2]) + " estinta"
                bot.editMessageText(text=request_elimination_text, chat_id=config_map['dev_group_chatid'], message_id=update.callback_query.message.message_id)

                bot.sendMessage(chat_id=config_map['dev_group_chatid'], text=str(array_value[1]) + " " + str(array_value[2]) + " è stato inserito nel database")

            elif len(array_value) == 4:
                conn.execute("INSERT INTO 'Chat_id_List'('Chat_id','Nome','Cognome','Email') VALUES (?,?,?,?)",
                             (update.callback_query.data, array_value[1], array_value[2], array_value[3]))
                bot.sendMessage(chat_id=update.callback_query.data, text="🔓 La tua richiesta è stata accettata. Leggi il file README")
                bot.sendDocument(chat_id=update.callback_query.data, document=open('data/README.pdf', 'rb'))

            else:
                bot.sendMessage(chat_id=config_map['dev_group_chatid'], text=str("ERRORE INSERIMENTO: ") + str(update['callback_query']['message']['text']) + " " + str(update['callback_query']['data']))
            conn.commit()
        except Exception as error:
            print(error)
            bot.sendMessage(chat_id=config_map['dev_group_chatid'], text=str("ERRORE INSERIMENTO: ") + str(update['callback_query']['message']['text']) + " " + str(update['callback_query']['data']))

        text = ""

    else:
        pid = os.fork()
        if (pid == 0):
            settings_file = "config/settings.yaml"
            gauth2 = GoogleAuth(settings_file=settings_file)
            gauth2.CommandLineAuth()
            # gauth2.LocalWebserverAuth()
            drive2 = GoogleDrive(gauth2)
            bot2 = telegram.Bot(TOKEN)

            file1 = drive2.CreateFile({'id': update.callback_query.data})
            if file1['mimeType'] == "application/vnd.google-apps.folder":
                file_list2 = None

                try:
                    instance_file = drive2.ListFile({'q': "'"+file1['id']+"' in parents and trashed=false", 'orderBy': 'folder,title'})
                    file_list2 = instance_file.GetList()
                    with open("./logs/debugDrive.txt", "a") as debugfile:
                        debugfile.write("- Log time:\n {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                        debugfile.write("- File:\n {}".format(str(json.dumps(file1))))
                        debugfile.write("- InstanceFile:\n {}".format(str(json.dumps(instance_file))))
                        debugfile.write("- FileList:\n {}".format(str(json.dumps(file_list2))))
                        debugfile.write("\n------------\n")
                except Exception as e:
                    with open("./logs/debugDrive.txt", "a") as debugfile:
                        debugfile.write("- Log time:\n {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                        debugfile.write("- Error:\n {}".format(e))
                        debugfile.write("\n------------\n")
                    print("- Drive error: {}".format(e))
                    bot2.sendMessage(chat_id=update['callback_query']['from_user']['id'], text="Si è verificato un errore, ci scusiamo per il disagio. Contatta i devs. /help")
                    sys.exit(0)

                formats = {
                    **{"pdf": "📕 "},
                    **dict.fromkeys(["doc", "docx", "txt"], "📘 "),
                    **dict.fromkeys(["jpg", "png", "gif"], "📷 "),
                    **dict.fromkeys(["rar", "zip"], "🗄 "),
                    **dict.fromkeys(["out", "exe"], "⚙ "),
                    **dict.fromkeys(["c", "cpp", "h", "py", "java", "js", "html", "php"], "💻 ")
                }

                for file2 in file_list2:

                    if file2['mimeType'] == "application/vnd.google-apps.folder":
                        if number_row >= 1:
                            keyboard2.append([InlineKeyboardButton("🗂 "+file2['title'], callback_data="Drive_" + file2['id'])])
                            number_row = 0
                            number_array += 1
                        else:
                            keyboard2[number_array].append(InlineKeyboardButton("🗂 "+file2['title'], callback_data="Drive_" + file2['id']))
                            number_row += 1
                    else:
                        file_format = file2['title'][-5:] # get last 5 characters of strings
                        file_format = file_format.split(".") # split file_format per "."
                        file_format = file_format[len(file_format)-1] # get last element of file_format

                        icona = "📄 "

                        if file_format in formats.keys():
                            icona = formats[file_format]

                        if number_row >= 1:
                            keyboard2.append([InlineKeyboardButton(icona+file2['title'], callback_data="Drive_" + file2['id'])])
                            number_row = 0
                            number_array += 1
                        else:
                            keyboard2[number_array].append(InlineKeyboardButton(icona+file2['title'], callback_data="Drive_" + file2['id']))
                            number_row += 1

                if len(file1['parents']) > 0 and file1['parents'][0]['id'] != '0ADXK_Yx5406vUk9PVA':
                    keyboard2.append([InlineKeyboardButton("🔙", callback_data="Drive_" + file1['parents'][0]['id'])])

                reply_markup3 = InlineKeyboardMarkup(keyboard2)
                bot2.sendMessage(chat_id=update['callback_query']['from_user']['id'], text=file1['title']+":", reply_markup=reply_markup3)

            elif file1['mimeType'] == "application/vnd.google-apps.document":
                bot2.sendMessage(chat_id=update['callback_query']['from_user']['id'], text="Impossibile scaricare questo file poichè esso è un google document, Andare sul seguente link")
                bot2.sendMessage(chat_id=update['callback_query']['from_user']['id'], text=file1['exportLinks']['application/pdf'])

            else:
                try:
                    file_d = drive2.CreateFile({'id': file1['id']})
                    if int(file_d['fileSize']) < 5e+7:
                        file_d.GetContentFile('file/'+file1['title'])
                        file_s = file1['title']
                        filex = open(str("file/" + file_s), "rb")
                        bot2.sendChatAction(chat_id=update['callback_query']['from_user']['id'], action="UPLOAD_DOCUMENT")
                        bot2.sendDocument(chat_id=update['callback_query']['from_user']['id'], document=filex)
                        os.remove(str("file/" + file_s))
                    else:
                        bot2.sendMessage(chat_id=update['callback_query']['from_user']['id'], text="File troppo grande per il download diretto, scarica dal seguente link")
                        # file_d['downloadUrl']
                        bot2.sendMessage(chat_id=update['callback_query']['from_user']['id'], text=file_d['alternateLink'])
                except Exception as e:
                    print("- Drive error: {}".format(e))
                    bot2.sendMessage(chat_id=update['callback_query']['from_user']['id'], text="Impossibile scaricare questo file, contattare gli sviluppatori del bot")
                    open("logs/errors.txt", "a+").write(str(e) + str(file_d['title'])+"\n")

            sys.exit(0)

        os.waitpid(pid, 0)
    conn.close()
Example #19
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import os
import time
import stat

gAuth = GoogleAuth()
drive = GoogleDrive(gAuth)
time_to_sleep = 5 * 60
file_list = []


def getNewPhotos(folder_name=None):
    authenticateUser()
    walktree('./pictures', addtolist)
    toDownload = getFileList(folder_name)
    downloadFiles(toDownload)
    time.sleep(time_to_sleep)


def walktree(top, callback):
    """recursively descend the directory tree rooted at top, calling the
    callback function for each regular file. Taken from the module-stat
    example at: http://docs.python.org/lib/module-stat.html
    """
    for f in os.listdir(top):
        pathname = os.path.join(top, f)
        mode = os.stat(pathname)[stat.ST_MODE]
        if stat.S_ISDIR(mode):
            # It's a directory, recurse into it
            walktree(pathname, callback)
        elif stat.S_ISREG(mode):
            # It's a regular file, call the callback function
            callback(pathname)
        else:
            # Unknown file type, skip it
            print('Skipping %s' % pathname)
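
For reference, a minimal use of walktree with a throwaway callback (the callback name here is hypothetical; getNewPhotos above passes addtolist in the same way):

def print_path(pathname):
    # Example callback: report every regular file found under the tree.
    print('visiting %s' % pathname)

walktree('./pictures', print_path)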
Example #20
 def __init__(self, credsfile='mycreds.txt'):
     from pydrive.drive import GoogleDrive
     self.gdrive = GoogleDrive(self._get_auth(credsfile))
def auth_drive():
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()
    drive = GoogleDrive(gauth)
    return drive
def addtoGDrive(pdfloc, pdfname):
    """

    Uploads a water quality PDF file to the Google Drive account under the
    My Drive\Projects\Water_Quality\pdf location, id: 1GRunRWB7SKmH3I0wWbtyJ_UOCDiHGAxO

    Parameters
    ----------
    pdfloc: String. Path of the PDF to upload, including the filename
    pdfname: String. Filename of the PDF to upload; the file is uploaded under this name.

    Returns
    -------
    None. Progress and errors are written to the application log.
    """
    try:
        application.logger.debug(
            "Attempting to authenticate with Google Drive API through saved OAuth credentials"
        )
        # Override the location of client_secrets.json: application.py sits one level
        # above this file, so the default relative path doesn't resolve when this
        # function is called. The file itself stays where it is so that quickstart.py
        # can still be run directly if needed.
        GoogleAuth.DEFAULT_SETTINGS['client_config_file'] = os.path.join(
            app.root_path, 'WebAppProjects', 'WaterQualityViewer',
            'credentials.json')
        # Alternative: authenticate on the command line; connect to the host, visit the
        # URL, log in/authorize with Google, then paste the provided code into the terminal.
        # gauth.CommandLineAuth()

        # Create an authenticated GoogleDrive instance using the settings.yaml file
        # to auto-authenticate with saved credentials
        gauth = GoogleAuth(
            settings_file=os.path.join(app.root_path, 'WebAppProjects',
                                       'WaterQualityViewer', 'settings.yaml'))

        # Establish connection with Google Drive API
        drive = GoogleDrive(gauth)
        application.logger.debug("Connected to Google Drive account")
        # Create a new GoogleDriveFile instance with a PDF mimetype in the water quality PDF folder using the parent folder's ID
        newfile = drive.CreateFile({
            "title":
            pdfname,
            'mimeType':
            'application/pdf',
            'parents': [{
                'id': "1GRunRWB7SKmH3I0wWbtyJ_UOCDiHGAxO"
            }]
        })
        # Read file and set the content of the new file instance
        newfile.SetContentFile(pdfloc)
        # Upload the file to Google Drive
        newfile.Upload()
        # print("File uploaded to Google Drive!")
        application.logger.debug(f"File {pdfname} uploaded to Google Drive!")
    except Exception as e:
        print("GoogleDrive upload threw an error, emailing exception")
        application.logger.error("Failed to upload to Google Drive account")
        application.logger.error(e)
        errorEmail.sendErrorEmail(script="GoogleDrive",
                                  exceptiontype=e.__class__.__name__,
                                  body=e)


# testing
# pdfloc = r"G:\My Drive\Projects\test_documents\Ocean_Water_Quality_Report_testing_20201002.pdf"
# pdfname = r"Ocean_Water_Quality_Report_testing_20201002.pdf"
#
# addtoGDrive(pdfloc, pdfname)
Example #23
def googledrive_login():
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()

    return GoogleDrive(gauth)
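
Callers get back a ready-to-use GoogleDrive handle; for example, listing the top-level folder ('root' is the Drive API alias used in the other examples here):

drive = googledrive_login()
for f in drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList():
    print(f['title'])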
Example #24
import os
import zipfile

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive

error_found     = False
counter         = 0
occurrence      = 0
current_workdir = os.getcwd()                  # Getting Current working directory Information
xmlFile         = current_workdir + '/' + 'apple_health_export' + '/' + 'export.xml'
beatFile        = current_workdir + '/' + 'heartbeat.csv'
hBFile          = current_workdir + '/' + 'heartbeat_List.csv'



print("+++ Message : Working Directory = " + current_workdir)

# Connecting to Google Drive
gLogin          = GoogleAuth()
gLogin.LocalWebserverAuth()                    # Opens a Google login page; select the account that should access the program
drive           = GoogleDrive(gLogin)

heartBeat       = '1aoURMlMlb0DYPeI2vcM7nGKI44uMnu_I' # Shared Folder ID - Folder Name "HearBeat"
data_files      = drive.ListFile({'q':"'"+heartBeat+"' in parents and trashed=false"}).GetList()

for file1 in data_files:
    print('+++ Message : Downloading File = %s' % (file1['title']))
    file1.GetContentFile(file1['title'])

if not os.path.isfile(current_workdir + '/' + 'export.zip'):
    print('!!! Error : Unable to locate data folder - export.zip ')
    error_found = True
else:
    # Extract zip folder
    with zipfile.ZipFile(current_workdir + '/' + 'export.zip',"r") as zip_ref:
        zip_ref.extractall()
Example #25
    def drive(self):
        from pydrive.auth import RefreshError

        if not hasattr(self, "_gdrive"):
            from pydrive.auth import GoogleAuth
            from pydrive.drive import GoogleDrive

            if os.getenv(RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA):
                with open(
                    self.gdrive_user_credentials_path, "w"
                ) as credentials_file:
                    credentials_file.write(
                        os.getenv(RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA)
                    )

            GoogleAuth.DEFAULT_SETTINGS["client_config_backend"] = "settings"
            GoogleAuth.DEFAULT_SETTINGS["client_config"] = {
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "auth_uri": "https://accounts.google.com/o/oauth2/auth",
                "token_uri": "https://oauth2.googleapis.com/token",
                "revoke_uri": "https://oauth2.googleapis.com/revoke",
                "redirect_uri": "",
            }
            GoogleAuth.DEFAULT_SETTINGS["save_credentials"] = True
            GoogleAuth.DEFAULT_SETTINGS["save_credentials_backend"] = "file"
            GoogleAuth.DEFAULT_SETTINGS[
                "save_credentials_file"
            ] = self.gdrive_user_credentials_path
            GoogleAuth.DEFAULT_SETTINGS["get_refresh_token"] = True
            GoogleAuth.DEFAULT_SETTINGS["oauth_scope"] = [
                "https://www.googleapis.com/auth/drive",
                "https://www.googleapis.com/auth/drive.appdata",
            ]

            # Pass a non-existent settings path to force DEFAULT_SETTINGS loading
            gauth = GoogleAuth(settings_file="")

            try:
                gauth.CommandLineAuth()
            except RefreshError as exc:
                raise GDriveAccessTokenRefreshError(
                    "Google Drive's access token refresh failed"
                ) from exc
            except KeyError as exc:
                raise GDriveMissedCredentialKeyError(
                    "Google Drive's user credentials file '{}' "
                    "is missing a value for key '{}'".format(
                        self.gdrive_user_credentials_path, str(exc)
                    )
                ) from exc
            # Handle pydrive.auth.AuthenticationError and other auth failures
            except Exception as exc:
                raise DvcException(
                    "Google Drive authentication failed"
                ) from exc
            finally:
                if os.getenv(RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA):
                    os.remove(self.gdrive_user_credentials_path)

            self._gdrive = GoogleDrive(gauth)

            self.remote_root_id = self.get_remote_id(
                self.path_info, create=True
            )
            self._cached_dirs, self._cached_ids = self.cache_root_dirs()

        return self._gdrive
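
A sketch of how the credentials-from-environment branch above can be exercised, for example in CI (the attribute holding the variable name is taken from the code above; the local filename is hypothetical). The property writes the value to gdrive_user_credentials_path, authenticates, and removes the file again in the finally block:

import os

# Hypothetical local file holding previously saved user credentials.
with open("gdrive-user-credentials.json") as fh:
    os.environ[RemoteGDrive.GDRIVE_USER_CREDENTIALS_DATA] = fh.read()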
Example #26
 def __init__(self, settings_file="settings.yaml"):
     self.gauth = GoogleAuth(settings_file)
     self.gauth.ServiceAuth()
     self.drive = GoogleDrive(self.gauth)        
Example #27
 def __init__(self):
     """Create an instance of UploadDrive."""
     self.gauth = GoogleAuth()
     self.drive = GoogleDrive(self.gauth)
Example #28
def main():
    print("\n--------Exporting {} Applications--------".format(year))

    # Import Qualtrics Data for use here- this is only the data, no file uploads
    print("Importing Data...")
    regs = ImportRegistrationData()
    apps = ImportApplicationData()
    recs = ImportRecommendationData()
    dems = ImportDemographicData()

    # Join registration data with application data
    print("Combining data...")
    for app in apps:
        for reg in regs:
            if reg["Email"] == app["Email"]:
                app.update(reg)
                break

    # Join recommendation data with application data
    for app in apps:
        # Save number of recommenders
        recCount = 0
        for rec in recs:
            # Link recommenders with applications
            for num in range(1, 5):
                if app["Rec{}Email".format(num)] != "":
                    if app["Email"] == rec["AppEmail"] and app[
                            "Rec{}Email".format(num)] == rec["Email"]:
                        app["Rec{}ID".format(num)] = rec["recID"]
                        recCount += 1
        app["RecCount"] = recCount

    # Join demographic info with applications
    for dem in dems:
        for app in apps:
            if dem["AppID"] == app["AppID"]:
                app.update(dem)

    # Create and/or clean up workspace for files
    print("Creating folder for applications...")
    appFolder = "../{}_Applications".format(year)
    if not os.path.exists(appFolder):
        # Create workspace (e.g. folder to hold applications)
        os.makedirs(appFolder)
    else:
        # Clean up workspace (e.g. delete all files in folder)
        for file in os.listdir(appFolder):
            file_path = os.path.join(appFolder, file)
            if os.path.isfile(file_path):
                os.remove(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)

    # Make section template PDFs
    templates = [
        "Cover Page", "Statement of Interest", "CV or Resume",
        "(Unofficial) Transcript", "Recommendation Letter #1",
        "Recommendation Letter #2", "Recommendation Letter #3",
        "Recommendation Letter #4"
    ]
    for template in templates:
        MakeSectionPdf(template)

    # Make application PDFs
    print("\n--------Making PDFs--------")
    appCount = 1
    for app in apps:
        print("Starting Application {} of {}...".format(appCount, len(apps)))

        #Create dictionary to hold PDF pages
        docs = collections.OrderedDict()

        # Make SOI first (basic info last, to check if all parts submitted)
        MakeSoiPdf(app)
        soi = GetPdf("{}_SOI.pdf".format(app["AppID"]))
        docs["Statement of Interest"] = soi

        # Get CV
        cvExists = False
        cv = GetPdf("../Summer_Course_{}_Application/Q12/{}*.pdf".format(
            year, app["AppID"]))
        if cv:
            docs["CV or Resume"] = cv
            cvExists = True

        # Get transcript
        transcriptExists = False
        transcript = GetPdf(
            "../Summer_Course_{}_Application/Q11/{}*.pdf".format(
                year, app["AppID"]))
        if transcript:
            docs["(Unofficial) Transcript"] = transcript
            transcriptExists = True

        # Get recommendation letters and add it to WIP PDF
        letterExists = [None]
        for num in range(1, 5):
            letterExists.append(False)
            if "Rec{}ID".format(num) in app.keys():
                letter = GetPdf("../Q1/{}*.pdf".format(
                    app["Rec{}ID".format(num)]))
                if letter:
                    docs["Recommendation Letter #" + str(num)] = letter
                    letterExists[num] = True

        # Dictionary of Existence
        fileExists = {
            "CV": cvExists,
            "Transcript": transcriptExists,
            "Letters": letterExists
        }

        # Make Cover Page
        completed = MakeCoverPage(app, fileExists)

        # Get Cover Page
        cover = GetPdf("{}_cover.pdf".format(app["AppID"]))

        # Add pages to PDF (with header and watermark, if appropriate)
        appPdf = PdfFileWriter()
        pages = AddHeader(cover.pages, app)
        pages = AddSection(pages, "Cover Page")
        if not completed:
            pages = AddWatermark(pages)
        for page in pages:
            appPdf.addPage(page)

        for section, doc in docs.items():
            pages = AddHeader(doc.pages, app)
            pages = AddSection(pages, section)
            if not completed:
                pages = AddWatermark(pages)
            for page in pages:
                appPdf.addPage(page)

        # Write PDF
        with open(
                "../{}_Applications/{}_{}.pdf".format(year, app["Last"],
                                                      app["AppID"]), "wb") as appStream:
            appPdf.write(appStream)

        # Increase count for display
        appCount += 1

    print("\n--------Post-Processing PDFs--------")

    # Delete temporary files
    print("Deleting Temporary Files...")
    filesToDelete = ["SOI", "cover", "WIP", "Header"]
    for ext in filesToDelete:
        for file in glob("*_{}.pdf".format(ext)):
            os.remove(file)

    # The section template PDFs share their names with the templates list above.
    for template in templates:
        os.remove("{}.pdf".format(template))

    # Create applicant CSV file
    print("Creating Applicant CSV File...")
    with open("../{}_Applications/{} Applicants.csv".format(year, year),
              "w") as appCsv:
        csvHeader = [
            "AppID", "First", "Last", "Email", "Gender", "Hispanic", "Race",
            "Education"
        ]
        writer = csv.DictWriter(appCsv,
                                fieldnames=csvHeader,
                                restval="ERROR",
                                extrasaction="ignore")
        writer.writeheader()
        for app in apps:
            writer.writerow(app)

    print("\n--------Uploading files to Google Drive--------")

    # Authenticate Google Drive
    gauth = GoogleAuth()

    # Create local webserver and auto-handle authentication.
    gauth.LocalWebserverAuth()

    # Create GoogleDrive instance with authenticated GoogleAuth instance.
    drive = GoogleDrive(gauth)

    # Delete all old application files
    file_list = drive.ListFile({
        'q':
        "'{}' in parents and trashed=false".format(GDriveDestID)
    }).GetList()
    for file in file_list:
        if ("R_" in file["title"]
                and ".pdf" in file["title"]) or ".csv" in file["title"]:
            file.Delete()

    # Upload files to Google Drive
    appCount = 1
    print("\n\n--------Starting Drive Upload--------")
    for app in apps:
        print("Uploading {} of {}...".format(appCount, len(apps)))
        file = drive.CreateFile({
            "parents": [{
                "kind": "drive#fileLink",
                "id": "{}".format(GDriveDestID)
            }],
            "title":
            "{}: {}.pdf".format(app["Last"], app["AppID"])
        })

        # Read file and set it as a content of this instance.
        file.SetContentFile("../{}_Applications/{}_{}.pdf".format(
            year, app["Last"], app["AppID"]))
        file.Upload()  # Upload the file.
        appCount += 1

    file = drive.CreateFile({
        "parents": [{
            "kind": "drive#fileLink",
            "id": "{}".format(GDriveDestID)
        }],
        "title":
        "{} Applicants.csv".format(year)
    })
    file.SetContentFile("../{}_Applications/{} Applicants.csv".format(
        year, year))
    file.Upload()

    print("\n--------Distributing Applications--------")

    print("Getting application links...")
    app_list = drive.ListFile({
        'q':
        "'{}' in parents and trashed=false".format(GDriveDestID)
    }).GetList()
    appPdfs = []
    for file in app_list:
        if "R_" in file["title"] and ".pdf" in file["title"]:
            appPdfs.append(file)

    print("Apportioning applications...")
    # Divide up applications
    numApps = len(appPdfs)
    numReviewers = len(appReviewers)
    # Each application is reviewed twice, so the per-reviewer burden is 2N/R.
    reviewBurden = (numApps * 2) // numReviewers

    # Generate list to sample from
    appsToReview = appPdfs + appPdfs
    random.shuffle(appsToReview)

    # Make list of reviewers
    reviewers = []
    for reviewer, email in appReviewers.items():
        reviewers.append({"Name": reviewer, "Email": email, "Apps": []})

    # Distribute applications
    while appsToReview:
        for reviewer in reviewers:
            if appsToReview:
                selection = random.choice(appsToReview)
                if selection not in reviewer["Apps"]:
                    reviewer["Apps"].append(selection)
                    appsToReview.remove(selection)

    print("Emailing applications...")
    #Create string of application links
    for reviewer in reviewers:
        reviewer["AppLinks"] = ""
        reviewer["HtmlLinks"] = ""
        for app in reviewer["Apps"]:
            reviewer["AppLinks"] += "{}: {}\n\t\t\t".format(
                app["title"].replace(".pdf", ""), app["webContentLink"])
            reviewer["HtmlLinks"] += '<p><a href="{}">{}</a></p>'.format(
                app["webContentLink"], app["title"].replace(".pdf", ""))

    # Create the base text message.
    for reviewer in reviewers:
        msg = EmailMessage()
        msg['Subject'] = "{} NCAN Summer Course Application Evaluations".format(
            year)
        msg['From'] = Address("William Schmitt",
                              addr_spec="*****@*****.**")
        msg['To'] = Address(reviewer["Name"], addr_spec=reviewer["Email"])
        msg.set_content("""\
            Dear {},

            The application window for the {} NCAN Summer Course is now closed!
            As such, it is now time for you to begin reviewing applications to
            determine who should be admitted to the Course. There were {} 
            applications this year, so we need you to review {} applications. In
            an attempt to streamline this process, we have created an evaluation
            form that you can quickly and easily fill out for each application.
            This form is located here (https://goo.gl/forms/AHxAvtZDglX54DWd2).
            NOTE: this form and all links below require the use of your 
            @neurotechcenter.org account. Please make sure you are logged into
            your account (if you are not, you will be prompted to do so when 
            you click on the link).

            The applications you have been assigned are: 
            (listed as lastName: Applicant ID)
            {}

            The links above should automatically download each application to
            your computer for easy viewing, but in case they do not work, you
            should be able to access all applications here
            (https://drive.google.com/drive/folders/0B67b4FFl6pYlVnY2cVpFbjlGdmM?usp=sharing).

            Thank you for the anticipated time and attention you will spend reviewing
            these applications. If you have any questions about the process, please
            feel free to contact Billy or Dr. Carp.

            Thank you,
            The NCAN Summer Course Bot""".format(reviewer["Name"], year,
                                                 numApps, reviewBurden,
                                                 reviewer["AppLinks"]))

        # Add the html version.  This converts the message into a multipart/alternative
        # container, with the original text message as the first part and the new html
        # message as the second part.
        msg.add_alternative("""\
        <html>
          <head></head>
          <body>
            <p>Dear {},</p>
            <p>
                The application window for the {} NCAN Summer Course is now 
                closed! As such, it is now time for you to begin reviewing applications
                to determine who should be admitted to the Course. There were {} 
                applications this year, so we need you to review {} applications. 
                In an attempt to streamline this process, we have created an 
                evaluation form that you can quickly and easily fill out for each 
                application. This form is located 
                <a href="https://goo.gl/forms/PftRKWtL6SnG1Ozp1">here</a>. 
                NOTE: this form and all links below require the use of your 
                @neurotechcenter.org account. Please make sure you are logged 
                into your account (if you are not, you will be prompted to do so
                when you click on the link).
            </p>
            <p>The applications you have been assigned are:</p>
            (listed as lastName: Applicant ID)
            {}
            <p>
                The links above should automatically download each application to 
                your computer for easy viewing, but in case they do not work, you 
                should be able to access all applications 
                <a href="https://drive.google.com/drive/folders/0B67b4FFl6pYlVnY2cVpFbjlGdmM?usp=sharing">here</a>.
            </p>
            <p>
            Thank you for the anticipated time and attention you will spend 
            reviewing these applications. If you have any questions about the 
            process, please feel free to contact Billy or Dr. Carp.</p>
            <p>Thank you,</p>
            <p>The NCAN Summer Course Bot</p>
          </body>
        </html>
        """.format(reviewer["Name"], year, numApps, reviewBurden,
                   reviewer["HtmlLinks"]),
                            subtype='html')

        # Send the message via local SMTP server.
        with smtplib.SMTP('smtp.gmail.com', 587) as server:  #port 465 or 587
            server.ehlo()
            server.starttls()
            server.ehlo()
            server.login('*****@*****.**', password)
            server.send_message(msg)

    print("\n--------Success! All Done.--------")
Example #29
Requires correct client_secrets, credentials, and settings files.
"""
gauth = GoogleAuth(os.getcwd() + "/settings.yaml")
gauth.LoadCredentialsFile("mycreds.txt")
if gauth.credentials is None:
    # Authenticate if they're not there
    gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
    # Refresh them if expired
    gauth.Refresh()
else:
    # Initialize the saved creds
    gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("mycreds.txt")
drive = GoogleDrive(gauth)


def remove_file(filename):
    """
    Determines if a file exists before trying to delete it
    :param filename: str, name of file to delete
    :return: False if the file doesn't exist, True once it has been deleted
    """
    if os.path.exists(filename):
        os.remove(filename)
    else:
        return False
    return True
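
Usage is then a simple truth check (the filename here is hypothetical):

if not remove_file("stale_upload.pdf"):
    print("nothing to remove")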

Example #30
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)
        try:
            global pydrive
            import httplib2
            from apiclient.discovery import build
            from pydrive.auth import GoogleAuth
            from pydrive.drive import GoogleDrive
            from pydrive.files import ApiRequestError, FileNotUploadedError
        except ImportError as e:
            raise BackendException(u"""\
PyDrive backend requires PyDrive installation.  Please read the manpage for setup details.
Exception: %s""" % str(e))

        # let the user get by with the old oauth2client while they can
        try:
            from oauth2client.client import SignedJwtAssertionCredentials
            self.oldClient = True
        except ImportError:
            from oauth2client.service_account import ServiceAccountCredentials
            from oauth2client import crypt
            self.oldClient = False

        if u'GOOGLE_DRIVE_ACCOUNT_KEY' in os.environ:
            account_key = os.environ[u'GOOGLE_DRIVE_ACCOUNT_KEY']
            if self.oldClient:
                credentials = SignedJwtAssertionCredentials(
                    parsed_url.username + u'@' + parsed_url.hostname,
                    account_key,
                    scopes=u'https://www.googleapis.com/auth/drive')
            else:
                signer = crypt.Signer.from_string(account_key)
                credentials = ServiceAccountCredentials(
                    parsed_url.username + u'@' + parsed_url.hostname,
                    signer,
                    scopes=u'https://www.googleapis.com/auth/drive')
            credentials.authorize(httplib2.Http())
            gauth = GoogleAuth()
            gauth.credentials = credentials
        elif u'GOOGLE_DRIVE_SETTINGS' in os.environ:
            gauth = GoogleAuth(
                settings_file=os.environ[u'GOOGLE_DRIVE_SETTINGS'])
            gauth.CommandLineAuth()
        elif (u'GOOGLE_SECRETS_FILE' in os.environ
              and u'GOOGLE_CREDENTIALS_FILE' in os.environ):
            gauth = GoogleAuth()
            gauth.LoadClientConfigFile(os.environ[u'GOOGLE_SECRETS_FILE'])
            gauth.LoadCredentialsFile(os.environ[u'GOOGLE_CREDENTIALS_FILE'])
            if gauth.credentials is None:
                gauth.CommandLineAuth()
            elif gauth.access_token_expired:
                gauth.Refresh()
            else:
                gauth.Authorize()
            gauth.SaveCredentialsFile(os.environ[u'GOOGLE_CREDENTIALS_FILE'])
        else:
            raise BackendException(
                u'GOOGLE_DRIVE_ACCOUNT_KEY or GOOGLE_DRIVE_SETTINGS environment '
                u'variable not set. Please read the manpage to fix.')
        self.drive = GoogleDrive(gauth)

        # Dirty way to find root folder id
        file_list = self.drive.ListFile({
            u'q':
            u"'Root' in parents and trashed=false"
        }).GetList()
        if file_list:
            parent_folder_id = file_list[0][u'parents'][0][u'id']
        else:
            file_in_root = self.drive.CreateFile({u'title': u'i_am_in_root'})
            file_in_root.Upload()
            parent_folder_id = file_in_root[u'parents'][0][u'id']

        # Fetch destination folder entry and create hierarchy if required.
        folder_names = parsed_url.path.split(u'/')
        for folder_name in folder_names:
            if not folder_name:
                continue
            file_list = self.drive.ListFile({
                u'q':
                u"'" + parent_folder_id + u"' in parents and trashed=false"
            }).GetList()
            folder = next(
                (item
                 for item in file_list if item[u'title'] == folder_name and
                 item[u'mimeType'] == u'application/vnd.google-apps.folder'),
                None)
            if folder is None:
                folder = self.drive.CreateFile({
                    u'title':
                    folder_name,
                    u'mimeType':
                    u"application/vnd.google-apps.folder",
                    u'parents': [{
                        u'id': parent_folder_id
                    }]
                })
                folder.Upload()
            parent_folder_id = folder[u'id']
        self.folder = parent_folder_id
        self.id_cache = {}