Example #1
def get_link(bot, update):
    TRChatBase(update.from_user.id, update.text, "getlink3")
    if str(update.from_user.id) in Config.BANNED_USERS:
        bot.send_message(chat_id=update.chat.id,
                         text=Translation.ABUSIVE_USERS,
                         reply_to_message_id=update.message_id,
                         disable_web_page_preview=True,
                         parse_mode=pyrogram.ParseMode.HTML)
        return
    logger.info(update.from_user)
    if update.reply_to_message is not None:
        reply_message = update.reply_to_message
        download_location = Config.DOWNLOAD_LOCATION + "/"
        start = datetime.now()
        a = bot.send_message(chat_id=update.chat.id,
                             text=Translation.DOWNLOAD_START,
                             reply_to_message_id=update.message_id)
        c_time = time.time()
        after_download_file_name = bot.download_media(
            message=reply_message,
            file_name=download_location,
            progress=progress_for_pyrogram,
            progress_args=(Translation.DOWNLOAD_START, a.message_id,
                           update.chat.id, c_time))
        download_extension = after_download_file_name.rsplit(".", 1)[-1]
        upload_name = after_download_file_name.rsplit("/", 1)[-1]
        upload_name = upload_name.replace(" ", "_")
        bot.edit_message_text(text=Translation.SAVED_RECVD_DOC_FILE,
                              chat_id=update.chat.id,
                              message_id=a.message_id)
        end_one = datetime.now()
        if str(update.from_user.id) in Config.G_DRIVE_AUTH_DRQ:
            gauth = Config.G_DRIVE_AUTH_DRQ[str(update.from_user.id)]
            # Create GoogleDrive instance with authenticated GoogleAuth instance.
            drive = GoogleDrive(gauth)
            file_inance = drive.CreateFile()
            # Read file and set it as a content of this instance.
            file_inance.SetContentFile(after_download_file_name)
            file_inance.Upload()  # Upload the file.
            end_two = datetime.now()
            time_taken_for_upload = (end_two - end_one).seconds
            logger.info(file_inance)
            adfulurl = file_inance.webContentLink
            # reuse the Drive web content link in the final message below
            t_response_arry = adfulurl
            max_days = 0
        else:
            url = "https://srv-file5.gofile.io/upload"
            max_days = 5
            timeseconds = int(time.time())
            timesecondsplusexpiry = int(
                time.time()) + (max_days * 24 * 60 * 60)
            command_to_exec = [
                "curl", "-F", "filesUploaded=@" + after_download_file_name,
                "-F", "expire=" + str(timesecondsplusexpiry), "-F",
                "category=file", "-F", "comments=0", url
            ]

            bot.edit_message_text(text=Translation.UPLOAD_START,
                                  chat_id=update.chat.id,
                                  message_id=a.message_id)
            try:
                logger.info(command_to_exec)
                t_response = subprocess.check_output(command_to_exec,
                                                     stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as exc:
                logger.info("Status : FAIL", exc.returncode, exc.output)
                bot.edit_message_text(chat_id=update.chat.id,
                                      text=exc.output.decode("UTF-8"),
                                      message_id=a.message_id)
                return False
            else:
                logger.info(t_response)
                print(t_response)
                t_response_arry = "https://gofile.io/?c=" + json.loads(
                    t_response.decode("UTF-8").split("\n")
                    [-1].strip())['data']['code']

                #shorten_api_url = "http://ouo.io/api/{}?s={}".format(Config.OUO_IO_API_KEY, t_response_arry)
                #adfulurl = requests.get(shorten_api_url).text
        bot.edit_message_text(chat_id=update.chat.id,
                              text=Translation.AFTER_GET_DL_LINK.format(
                                  t_response_arry, max_days),
                              parse_mode=pyrogram.ParseMode.HTML,
                              message_id=a.message_id,
                              disable_web_page_preview=True)
        try:
            os.remove(after_download_file_name)
        except:
            pass
    else:
        bot.send_message(chat_id=update.chat.id,
                         text=Translation.REPLY_TO_DOC_GET_LINK,
                         reply_to_message_id=update.message_id)
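
The handler above shells out to curl to push the file to gofile.io. Below is a rough sketch of the same multipart upload done with the requests library; it is not part of the original handler, and the gofile.io endpoint, form fields and response layout are simply copied from the curl command above rather than verified against the current gofile API.

import time
import requests

def upload_to_gofile(path, url="https://srv-file5.gofile.io/upload", max_days=5):
    # Mirror the multipart form fields passed to curl in the example above.
    expire = int(time.time()) + max_days * 24 * 60 * 60
    with open(path, "rb") as fh:
        resp = requests.post(url,
                             files={"filesUploaded": fh},
                             data={"expire": str(expire), "category": "file", "comments": "0"})
    resp.raise_for_status()
    return "https://gofile.io/?c=" + resp.json()["data"]["code"]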
Example #2
    def upload_file(self, test=False):
        logger.debug("Uploading file to Google Drive.")

        # Create GoogleDrive instance with authenticated GoogleAuth instance.
        drive = GoogleDrive(self.gauth)

        # Get lists of folders
        root_folders = drive.ListFile({
            "q":
            "'root' in parents and mimeType='application/vnd.google-apps.folder' and trashed=false"
        }).GetList()
        all_folders = drive.ListFile({
            "q":
            "mimeType='application/vnd.google-apps.folder' and trashed=false"
        }).GetList()
        all_pdfs = drive.ListFile({
            "q":
            "mimeType='application/pdf' and trashed=false"
        }).GetList()

        # Check for "Fantasy_Football" root folder and create it if it does not exist
        google_drive_root_folder_name = self.config.get(
            "Drive",
            "google_drive_root_folder_name",
            fallback="Fantasy_Football")
        google_drive_root_folder_id = self.make_root_folder(
            drive,
            self.check_file_existence(google_drive_root_folder_name,
                                      root_folders, "root"),
            google_drive_root_folder_name)

        if not test:
            # Check for season folder and create it if it does not exist
            # noinspection PyTypeChecker
            season_folder_name = self.filename.split(os.sep)[-3]

            season_folder_id = self.make_parent_folder(
                drive,
                self.check_file_existence(season_folder_name, all_folders,
                                          google_drive_root_folder_id),
                season_folder_name, google_drive_root_folder_id)

            # Check for league folder and create it if it does not exist
            # noinspection PyTypeChecker
            league_folder_name = self.filename.split(os.sep)[-2].replace(
                "-", "_")
            league_folder_id = self.make_parent_folder(
                drive,
                self.check_file_existence(league_folder_name, all_folders,
                                          season_folder_id),
                league_folder_name, season_folder_id)

            # Check for the league report and create it if it does not exist
            report_file_name = self.filename.split(os.sep)[-1]
            report_file = self.check_file_existence(report_file_name, all_pdfs,
                                                    league_folder_id)
        else:
            report_file_name = self.filename
            report_file = self.check_file_existence(report_file_name, all_pdfs,
                                                    "root")
            league_folder_id = "root"

        if report_file:
            report_file.Delete()
        upload_file = drive.CreateFile({
            "title":
            report_file_name,
            "mimeType":
            "application/pdf",
            "parents": [{
                "kind": "drive#fileLink",
                "id": league_folder_id
            }]
        })
        upload_file.SetContentFile(self.filename)

        # Upload the file.
        upload_file.Upload()

        upload_file.InsertPermission({
            "type": "anyone",
            "role": "reader",
            "withLink": True
        })

        return "\nFantasy Football Report\nGenerated %s\n*%s*\n\n_Google Drive Link:_\n%s" % (
            "{:%Y-%b-%d %H:%M:%S}".format(datetime.datetime.now()),
            upload_file['title'], upload_file["alternateLink"])
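
upload_file() relies on helper methods (check_file_existence, make_root_folder, make_parent_folder) that are not shown in this example. As a purely hypothetical illustration of the lookup it needs (not the author's implementation), a check_file_existence-style helper could scan a pre-fetched PyDrive file list for a title under a given parent:

def check_file_existence(name, file_list, parent_id):
    # file_list is a list of metadata dicts from drive.ListFile(...).GetList();
    # in the Drive v2 metadata used by PyDrive each entry carries a "parents" list.
    for f in file_list:
        parent_ids = [p.get("id") for p in f.get("parents", [])]
        in_parent = parent_id in parent_ids or (
            parent_id == "root" and any(p.get("isRoot") for p in f.get("parents", [])))
        if f["title"] == name and in_parent:
            return f
    return None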
Example #3
def initialize_gdrive():
    gauth = GoogleAuth()
    scopes = ['https://www.googleapis.com/auth/drive']
    gauth.credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        json_keyfile, scopes)
    return GoogleDrive(gauth)
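
A hedged usage sketch for initialize_gdrive(): json_keyfile is assumed to be the service-account key loaded elsewhere as a dict, and the folder ID below is a placeholder. Files uploaded through a service account end up in the service account's own Drive unless they are parented to a folder shared with it.

drive = initialize_gdrive()
f = drive.CreateFile({
    "title": "report.txt",
    "parents": [{"kind": "drive#fileLink", "id": "<shared-folder-id>"}],  # placeholder folder ID
})
f.SetContentFile("report.txt")
f.Upload()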
Example #4
def not_first_login(
):  #this function will be used when a user had already logged in before.
    global drive
    gauth = GoogleAuth()
    gauth.LoadCredentialsFile("mycreds.txt")
    drive = GoogleDrive(gauth)
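
not_first_login() assumes that "mycreds.txt" already exists. A minimal sketch of the complementary first-login flow, using the same PyDrive calls that appear in Example #18 below, would run the browser-based OAuth flow once and persist the credentials:

def first_login():
    global drive
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()                 # interactive browser login
    gauth.SaveCredentialsFile("mycreds.txt")   # cache for not_first_login()
    drive = GoogleDrive(gauth)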
Example #5
from zipfile import ZipFile
import io
import shutil
from email.message import EmailMessage
import pymongo
from pymongo import MongoClient
import asyncio
import random
import json
import datetime  # used by SpotifyAPI.access_token_expires below
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from pathlib import Path

gauth = GoogleAuth()
gauth.LocalWebserverAuth()
drive = GoogleDrive(gauth)

client_id = ''
client_secret = ''

client = MongoClient("")
db = client.spotipydl
collection1 = db['users']
collection2 = db['download_queue']


class SpotifyAPI(object):
    access_token = None
    access_token_expires = datetime.datetime.now()
    access_token_did_expire = True
    client_id = None
Example #6
class GDrive():
    def __init__(self, creditsLocation, ListLocation, logFilePath):
        self.fileNumber = 0
        self.downLoadList = []
        self.gauth = GoogleAuth()
        self.CreditsLocation = creditsLocation
        self.ListLocation = ListLocation
        self.logFilePath = logFilePath

    def Authorize(self):
        self.gauth.LoadCredentialsFile(self.CreditsLocation)
        if self.gauth.credentials is None:
            #Authenticate through browser if it hasn't been done yet
            self.gauth.LocalWebserverAuth()
        elif self.gauth.access_token_expired:
            # Refresh them if expired
            self.gauth.Refresh()
        else:
            # Initialise the saved credentials
            self.gauth.Authorize()
        self.gauth.SaveCredentialsFile(self.CreditsLocation)

    def Connect(self):
        self.drive = GoogleDrive(self.gauth)

    def LoadDownLoadList(self):
        #this function will load a json file which will have the fileID and
        #local file name so that duplicates are not downloaded
        try:
            with open(self.ListLocation, 'r') as fo:
                self.downLoadList = json.load(fo)
            print(self.downLoadList)
        except:
            print("No File")

    def SaveLatestDownLoadList(self):
        try:
            with open(self.ListLocation, 'w') as fo:
                json.dump(self.downLoadList, fo)
            print("Download List File Saved")
        except:
            print("Download List File Not Saved")

    def DownloadNewImages(self, PhotoLocation, strQuery):
        file_list = self.drive.ListFile({'q': strQuery}).GetList()

        for file in file_list:
            #for debugging
            print('-' * 20)
            #print(file)
            #print(file["mimeType"])
            print('-' * 20)

            #if the file type is not an image then dont download
            if file["mimeType"] != "image/jpeg":
                print("Not an image")
                continue

            #get working directory
            dir_path = os.path.dirname(os.path.realpath(__file__))
            #get photos directory
            directory_path = "%s/%s" % (dir_path, PhotoLocation)
            #if the file from gDrive has not been downloaded already
            if file['id'] not in self.downLoadList:
                fileName = fileNaming.getName(directory_path)
                filePath = "%s/%s/%s" % (dir_path, PhotoLocation, fileName)
                print("New File!")
                print(filePath)
                #get file
                photo = self.drive.CreateFile({'id': file['id']})
                #download file
                photo.GetContentFile(filePath)
                #add to the download list
                self.downLoadList.append(file['id'])
                #log that this new file has been saved
                self.logDownload(file)

            else:
                print("%s has already been Downloaded" % file['id'])

            self.SaveLatestDownLoadList()
            #at the end of each iteration I want to save the download list.
            #This is because if the program stops mid-iteration of the file
            #list and there are a lot of new files, these would be re-downloaded.

    def logDownload(self, file):
        #let's get some useful info to save
        try:
            fileUniqueID = file['id']
        except:
            fileUniqueID = "Cant Get ID"
        try:
            fileName = file['originalFilename']
        except:
            fileName = "Cant get Filename"
        try:
            modifiedDate = file['modifiedDate']
        except:
            modifiedDate = "Cant get modified Date"

        logIntro = "A new file has been downloaded from GDrive, here is its information"

        #create the info list
        Info = [fileUniqueID, fileName, "Uploaded Date: ", modifiedDate]

        report.logToFile(self.logFilePath, logIntro, Info)
Example #7
class Distributor(object):
    """
    Class to easily place scripts on the googledrive for the robot.
    """

    folder_list = {
        'evo_worklist': '0BxfZbreCWWAAcVF1ZE9RcDJBdTg',
        'infinite_script': '0BxfZbreCWWAAY09qeHdYeHBjTU0',
        'momentum_process': '0BxfZbreCWWAAdVMtblBiSkFFLWM',
        'hpd300_protocol': '0BxfZbreCWWAAai1oRDc3a2VsNXM',
        'hpd300_report': '0BxfZbreCWWAAQmU1ajFEXzFSN2M',
        'infinite_platedefinition': '0BxfZbreCWWAAYjN5SGpiaDUyZXM',
        'infinite_result': '0BxfZbreCWWAAMnNuNHJ2Tml5NDg',
        'momentum_experiment': '0BxfZbreCWWAAUHpRSTFCLXFuSDQ'
    }

    file_type = {
        'evo_worklist': '.gwl',
        'infinite_script': '.xml',
        'momentum_process': '.mpr',
        'hpd300_protocol': '.hpdd',
        'hpd300_report': '.DATA.xml',
        'infinite_platedefinition': '.pdfx',
        'infinite_result': '.xml',
        'momentum_experiment': '.mex'
    }

    def __init__(self):
        """Initializes a Distributor object and opens the connection to the googledrive [email protected]

        Notes
        -----

        Requires pydrive and the Google Python API client to be installed. Also, a client_secrets.json file needs to be present in the folder.
        """

        # Setup google drive

        self.gauth = GoogleAuth()
        self.gauth.LocalWebserverAuth()
        self.drive = GoogleDrive(self.gauth)

    @property
    def supported(self):
        """Returns a list of supported file locations/types

        Returns
        -------
        filetypes : list of string
            A list of strings that contain the supported device locations where files can be placed and read.
        """
        return Distributor.file_type.keys()

    def place(self, name, device, s):
        """Create a new file on the google drive in the appropriate folder

        Parameters
        ----------
        name : string
            The file name of the file to be created. A 32-bit UUID is added to the end, as well as the correct file extension
        device : string
            A string specifying the type and folder to be used. Distributor.supported lists the allowed types
        s : string
            The content of the file to be placed on the googledrive

        Returns
        -------
        full_name : string
            Full filename used to create the file, including the UUID and file extension
        """

        if device in self.supported:
            u = str(uuid.uuid4()).split("-")[0]
            full_name = name + "-" + u + Distributor.file_type[device]

            file1 = self.drive.CreateFile({
                'title':
                full_name,
                "parents": [{
                    "kind": "drive#fileLink",
                    "id": Distributor.folder_list[device]
                }]
            })
            file1.SetContentString(s)
            file1.Upload()

            return full_name

    def ls(self, device):
        if device in self.supported:
            folder = Distributor.folder_list[device]
            file_list = self.drive.ListFile({
                'q':
                "'" + folder + "' in parents and trashed=false"
            }).GetList()

            return file_list
        else:
            return []

    def get(self, file_id):
        file_handle = self.drive.CreateFile({'id': file_id})
        content = file_handle.GetContentString()
        return content
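
A hypothetical usage sketch for the Distributor class above; the worklist name and content are illustrative only.

dist = Distributor()
full_name = dist.place("wl", "evo_worklist", "A;source;;;1;;100\n")  # made-up worklist content
for entry in dist.ls("evo_worklist"):
    if entry["title"] == full_name:
        print(dist.get(entry["id"]))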
Example #8
import os
import sys
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive

fileID = str(sys.argv[1])

gauth = GoogleAuth()
gauth.CommandLineAuth()
drive = GoogleDrive(gauth)

file = drive.CreateFile({'id': "" + fileID + ""})
file.Delete()
Example #9
"""
Created on Sun Apr  7 15:05:26 2019
@author: June
"""

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive

gauth = GoogleAuth()
gauth.LocalWebserverAuth(
)  # client_secrets.json need to be in the same directory as the script
drive = GoogleDrive(gauth)

# View all folders and file in your Google Drive
fileList = drive.ListFile({
    'q': "'root' in parents and trashed=false"
}).GetList()
for file in fileList:
    print('Title: %s, ID: %s' % (file['title'], file['id']))
    # Get the folder ID that you want
    if (file['title'] == "To Share"):
        fileID = file['id']

# Initialize GoogleDriveFile instance with file id.
file1 = drive.CreateFile({
    "mimeType": "text/csv",
    "parents": [{
        "kind": "drive#fileLink",
        "id": fileID
    }]
})
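
The snippet stops after creating the GoogleDriveFile handle. Assuming a local data.csv exists, a minimal continuation (not part of the original example) would set a title and content and upload it into the "To Share" folder found above:

file1["title"] = "data.csv"        # illustrative name
file1.SetContentFile("data.csv")   # assumes a local data.csv
file1.Upload()
print("Uploaded %s into folder %s" % (file1["title"], fileID))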
Example #10
class TensorBERT(pl.LightningModule):
    def __init__(
        self, config
    ):  #proj_dim, num_inner_products, batch_size, weight =2., answer_punishment_coeff=1.):
        super(TensorBERT, self).__init__()
        self.config = config
        self.batch_size = self.config.model.batch_size
        self.max_len = self.config.model.max_len
        self.proj_dim = self.config.model.proj_dim
        self.weight = self.config.model.weight
        self.answer_punishment_coeff = self.config.model.answer_punishment_coeff
        self.num_inner_products = self.config.model.num_inner_products
        self.val_metrics = ['plain']
        self.lr = self.config.model.lr

        self.bert = get_transformer(self.config).cuda()
        self.bert_dim = self.bert.config.hidden_size  #768

        self.Proj = nn.Linear(self.bert_dim, self.proj_dim)
        self.Proj_cls = nn.Linear(self.bert_dim, self.proj_dim)
        self.BL = nn.Bilinear(
            self.proj_dim, self.proj_dim,
            self.num_inner_products)  # l scalar products of 2 vectors of dim d
        self.L = nn.Linear(self.num_inner_products, 2)
        self.CLS = nn.Linear(self.bert_dim, 2)  #(a,b) e^a/(e^a+e^b)
        self.squad_train_dataloader, self.squad_val_dataloader, self.squad_test_dataloader = generate_squad_dataloaders(
            self.config)
        self.save_hyperparameters()
        self.custom_step = 0
        if self.config.dirs.py_drive:
            gauth = GoogleAuth()
            gauth.credentials = GoogleCredentials.get_application_default()
            self.py_drive = GoogleDrive(gauth)

    def my_forward_pass(self, cls_bert_output, bert_output_full):
        current_batch_size = bert_output_full.shape[0]
        bert_output_full = torch.reshape(
            bert_output_full,
            (current_batch_size * self.max_len, self.bert_dim))
        proj_output_full = self.Proj(bert_output_full)
        proj_cls = self.Proj_cls(cls_bert_output)
        proj_cls = torch.cat(
            [proj_cls] * self.max_len
        )  # replicated proj_cls to make it the same shape as proj_output_full
        long_logits = self.BL(proj_cls, proj_output_full)
        long_logits = nn.LeakyReLU(negative_slope=0.1)(long_logits)
        long_logits = self.L(long_logits)
        long_logits = torch.reshape(long_logits,
                                    (current_batch_size, self.max_len, 2))
        return long_logits

    def forward(self, batch):
        input_ids, attention_mask, token_type_ids, _, _, _, _, _ = batch
        bert_output_full, cls_pooler_output = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids)
        # bert_output_full.shape = (batch_size, max_len, bert_dim) -- one vector of dim=bert_dim for each token
        # cls_pooler_output of shape (batch_size, bert_dim) -- Last layer hidden-state of the first token of the sequence (classification token)
        # further processed by a Linear layer and a Tanh activation function.
        # The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.
        cls_bert_output = bert_output_full[:,
                                           0, :]  # vector corresponding to CLS token
        # long_logits will have shape (batch_size, max_len, 2)
        # each output of bert is projected to smaller dimension, then take a few inner products with projection of the cls vector,
        # then another dense layer to get logits
        long_logits = self.my_forward_pass(cls_bert_output, bert_output_full)
        cls_logits = self.CLS(cls_pooler_output)
        #cls_logits will have shape (batch_size, 2)
        return cls_logits, long_logits

    def training_step(self, batch, batch_nb):
        predictions = self.forward(batch)
        loss = self.compute_loss(predictions, batch)

        self.custom_step += batch[0].shape[0]
        # logs
        self.logger.experiment.log(
            {
                'train_loss': loss,
                'epoch': self.current_epoch
            },
            step=self.custom_step)

        return {'loss': loss}

    def compute_loss(self, predictions, batch):
        cls_logits, long_logits = predictions
        _, _, _, label, answer_mask, _, _, _ = batch
        # loss for not guessing if there is an answer
        loss1 = F.cross_entropy(cls_logits,
                                label,
                                weight=torch.Tensor([self.weight, 1.]))
        # loss for each individual word -- is it in the answer?
        # TODO: need to insert pass weight -- around 90? bc of mismatch of 0s and 1s -- only 1% are 1s
        loss2 = F.cross_entropy(
            torch.reshape(long_logits,
                          (long_logits.shape[0] * long_logits.shape[1],
                           long_logits.shape[2])),
            torch.reshape(answer_mask,
                          (answer_mask.shape[0] * answer_mask.shape[1], )),
            weight=torch.Tensor([1., 50.]))

        loss = self.answer_punishment_coeff * loss1 + loss2
        return loss

    def validation_step(self, batch, batch_nb):
        evaluator = Evaluator(self)
        _, val_dict = evaluator.evaluate_on_batch(batch)
        return val_dict

    def validation_epoch_end(self, val_step_outputs):
        log_dict = {}
        for key in val_step_outputs[0]:
            aggregated = np.mean(
                [accuracy_dict[key] for accuracy_dict in val_step_outputs])
            log_dict[key] = aggregated
        self.logger.experiment.log(log_dict, step=self.custom_step)
        self.log('val_loss', log_dict['val_loss'], prog_bar=True, logger=False)
        # delete models from Trash using pydrive
        if self.config.dirs.py_drive:
            for a_file in self.py_drive.ListFile({
                    'q': "trashed=true"
            }).GetList():
                if a_file['title'] in {'model.ckpt', 'model-v0.ckpt'}:
                    a_file.Delete()

    def configure_optimizers(self):
        return torch.optim.Adam(
            [p for p in self.parameters() if p.requires_grad],
            lr=self.lr,
            eps=1e-08)

    def train_dataloader(self):
        return self.squad_train_dataloader

    def val_dataloader(self):
        return self.squad_val_dataloader

    def test_dataloader(self):
        return self.squad_test_dataloader

    def get_predictions(self, batch):
        '''
        Returns arrays (label probabilities, individual word probabilities) on given batch 
        '''
        with torch.no_grad():
            labels_prob, individual_words_prob = self.forward(batch)

        return labels_prob, individual_words_prob

    def convert_predictions(self, predictions, min_start, metric='plain'):
        '''
        TODO: write proper description once a few metrics are added
        Return numpy arrays of predictions of indices of starts and ends for:
        - metric='plain' - as argmax of unnormalized probability vectors
        - metric='bysum' - as argmax of the sum of unnormalized probabilities over all pairs (i,j) such that i<j (and i>min_start if given)
        - metric='byend' - as argmax of unnormalized probabilities over all i>min_start for end and
                        as argmax of unnormalized probabilities over all min_start<j<end_pred for start
        '''
        labels_prob, individual_words_prob = predictions
        labels_prob = labels_prob[:, 1] - labels_prob[:, 0]
        individual_words_prob = individual_words_prob[:, :,
                                                      1] - individual_words_prob[:, :,
                                                                                 0]
        neg_inf = -100
        batch_size, max_len = individual_words_prob.shape
        if metric == 'plain':
            labels_pred = (labels_prob > 0).astype(int)

            max_indices = np.argmax(individual_words_prob, axis=1)
            start_pred = np.zeros(labels_pred.shape)
            end_pred = np.zeros(labels_pred.shape)
            for i in range(batch_size):
                if individual_words_prob[i, max_indices[i]] <= 0:
                    start_pred[i] = 0
                    end_pred[i] = 0
                    continue

                current_index = max_indices[i] - 1
                while True:
                    if current_index >= min_start[i]:
                        if individual_words_prob[i, current_index] > 0:
                            current_index -= 1
                        else:
                            break
                    else:
                        break
                start_pred[i] = current_index + 1

                current_index = max_indices[i] + 1
                while True:
                    if current_index < max_len:
                        if individual_words_prob[i, current_index] > 0:
                            current_index += 1
                        else:
                            break
                    else:
                        break
                end_pred[i] = current_index - 1

            start_pred = start_pred * labels_pred
            end_pred = end_pred * labels_pred
        return start_pred, end_pred
Example #11
class SQUADBERT(pl.LightningModule):
    def __init__(self, config):
        super(SQUADBERT, self).__init__()
        # initializing parameters
        self.config = config
        self.batch_size = self.config.model.batch_size
        self.max_len = self.config.model.max_len
        self.freeze_layers = self.config.model.freeze_layers
        self.lr = self.config.model.lr
        # save hyperparameters for .hparams attribute
        self.save_hyperparameters()
        # initializing BERT
        self.bert = get_transformer(self.config).cuda()
        self.bert_dim = self.bert.config.hidden_size
        # evaluation metrics
        self.val_metrics = ['plain', 'bysum', 'byend']
        # initializing dataloaders
        self.squad_train_dataloader, self.squad_val_dataloader, self.squad_test_dataloader = generate_squad_dataloaders(
            self.config)
        # initializing additional layers -- start and end vectors
        self.Start = nn.Linear(self.bert_dim, 1)
        self.End = nn.Linear(self.bert_dim, 1)
        self.custom_step = 0
        if self.config.dirs.py_drive:
            gauth = GoogleAuth()
            gauth.credentials = GoogleCredentials.get_application_default()
            self.py_drive = GoogleDrive(gauth)

    def new_layers(self, bert_output, new_layer):
        logits_wrong_shape = new_layer(
            torch.reshape(bert_output,
                          (bert_output.shape[0] * bert_output.shape[1],
                           bert_output.shape[2])))
        logits = torch.reshape(logits_wrong_shape,
                               (bert_output.shape[0], bert_output.shape[1]))
        return logits

    def forward(self, batch):
        input_ids, attention_mask, token_type_ids, _, _, _, _, _ = batch
        # _ should be used for classification answer/no answer
        bert_output, _ = self.bert(input_ids=input_ids,
                                   attention_mask=attention_mask,
                                   token_type_ids=token_type_ids)
        # shape of q will be (batch_size, max_len, bert_dim) = (batch_size, 256, 768)
        # take inner products of output vectors with trainable start and end vectors
        start_logits = self.new_layers(bert_output, self.Start)
        end_logits = self.new_layers(bert_output, self.End)

        return start_logits, end_logits

    # this is the main function of pl modules. defines architecture and loss function. training loop comes for free -- implemented inside PL
    def training_step(self, batch, batch_nb):
        predictions = self.forward(batch)
        loss = self.compute_loss(predictions, batch)

        self.custom_step += batch[0].shape[0]
        # logs
        self.logger.experiment.log(
            {
                'train_loss': loss,
                'epoch': self.current_epoch
            },
            step=self.custom_step)

        return {'loss': loss}

    def compute_loss(self, predictions, batch):
        start_logits, end_logits = predictions
        # LOSS: compute cross_entropy loss between predictions and actual labels for start and end
        _, _, _, _, _, _, answer_starts, answer_ends = batch
        start_loss = F.cross_entropy(start_logits, answer_starts)
        end_loss = F.cross_entropy(end_logits, answer_ends)
        loss = start_loss + end_loss
        return loss

    def validation_step(self, batch, batch_nb):
        evaluator = Evaluator(self)
        _, val_dict = evaluator.evaluate_on_batch(batch)
        return val_dict

    def validation_epoch_end(self, val_step_outputs):
        log_dict = {}
        for key in val_step_outputs[0]:
            aggregated = np.mean(
                [accuracy_dict[key] for accuracy_dict in val_step_outputs])
            log_dict[key] = aggregated
        self.logger.experiment.log(log_dict, step=self.custom_step)
        self.log('val_loss', log_dict['val_loss'], prog_bar=True, logger=False)
        # delete models from Trash using pydrive
        if self.config.dirs.py_drive:
            for a_file in self.py_drive.ListFile({
                    'q': "trashed=true"
            }).GetList():
                if a_file['title'] in {'model.ckpt', 'model-v0.ckpt'}:
                    title = a_file['title']
                    a_file.Delete()
                    print(f'File {title} was deleted from Trash.')

    def configure_optimizers(self):
        return torch.optim.Adam(
            [p for p in self.parameters() if p.requires_grad],
            lr=self.lr,
            eps=1e-08)

    def get_predictions(self, batch):
        '''
        Returns arrays (start probabilities, end probabilities) on given batch 
        '''
        with torch.no_grad():
            start_prob, end_prob = self.forward(batch)
        return start_prob, end_prob

    def convert_predictions(self, predictions, min_start, metric='plain'):
        '''
        Return numpy arrays of predictions of indices of starts and ends for:
        - metric='plain' - as argmax of unnormalized probability vectors
        - metric='bysum' - as argmax of the sum of unnormalized probabilities over all pairs (i,j) such that i<j (and i>min_start if given)
        - metric='byend' - as argmax of unnormalized probabilities over all i>min_start for end and
                        as argmax of unnormalized probabilities over all min_start<j<end_pred for start
        '''
        start_prob, end_prob = predictions
        neg_inf = -100
        batch_size, max_len = start_prob.shape
        if metric == 'plain':
            start_pred = np.argmax(start_prob, axis=1)
            end_pred = np.argmax(end_prob, axis=1)
        elif metric == 'bysum':
            probs = start_prob.reshape(-1, max_len, 1) + end_prob.reshape(
                -1, 1, max_len
            )  # array of shape: (batch_size, max_len, max_len), matrix of pairwise sums per each element of the batch
            mask = np.zeros(
                probs.shape
            )  # create a mask to avoid including cases where i > j or i > min_start or j > min_start
            for i, s in enumerate(min_start):
                mask[i, :s, :] = 1
                mask[i, :, :s] = 1
                mask[i][np.tril_indices(max_len, -1)] = 1
            mask[:, 0,
                 0] = 0  # we however leave i=j=0 to detect questions without answers
            probs = np.ma.array(probs, mask=mask)
            probs = np.ma.filled(probs, neg_inf)
            max_probs = np.argmax(
                probs.reshape(batch_size, -1), axis=-1
            )  # array of shape: (batch_size,), argmaxes of flattened matrices of pairwise sums
            start_pred, end_pred = np.unravel_index(
                max_probs, (max_len, max_len)
            )  # two arrays of shape: (batch_size,), 'unflattenning' of max_probs
        elif metric == 'byend':
            # first we deal with ends
            mask = np.zeros(
                end_prob.shape
            )  # create a mask to avoid including cases where end > min_start
            for i, s in enumerate(min_start):
                mask[i, :s] = 1
            mask[:,
                 0] = 0  # we however leave end=0 to detect questions without answers
            end_prob = np.ma.array(end_prob, mask=mask)
            start_prob = np.ma.array(start_prob, mask=mask)
            end_prob = np.ma.filled(end_prob, neg_inf)
            start_prob = np.ma.filled(start_prob, neg_inf)
            end_pred = np.argmax(
                end_prob, axis=-1
            )  # array of shape: (batch_size,), argmaxes of ends' probabilities
            # now we deal with starts
            mask = np.zeros(
                start_prob.shape
            )  # create a mask to avoid including cases where end > min_start
            for i, e in enumerate(end_pred):
                mask[i, e + 1:] = 1
            start_prob = np.ma.array(start_prob, mask=mask)
            start_prob = np.ma.filled(start_prob, neg_inf)
            start_pred = np.argmax(
                start_prob, axis=-1
            )  # array of shape: (batch_size,), argmaxes of starts' probabilities
        return start_pred, end_pred

    def train_dataloader(self):
        return self.squad_train_dataloader

    def val_dataloader(self):
        return self.squad_val_dataloader

    def test_dataloader(self):
        return self.squad_test_dataloader
Example #12
#Installs google drive python modules
!pip install PyDrive
!pip install regex
#Import functions to create connections between Drive and Colab. 
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import pandas as pd
import numpy as np
import regex as re 
#Authenticate and create pydrive client, click on the link, allow Google SDK to access drive, and past code into the text box
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
downloaded = drive.CreateFile({'id':"1_wcpzL5I8LXHeTYczMVPUW4UXIN4dHXH"})   #File ID
downloaded.GetContentFile('LeagueofLegends.csv')        # Specifying file
data = pd.read_csv('LeagueofLegends.csv') #Read in the data

data.keys()

KillTimeRegex = re.compile(r'\d+\.\d+')
index = 0 
allKillTimes = []
for games in data['bKills']:
  KillFind = KillTimeRegex.findall(games)
  killTimes = []
  for nums in KillFind:
    killTimes.append(float(nums))
  allKillTimes.append(killTimes)
  # The original notebook continues with plotting code here; the objects it
  # references (`y` and `plt`) come from cells that are not included in this
  # snippet, so the calls are commented out.
  # y.axis('off')

# plt.subplots_adjust(wspace = 1)

Example #13
#Saving the model
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive 
from google.colab import auth 
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()                      
drive = GoogleDrive(gauth)

model.save('signlanguage.h5')
model_file = drive.CreateFile({'title' : 'signlanguage.h5'})                       
model_file.SetContentFile('signlanguage.h5')                      
model_file.Upload()

# get a handle to the uploaded file on Google Drive by its id (see note below)
drive.CreateFile({'id': model_file.get('id')})
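
Note that the last call only creates a GoogleDriveFile handle for the uploaded model; actually retrieving the file again takes an explicit content download, for example (a sketch, not part of the original notebook):

restored = drive.CreateFile({'id': model_file.get('id')})
restored.GetContentFile('signlanguage_restored.h5')  # writes the model back to local disk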






Example #14
from google.colab import auth
from google.colab import files
from oauth2client.client import GoogleCredentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from reportlab.platypus import SimpleDocTemplate
from reportlab.lib.pagesizes import letter
from reportlab.platypus import Table
from reportlab.platypus import TableStyle
from reportlab.lib import colors
import pandas as pd

#@title Google Drive Authentication
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Download dataset from "DATAFRAME_halodoc_translated.csv"
# https://drive.google.com/file/d/1TFwZWD6ctiTPbexsdLfDJl5JeRzdTGSx/view?usp=sharing
downloaded = drive.CreateFile({'id':"1TFwZWD6ctiTPbexsdLfDJl5JeRzdTGSx"})   
downloaded.GetContentFile('DATAFRAME_halodoc_translated.csv')
# https://drive.google.com/file/d/16Uc9YxKpAPMgfULqIb3P7G0_lIPvoACe/view?usp=sharing
downloaded = drive.CreateFile({'id':"16Uc9YxKpAPMgfULqIb3P7G0_lIPvoACe"})   
downloaded.GetContentFile('NEG_DATAFRAME_halodoc_translated.csv')
# https://drive.google.com/file/d/1OIZ8eaN1-3Xb-klZkQ7wZOLU3mTM9sb0/view?usp=sharing
downloaded = drive.CreateFile({'id':"1OIZ8eaN1-3Xb-klZkQ7wZOLU3mTM9sb0"})   
downloaded.GetContentFile('sentence_rank.csv')

DATAFRAME = pd.read_csv('DATAFRAME_halodoc_translated.csv')
NEG_DATAFRAME = pd.read_csv('NEG_DATAFRAME_halodoc_translated.csv')
sentence_rank = pd.read_csv('sentence_rank.csv')
Example #15
def authenticateGoogleDrive():
    gauth = GoogleAuth()
    drive = GoogleDrive(gauth)
    return drive
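
Here GoogleAuth() is handed to GoogleDrive without an explicit flow, so authentication depends on PyDrive's settings.yaml defaults and its lazy behaviour. A more explicit variant, shown only as a sketch, runs the flow up front:

def authenticate_google_drive_explicit():
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()   # opens a browser window on first run
    return GoogleDrive(gauth)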
Example #16
import os
from pathlib import Path

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive

cwd = Path.home() / "Desktop/WEC/Code"

os.chdir(cwd)

#list(cwd.glob("*"))

#Authenticates google connection
g_auth = GoogleAuth()
g_auth.LocalWebserverAuth()


#Local Instance of Google Drive
drive = GoogleDrive(g_auth)


#See all files (excluding trashed) on drive
#Other queries: "title contains '^Copy' and trashed=false"
#q is query
#'root'for all in My Drive
#wec_drive_id = '1qGEHvhuxZKhRXq6CntpSkDXgOx6mhgcl'

files = drive.ListFile({'q':"'0AO0qQjqQb1rKUk9PVA' in parents and trashed=false",
                        'corpora':'teamDrive', 
                        'teamDriveId':'0AO0qQjqQb1rKUk9PVA',
                        'includeTeamDriveItems':'True',
                        'supportsAllDrives':'True'}).GetList()

Example #17
class GoogleDriveDriver:
    '''
    GoogleDrive driver class.
    Downloads image files (to be used as icons) from Google Drive.
    It can also upload files.
    '''
    def __init__(self, yamldata):
        self.gauth = GoogleAuth()
        self.gauth.LocalWebserverAuth()
        self.drive = GoogleDrive(self.gauth)
        self.folder_id = yamldata.get('folder_id')
        self.max_results = 100
        self.query = f"'{self.folder_id}' in parents and trashed=false"

    def _get_mimeType(self, filename):
        """
        アップロードする際のmimeTypeの判別をする。イメージファイル専用
        :params filename: アップロードファイル名
        :return: jpg or png
        """
        extension = os.path.splitext(filename)[1][1:]
        if extension == 'jpg' or extension == 'jpeg':
            r = 'image/jpeg'
        elif extension == 'png':
            r = 'image/png'
        else:
            raise ValueError(f"Unsupported image extension: {extension}")
        return r

    def _upload(self, filename):
        """
        ファイルのアップロードを行う。
        :params filename: アップロードファイル名
        """
        # If no folder_id is specified, upload to the Drive root (home)
        if self.folder_id is None:
            f = self.drive.CreateFile({
                'title': os.path.basename(filename),
                'mimeType': self._get_mimeType(filename)
            })
        # If a folder_id is specified, upload into that folder
        else:
            f = self.drive.CreateFile({
                'title':
                os.path.basename(filename),
                'mimeType':
                self._get_mimeType(filename),
                'parents': [{
                    'kind': 'drive#fileLink',
                    'id': self.folder_id
                }]
            })
        f.SetContentFile(filename)
        f.Upload()

    def _get_filelist(self):
        """
        GoogleDriveのファイルリストを作成する。
        :return: GoogleDriveのファイル辞書データリスト
        """
        l = []
        for file_list in self.drive.ListFile({
                'q': self.query,
                'maxResults': self.max_results
        }):
            for file in file_list:
                l.append(file)
        return l

    def _download(self, file, downloadpath="./"):
        """
        ファイルをローカルにダウンロードする。
        :params downloadpath: ダウンロード先フォルダ。未指定の場合は実行ファイルと同階層。
        """
        file.GetContentFile(os.path.join(downloadpath, file['title']))

    def _pick_file(self, filelist):
        """
        GoogleDriveのファイルリストから1つを無作為に選ぶ。
        :params filelist: GoogleDriveのファイル辞書データリスト
        :return: GoogleDriveのファイル辞書データ
        """
        return random.choice(filelist)

    def _pick_image(self, downloadpath="./"):
        """
        アイコンにするイメージを1枚選択し、ローカルにダウンロードする。
        :params downloadpath: ダウンロード先フォルダ。未指定の場合は実行ファイルと同階層。
        :return1: status: 0でないならエラー
        :return2:    msg: ファイル名。エラーの場合はエラーメッセージ
        """
        try:
            l = self._get_filelist()
            file = self._pick_file(l)
            self._download(file, downloadpath)
            return 0, file['title']

        except Exception as e:
            import traceback
            msg = f"GoogleDriveDriver._pick_image Error!\n{traceback.format_exc()}"
            return 9, msg
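
A hypothetical usage sketch for GoogleDriveDriver; the folder ID in yamldata is a placeholder.

yamldata = {"folder_id": "<drive-folder-id>"}   # placeholder
gdd = GoogleDriveDriver(yamldata)
status, msg = gdd._pick_image("./")
if status == 0:
    print("Downloaded", msg)
else:
    print(msg)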
Example #18
gauth = GoogleAuth()

gauth.LoadCredentialsFile("mycreds.txt")
if gauth.credentials is None:
    # Authenticate if they're not there
    gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
    # Refresh them if expired
    gauth.Refresh()
else:
    # Initialize the saved creds
    gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("mycreds.txt")
drive = GoogleDrive(gauth)
file1 = drive.CreateFile({'title': 'test.txt'})
file1.SetContentString('IoT Ping/Trace Results\n\n')
file1.Upload()
file_list = drive.ListFile({
    'q': "'root' in parents and trashed=false"
}).GetList()
for file1 in file_list:
    print('title: %s, id: %s' % (file1['title'], file1['id']))
    file_id = file1['id']

while devices == 0:
    print('Server IP Address: ' + server_address[0])
    print('Waiting for connection . . .')
    client, address = server.accept()
    print('... connected from:', address)
Example #19
 def Connect(self):
     self.drive = GoogleDrive(self.gauth)
Example #20
class FileStorage():
    def __init__(self):
        self.gauth = gauth = GoogleAuth()
        self.CREDENTIALS_FILE = CREDENTIALS_PATH
        self.gauth.LoadCredentialsFile(self.CREDENTIALS_FILE)
        self.drive = GoogleDrive(gauth)
        logging.info('Init Server')

    def upload_file(self, file_name, file_content):
        file = self.drive.CreateFile({'title': file_name})
        file.SetContentString(file_content)
        file.Upload()
        logging.info(
            f'{self.upload_file.__name__} -> (file_name: {file_name}, file_content: {file_content[:50]})'
        )

    def upload_file_with_path(self, file_path):
        file_name = file_path.split('/')[-1]
        file = self.drive.CreateFile({'title': file_name})
        file.content = open(file_path, 'rb')
        if file.get('mimeType') is None:
            file['mimeType'] = mimetypes.guess_type(file_path)[0]
        file.Upload()
        logging.info(
            f'{self.upload_file_with_path.__name__} -> (file_name: {file_name})')

    def upload_file_with_path_in_specific_folder(self, file_path, folder_name):
        file_name = file_path.split('/')[-1]
        file = self.drive.CreateFile({
            'title':
            file_name,
            'parents': [{
                'id': self.get_folder_id(folder_name)
            }]
        })
        file.content = open(file_path, 'rb')
        if file.get('mimeType') is None:
            file['mimeType'] = mimetypes.guess_type(file_path)[0]
        file.Upload()
        logging.info(
            f'{self.upload_file_with_path_in_specific_folder.__name__} -> (file_name: {file_name})')

    def create_folder(self, folder_name):
        folder = self.drive.CreateFile({
            'title':
            folder_name,
            'mimeType':
            'application/vnd.google-apps.folder'
        })
        folder.Upload()

    def create_folder_in_specific_folder(self, folder_name, folder_name_store):
        folder = self.drive.CreateFile({
            'title':
            folder_name,
            'parents': [{
                'id': self.get_folder_id(folder_name_store)
            }],
            'mimeType':
            'application/vnd.google-apps.folder'
        })
        folder.Upload()

    def get_folder_id(self, folder_name):

        folders = self.drive.ListFile({
            'q':
            "title='" + folder_name +
            "' and mimeType='application/vnd.google-apps.folder' and trashed=false"
        }).GetList()
        if len(folders) == 0:
            print("ERROR")
        else:
            return folders[0]['id']

    def download_file(self, file, file_path):
        file.GetContentFile(file_path)
        # logging.info(f'{self.download_file.__name__} -> (file_name: {file_path})')

    def change_title_file(self, file, new_title):
        file['title'] = new_title
        file.Upload()
        logging.info(
            f'{self.change_title_file.__name__} -> (file: {file["id"]} - {file["title"]}, new_title: {new_title})'
        )

    def get_content_file(self, file):
        logging.info(
            f'{self.get_content_file.__name__} -> (file : {file["id"]} - {file["title"]})'
        )
        return file.GetContentString()

    def change_content_file(self, file, new_content):
        file.SetContentString(new_content)
        file.Upload()
        logging.info(
            f'{self.change_content_file.__name__} -> (file: {file["id"]} - {file["title"]}, appended_content: {new_content[:50]})'
        )

    def append_content_file(self, file, appended_content):
        content = file.GetContentString()
        file.SetContentString(content + appended_content)
        file.Upload()
        logging.info(
            f'{self.append_content_file.__name__} -> (file: {file["id"]} - {file["title"]}, appended_content: {appended_content})'
        )

    def get_file_list(self):
        file_list = self.drive.ListFile({'q': "'root' in parents"}).GetList()
        logging.info(f'{self.get_file_list.__name__}')
        return file_list

    def get_file_list_in_specific_folder(self, folder_name):
        file_list = self.drive.ListFile({
            'q':
            f"'{self.get_folder_id(folder_name)}' in parents"
        }).GetList()
        logging.info(f'{self.get_file_list_in_specific_folder.__name__}')
        return file_list

    def get_files_by_title(self, title):
        file_list = self.drive.ListFile({'q': f"title = '{title}'"}).GetList()
        logging.info(f'{self.get_files_by_title.__name__} -> (title: {title})')
        return file_list

    def get_files_by_title_in_specific_folder(self, title, folder_name):
        file_list = self.drive.ListFile({
            'q':
            f"title = '{title}' and '{self.get_folder_id(folder_name)}' in parents"
        }).GetList()
        logging.info(f'{self.get_files_by_title_in_specific_folder.__name__} -> (title: {title})')
        return file_list

    def get_file_by_id(self, file_id):
        # result_file = self.drive.ListFile({'q': f"id = '{file_id}'"}).GetList()
        result_file = self.drive.CreateFile({'id': file_id})
        if (len(result_file) > 0):
            logging.info(
                f'{self.get_file_by_id.__name__} -> (file_id: {file_id})')
            return result_file
        else:
            logging.warning(
                f'{self.get_file_by_id.__name__} -> (file_id: {file_id})')
            return None

    def delete_file(self, file):
        file.Delete()
        logging.info(
            f'{self.delete_file.__name__} -> (file: {file["id"]} - {file["title"]})'
        )

    def delete_file_by_id(self, file_id):
        file = self.get_file_by_id(file_id)
        if file is not None:
            file.Delete()
            logging.info(
                f'{self.delete_file_by_id.__name__} -> (file_id: {file_id})')
        else:
            logging.warning(
                f'{self.delete_file_by_id.__name__} -> (file_id: {file_id})')
            return None
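
A hypothetical usage sketch for FileStorage; it assumes CREDENTIALS_PATH (used in __init__ above) points at a saved PyDrive credentials file, and the file and folder names are illustrative.

storage = FileStorage()
storage.create_folder("reports")
storage.upload_file_with_path_in_specific_folder("./report.pdf", "reports")
for f in storage.get_file_list_in_specific_folder("reports"):
    print(f["title"], f["id"])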
Example #21

# Code to read csv file into Colaboratory:
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

link="Your google drive csv location link"

fluff, id = link.split('=')
print (id) # Verify that you have everything after '='

import pandas as pd

downloaded = drive.CreateFile({'id':id}) 
downloaded.GetContentFile('new_all.csv')  
data = pd.read_csv('new_all.csv')

import scipy as sp
from sklearn.model_selection import train_test_split
from keras.preprocessing import sequence
import numpy as np

x=data.iloc[:,0:82]
Example #22
 def __init__(self):
     self.gauth = gauth = GoogleAuth()
     self.CREDENTIALS_FILE = CREDENTIALS_PATH
     self.gauth.LoadCredentialsFile(self.CREDENTIALS_FILE)
     self.drive = GoogleDrive(gauth)
     logging.info('Init Server')
Example #23
def get_link(bot, update):
    TRChatBase(update.from_user.id, update.text, "getlink5")
    if str(update.from_user.id) in Config.BANNED_USERS:
        bot.send_message(
            chat_id=update.chat.id,
            text=Translation.ABUSIVE_USERS,
            reply_to_message_id=update.message_id,
            disable_web_page_preview=True,
            parse_mode=pyrogram.ParseMode.HTML
        )
        return
    logger.info(update.from_user)
    if update.reply_to_message is not None:
        reply_message = update.reply_to_message
        download_location = Config.DOWNLOAD_LOCATION + "/"
        start = datetime.now()
        a = bot.send_message(
            chat_id=update.chat.id,
            text=Translation.DOWNLOAD_START,
            reply_to_message_id=update.message_id
        )
        c_time = time.time()
        after_download_file_name = bot.download_media(
            message=reply_message,
            file_name=download_location,
            progress=progress_for_pyrogram,
            progress_args=(Translation.DOWNLOAD_START, a.message_id, update.chat.id, c_time)
        )
        download_extension = after_download_file_name.rsplit(".", 1)[-1]
        bot.edit_message_text(
            text=Translation.SAVED_RECVD_DOC_FILE,
            chat_id=update.chat.id,
            message_id=a.message_id
        )
        end_one = datetime.now()
        if str(update.from_user.id) in Config.G_DRIVE_AUTH_DRQ:
            gauth = Config.G_DRIVE_AUTH_DRQ[str(update.from_user.id)]
            # Create GoogleDrive instance with authenticated GoogleAuth instance.
            drive = GoogleDrive(gauth)
            file_inance = drive.CreateFile()
            # Read file and set it as a content of this instance.
            file_inance.SetContentFile(after_download_file_name)
            file_inance.Upload() # Upload the file.
            end_two = datetime.now()
            time_taken_for_upload = (end_two - end_one).seconds
            logger.info(file_inance)
            adfulurl = file_inance.webContentLink
            # reuse the Drive web content link in the final message below
            t_response_arry = adfulurl
            max_days = 0
        else:
            max_days = 5
            verystreamLOGIN = '******'
            verystreamKEY = 'JcGF53rcBri'
            
            # check video extension
            if not after_download_file_name.lower().endswith(('.mp4', '.mkv', '.avi', '.webm', '.vob', '.mpg')):
                bot.edit_message_text(
                    chat_id=update.chat.id,
                    text="This is not a video.",
                    message_id=a.message_id
                )
                return False
            
            
            rget = rs.get("https://api.verystream.com/file/ul?login=" + verystreamLOGIN + "&key=" + verystreamKEY)
            rjson = json.loads(rget.text)

            if rjson['status'] == 200:
                url = rjson['result']['url']
            else:
                bot.edit_message_text(
                    chat_id=update.chat.id,
                    text="Failed to initiate the upload process.",
                    message_id=a.message_id
                )
                return False

            command_to_exec = [
                "curl",
                "-F","file1=@"+after_download_file_name,
                "-H","Transfer-Encoding: chunked",
                url
            ]

            # {"status":200,"msg":"OK","result":{"name":"small.mp4","size":"383631","sha1":"5c5a07267317b166a218e5edb7667ccd2b5351be","content_type":"video\\/mp4","id":"h2qdQpteEk9","url":"https:\\/\\/verystream.com\\/stream\\/h2qdQpteEk9\\/small.mp4"}}
                
            bot.edit_message_text(
                text=Translation.UPLOAD_START,
                chat_id=update.chat.id,
                message_id=a.message_id
            )
            try:
                logger.info(command_to_exec)
                t_response = subprocess.check_output(command_to_exec, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as exc:
                logger.info("Status : FAIL", exc.returncode, exc.output)
                bot.edit_message_text(
                    chat_id=update.chat.id,
                    text=exc.output.decode("UTF-8"),
                    message_id=a.message_id
                )
                return False
            else:
                logger.info(t_response)
                print ( t_response )

                t_response_arry = json.loads(t_response.decode("UTF-8").split("\n")[-2].strip())['result']['url']


        bot.edit_message_text(
            chat_id=update.chat.id,
            text=Translation.AFTER_GET_DL_LINK.format(t_response_arry, max_days),
            parse_mode=pyrogram.ParseMode.HTML,
            message_id=a.message_id,
            disable_web_page_preview=True
        )
        try:
            os.remove(after_download_file_name)
        except:
            pass
    else:
        bot.send_message(
            chat_id=update.chat.id,
            text=Translation.REPLY_TO_DOC_GET_LINK,
            reply_to_message_id=update.message_id
        )
Example #24
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import argparse

print('Begin authentication')
gauth = GoogleAuth()
drive = GoogleDrive(gauth)

parser = argparse.ArgumentParser()
parser.add_argument('-i', '--folderId')
args = parser.parse_args()

file_list = drive.ListFile({
    'q':
    "'%s' in parents and trashed=false" % args.folderId
}).GetList()

for drive_file in file_list:
    if drive_file['title'] == 'PillowData.txt':
        file_local = drive.CreateFile({'id': drive_file['id']})
        file_local.GetContentFile('PillowData.txt')
        print('Download of `PillowData.txt` to local has succeeded.')
        break
Example #25
async def gdrive_stuff(client, message):
    gauth.LoadCredentialsFile("naruto/session/drive")
    if gauth.credentials is None:
        if ENV and gdrive_credentials:
            with open("client_secrets.json", "w") as file:
                file.write(gdrive_credentials)
        await edrep(
            message,
            text=
            "You are not logged in to your google drive account!\nYour assistant bot may help you to login google "
            "drive, check your assistant bot for more information!",
        )
        gdriveclient = os.path.isfile("client_secrets.json")
        if gdriveclient:
            try:
                gauth.GetAuthUrl()
            except Exception as e:
                print(e)
                await setbot.send_message(
                    message.from_user.id,
                    "Wrong Credentials! Check var ENV gdrive_credentials on heroku or do "
                    ".credentials (your credentials) for change your Credentials",
                )
                return
            await setbot.send_message(
                message.from_user.id,
                "Hello, look like you're not logged in to google drive :)\nI can help you to "
                "login.\n\n**To login Google Drive**\n1. `/gdrive` to get login URL\n2. After "
                "you're logged in, copy your Token.\n3. `/gdrive (token)` without `(` or `)` to "
                "login, and your session will saved to `naruto/session/drive`.\n\nDon't share your "
                "session to someone, else they will hack your google drive account!",
            )
        else:
            await setbot.send_message(
                message.from_user.id,
                "Hello, look like you're not logged in to google drive 🙂\nI can help you to "
                "login.\n\nFirst of all, you need to activate your google drive API\n1. [Go "
                "here](https://developers.google.com/drive/api/v3/quickstart/python), "
                "click **Enable the drive API**\n2. Login to your google account (skip this if "
                "you're already logged in)\n3. After logged in, click **Enable the drive API** "
                "again, and click **Download Client Configuration** button, download that.\n4. "
                "After downloaded that file, open that file then copy all of that content, "
                "back to telegram then do .credentials (copy the content of that file)  do "
                "without bracket\n\nAfter that, you can go next guide by type /gdrive",
            )
        return
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()

    drive = GoogleDrive(gauth)
    drive_dir = await get_drivedir(drive)

    if len(message.text.split()) == 3 and message.text.split(
    )[1] == "download":
        await edrep(message, text="Downloading...")
        driveid = await get_driveid(message.text.split()[2])
        if not driveid:
            await edrep(
                message,
                text=
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`",
            )
            return
        filename = await get_driveinfo(driveid)
        if not filename:
            await edrep(
                message,
                text=
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`",
            )
            return
        await edrep(
            message,
            text="Downloading for `{}`\nPlease wait...".format(
                filename.replace(" ", "_")),
        )
        download = drive.CreateFile({"id": driveid})
        download.GetContentFile(filename)
        try:
            os.rename(filename,
                      "naruto/downloads/" + filename.replace(" ", "_"))
        except FileExistsError:
            os.rename(filename,
                      "naruto/downloads/" + filename.replace(" ", "_") + ".2")
        await edrep(
            message,
            text="Downloaded!\nFile saved to `{}`".format(
                "naruto/downloads/" + filename.replace(" ", "_")),
        )
    elif len(
            message.text.split()) == 3 and message.text.split()[1] == "upload":
        filerealname = message.text.split()[2].split(None, 1)[0]
        filename = "naruto/downloads/{}".format(filerealname.replace(" ", "_"))
        checkfile = os.path.isfile(filename)
        if not checkfile:
            await edrep(message,
                        text="File `{}` was not found!".format(filerealname))
            return
        await edrep(message, text="Uploading `{}`...".format(filerealname))
        upload = drive.CreateFile({
            "parents": [{
                "kind": "drive#fileLink",
                "id": drive_dir
            }],
            "title":
            filerealname,
        })
        upload.SetContentFile(filename)
        upload.Upload()
        upload.InsertPermission({
            "type": "anyone",
            "value": "anyone",
            "role": "reader"
        })
        await edrep(
            message,
            text=
            "Uploaded!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
            .format(
                filerealname,
                upload["alternateLink"],
                filerealname,
                upload["downloadUrl"],
            ),
        )
    elif len(
            message.text.split()) == 3 and message.text.split()[1] == "mirror":
        await edrep(message, text="Mirroring...")
        driveid = await get_driveid(message.text.split()[2])
        if not driveid:
            await edrep(
                message,
                text=
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`",
            )
            return
        filename = await get_driveinfo(driveid)
        if not filename:
            await edrep(
                message,
                text=
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`",
            )
            return
        mirror = (drive.auth.service.files().copy(
            fileId=driveid,
            body={
                "parents": [{
                    "kind": "drive#fileLink",
                    "id": drive_dir
                }],
                "title": filename,
            },
        ).execute())
        new_permission = {
            "type": "anyone",
            "value": "anyone",
            "role": "reader"
        }
        drive.auth.service.permissions().insert(fileId=mirror["id"],
                                                body=new_permission).execute()
        await edrep(
            message,
            text="Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
            .format(filename, mirror["alternateLink"], filename,
                    mirror["downloadUrl"]),
        )
    elif len(message.text.split()) == 2 and message.text.split(
    )[1] == "tgmirror":
        if message.reply_to_message:
            await edrep(message, text="__Downloading...__")
            c_time = time.time()
            if message.reply_to_message.photo:
                if message.reply_to_message.caption:
                    nama = f"{message.reply_to_message.caption}.png".replace(
                        " ", "_")
                else:
                    nama = f"photo_{message.reply_to_message.photo.date}.png"
                await client.download_media(
                    message.reply_to_message.photo,
                    file_name="naruto/downloads/" + nama,
                    progress=lambda d, t: client.loop.create_task(
                        progressdl(d, t, message, c_time, "Downloading...")),
                )
            elif message.reply_to_message.animation:
                if message.reply_to_message.caption:
                    nama = f"{message.reply_to_message.caption}.gif".replace(
                        " ", "_")
                else:
                    nama = "giphy_{}-{}.gif".format(
                        message.reply_to_message.animation.date,
                        message.reply_to_message.animation.file_size,
                    )
                await client.download_media(
                    message.reply_to_message.animation,
                    file_name="naruto/downloads/" + nama,
                    progress=lambda d, t: client.loop.create_task(
                        progressdl(d, t, message, c_time, "Downloading...")),
                )
            elif message.reply_to_message.video:
                if message.reply_to_message.caption:
                    nama = f"{message.reply_to_message.caption}.mp4".replace(
                        " ", "_").replace(".mkv", "")
                else:
                    nama = "video_{}-{}.mp4".format(
                        message.reply_to_message.video.date,
                        message.reply_to_message.video.file_size,
                    )
                await client.download_media(
                    message.reply_to_message.video,
                    file_name="naruto/downloads/" + nama,
                    progress=lambda d, t: client.loop.create_task(
                        progressdl(d, t, message, c_time, "Downloading...")),
                )
            elif message.reply_to_message.sticker:
                if not message.reply_to_message.caption:
                    nama = "sticker_{}_{}.webp".format(
                        message.reply_to_message.sticker.date,
                        message.reply_to_message.sticker.set_name,
                    )
                else:
                    nama = f"{message.reply_to_message.caption}.webp".replace(
                        " ", "_")
                await client.download_media(
                    message.reply_to_message.sticker,
                    file_name="naruto/downloads/" + nama,
                    progress=lambda d, t: client.loop.create_task(
                        progressdl(d, t, message, c_time, "Downloading...")),
                )
            elif message.reply_to_message.audio:
                if message.reply_to_message.caption:
                    nama = f"{message.reply_to_message.caption}.mp3".replace(
                        " ", "_")
                else:
                    nama = "audio_{}.mp3".format(
                        message.reply_to_message.audio.date)
                await client.download_media(
                    message.reply_to_message.audio,
                    file_name="naruto/downloads/" + nama,
                    progress=lambda d, t: client.loop.create_task(
                        progressdl(d, t, message, c_time, "Downloading...")),
                )
            elif message.reply_to_message.voice:
                if message.reply_to_message.caption:
                    nama = f"{message.reply_to_message.caption}.ogg".replace(
                        " ", "_")
                else:
                    nama = "audio_{}.ogg".format(
                        message.reply_to_message.voice.date)
                await client.download_media(
                    message.reply_to_message.voice,
                    file_name="naruto/downloads/" + nama,
                    progress=lambda d, t: client.loop.create_task(
                        progressdl(d, t, message, c_time, "Downloading...")),
                )
            elif message.reply_to_message.document:
                nama = "{}".format(message.reply_to_message.document.file_name)
                await client.download_media(
                    message.reply_to_message.document,
                    file_name="naruto/downloads/" + nama,
                    progress=lambda d, t: client.loop.create_task(
                        progressdl(d, t, message, c_time, "Downloading...")),
                )
            else:
                await edrep(message, text="Unknown file!")
                return
            upload = drive.CreateFile({
                "parents": [{
                    "kind": "drive#fileLink",
                    "id": drive_dir
                }],
                "title":
                nama,
            })
            upload.SetContentFile("naruto/downloads/" + nama)
            upload.Upload()
            upload.InsertPermission({
                "type": "anyone",
                "value": "anyone",
                "role": "reader"
            })
            await edrep(
                message,
                text=
                "Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
                .format(nama, upload["alternateLink"], nama,
                        upload["downloadUrl"]),
            )
            os.remove("naruto/downloads/" + nama)
        else:
            await edrep(message, text="Reply document to mirror it to gdrive")
    elif len(message.text.split()) == 3 and message.text.split(
    )[1] == "urlmirror":
        await edrep(message, text="Downloading...")
        URL = message.text.split()[2]
        nama = URL.split("/")[-1]
        time_dl = await download_url(URL, nama)
        if "Downloaded" not in time_dl:
            await edrep(message, text="Failed to download file, invaild url!")
            return
        await edrep(message,
                    text=f"Downloaded with {time_dl}.\nNow uploading...")
        upload = drive.CreateFile({
            "parents": [{
                "kind": "drive#fileLink",
                "id": drive_dir
            }],
            "title":
            nama
        })
        upload.SetContentFile("naruto/downloads/" + nama)
        upload.Upload()
        upload.InsertPermission({
            "type": "anyone",
            "value": "anyone",
            "role": "reader"
        })
        await edrep(
            message,
            text="Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
            .format(nama, upload["alternateLink"], nama,
                    upload["downloadUrl"]),
        )
        os.remove("naruto/downloads/" + nama)
    else:
        await edrep(
            message,
            text=
            "Usage:\n-> `gdrive download <url/gid>`\n-> `gdrive upload <file>`\n-> `gdrive mirror <url/gid>`\n\nFor "
            "more information about this, go to your assistant.",
        )
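
The `get_driveid` helper used above is not shown in this example. Purely as an illustration, a regex-based sketch of such a helper might look like this; the accepted URL shapes are assumptions about common Drive link formats, not the actual implementation.

import re


def extract_drive_id(url_or_id):
    # Accept a bare file id, a .../file/d/<id>/... link, or a ...?id=<id> style link.
    patterns = [
        r"/file/d/([a-zA-Z0-9_-]{10,})",
        r"[?&]id=([a-zA-Z0-9_-]{10,})",
        r"^([a-zA-Z0-9_-]{10,})$",
    ]
    for pattern in patterns:
        match = re.search(pattern, url_or_id)
        if match:
            return match.group(1)
    return None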
Example #26
0
BATCH_SIZE = 100
DISPLAY_STEP = 10
DROPOUT_CONV = 0.8
DROPOUT_HIDDEN = 0.6
VALIDATION_SIZE = 2000      # Set to 0 to train on all available data

!pip install -U -q PyDrive ## you will have to install this in every Colab session
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

file_list = drive.ListFile({'q': "'18C7EzsX9LetIhz9WuUV4Bd6yAeeaubMz' in parents and trashed=false"}).GetList()
for file1 in file_list:
  print('title: %s, id: %s' % (file1['title'], file1['id']))

test = drive.CreateFile({'id': '1vdBec1A_SCu4v0r_SRqd5oqPVAkUYYyC'})
test.GetContentFile('test.csv')
sample = drive.CreateFile({'id': '1Zq7yOsJeedmfqmK2srbJU0rMvFkQEK_Q'})
sample.GetContentFile('sample.csv')
train = drive.CreateFile({'id': '1PNwiiiKC8ziiCnlOYE6EPMNWC0BDdkuH'})
train.GetContentFile('train.csv')

"""## Análisis Exploratorio

Lo que sabemos: <br>
Example #27
0
import os, sys
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive

gauth = GoogleAuth()
gauth.CommandLineAuth()
drive = GoogleDrive(gauth)

folder_id = "1cTnFkxo0QkG02_3saDWMqObVhS6D6M3f"


def upload_movie(fname, basename):

    ext = fname.split(".")[-1]
    mine_type = "audio/mp4"

    f = drive.CreateFile({
        'title':
        basename,
        'mimeType':
        mime_type,
        'parents': [{
            'kind': 'drive#fileLink',
            'id': folder_id
        }]
    })

    f.SetContentFile(fname)
    f.Upload()
    print("success : upload movie : {}".format(fname))
Example #28
0
async def gdrive_stuff(client, message):
    gauth.LoadCredentialsFile("nana/session/drive")
    if gauth.credentials is None:
        await message.edit(
            "You are not logged in to your google drive account!\nYour assistant bot may help you to login google drive, check your assistant bot for more information!"
        )
        gdriveclient = os.path.isfile("client_secrets.json")
        if not gdriveclient:
            await setbot.send_message(
                message.from_user.id,
                "Hello, look like you're not logged in to google drive 🙂\nI can help you to login.\n\nFirst of all, you need to activate your google drive API\n1. [Go here](https://developers.google.com/drive/api/v3/quickstart/python), click **Enable the drive API**\n2. Login to your google account (skip this if you're already logged in)\n3. After logged in, click **Enable the drive API** again, and click **Download Client Configuration** button, download that.\n4. After downloaded that file, rename `credentials.json` to `client_secrets.json`, and upload to your bot dir (not in nana dir)\n\nAfter that, you can go next guide by type /gdrive"
            )
        else:
            await setbot.send_message(
                message.from_user.id,
                "Hello, look like you're not logged in to google drive :)\nI can help you to login.\n\n**To login Google Drive**\n1. `/gdrive` to get login URL\n2. After you're logged in, copy your Token.\n3. `/gdrive (token)` without `(` or `)` to login, and your session will saved to `nana/session/drive`.\n\nDon't share your session to someone, else they will hack your google drive account!"
            )
        return
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()

    drive = GoogleDrive(gauth)
    drive_dir = await get_drivedir(drive)

    if len(message.text.split()) == 3 and message.text.split(
    )[1] == "download":
        await message.edit("Downloading...")
        driveid = await get_driveid(message.text.split()[2])
        if not driveid:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        filename = await get_driveinfo(driveid)
        if not filename:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        await message.edit(
            "Downloading for `{}`\nPlease wait...".format(filename))
        download = drive.CreateFile({'id': driveid})
        download.GetContentFile(filename)
        try:
            os.rename(filename, "nana/downloads/" + filename)
        except FileExistsError:
            os.rename(filename, "nana/downloads/" + filename + ".2")
        await message.edit(
            "Downloaded!\nFile saved to `{}`".format("nana/downloads/" +
                                                     filename))
    elif len(
            message.text.split()) == 3 and message.text.split()[1] == "upload":
        filename = message.text.split()[2].split(None, 1)[0]
        checkfile = os.path.isfile(filename)
        if not checkfile:
            await message.edit("File `{}` was not found!".format(filename))
            return
        await message.edit("Uploading `{}`...".format(filename))
        upload = drive.CreateFile({
            "parents": [{
                "kind": "drive#fileLink",
                "id": drive_dir
            }],
            'title':
            filename
        })
        upload.SetContentFile(filename)
        upload.Upload()
        upload.InsertPermission({
            'type': 'anyone',
            'value': 'anyone',
            'role': 'reader'
        })
        await message.edit(
            "Uploaded!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
            .format(filename, upload['alternateLink'], filename,
                    upload['downloadUrl']))
    elif len(
            message.text.split()) == 3 and message.text.split()[1] == "mirror":
        message.edit("Mirroring...")
        driveid = await get_driveid(message.text.split()[2])
        if not driveid:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        filename = await get_driveinfo(driveid)
        if not filename:
            await message.edit(
                "Invaild URL!\nIf you think this is bug, please go to your Assistant bot and type `/reportbug`"
            )
            return
        mirror = drive.auth.service.files().copy(fileId=driveid,
                                                 body={
                                                     "parents": [{
                                                         "kind":
                                                         "drive#fileLink",
                                                         "id":
                                                         drive_dir
                                                     }],
                                                     'title':
                                                     filename
                                                 }).execute()
        new_permission = {
            'type': 'anyone',
            'value': 'anyone',
            'role': 'reader'
        }
        drive.auth.service.permissions().insert(fileId=mirror['id'],
                                                body=new_permission).execute()
        await message.edit(
            "Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})".
            format(filename, mirror['alternateLink'], filename,
                   mirror['downloadUrl']))
    elif len(message.text.split()) == 2 and message.text.split(
    )[1] == "tgmirror":
        if message.reply_to_message:
            await message.edit("__Downloading...__")
            if message.reply_to_message.photo:
                nama = "photo_{}_{}.png".format(
                    message.reply_to_message.photo,
                    message.reply_to_message.photo.date)
                await client.download_media(message.reply_to_message.photo,
                                            file_name="nana/downloads/" + nama)
            elif message.reply_to_message.animation:
                nama = "giphy_{}-{}.gif".format(
                    message.reply_to_message.animation.date,
                    message.reply_to_message.animation.file_size)
                await client.download_media(message.reply_to_message.animation,
                                            file_name="nana/downloads/" + nama)
            elif message.reply_to_message.video:
                nama = "video_{}-{}.mp4".format(
                    message.reply_to_message.video.date,
                    message.reply_to_message.video.file_size)
                await client.download_media(message.reply_to_message.video,
                                            file_name="nana/downloads/" + nama)
            elif message.reply_to_message.sticker:
                nama = "sticker_{}_{}.webp".format(
                    message.reply_to_message.sticker.date,
                    message.reply_to_message.sticker.set_name)
                await client.download_media(message.reply_to_message.sticker,
                                            file_name="nana/downloads/" + nama)
            elif message.reply_to_message.audio:
                nama = "{}".format(message.reply_to_message.audio.file_name)
                await client.download_media(message.reply_to_message.audio,
                                            file_name="nana/downloads/" + nama)
            elif message.reply_to_message.voice:
                nama = "audio_{}.ogg".format(message.reply_to_message.voice)
                await client.download_media(message.reply_to_message.voice,
                                            file_name="nana/downloads/" + nama)
            elif message.reply_to_message.document:
                nama = "{}".format(message.reply_to_message.document.file_name)
                await client.download_media(message.reply_to_message.document,
                                            file_name="nana/downloads/" + nama)
            else:
                message.edit("Unknown file!")
                return
            upload = drive.CreateFile({
                "parents": [{
                    "kind": "drive#fileLink",
                    "id": drive_dir
                }],
                'title':
                nama
            })
            upload.SetContentFile("nana/downloads/" + nama)
            upload.Upload()
            upload.InsertPermission({
                'type': 'anyone',
                'value': 'anyone',
                'role': 'reader'
            })
            await message.edit(
                "Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})"
                .format(nama, upload['alternateLink'], nama,
                        upload['downloadUrl']))
            os.remove("nana/downloads/" + nama)
        else:
            await message.edit("Reply document to mirror it to gdrive")
    elif len(message.text.split()) == 3 and message.text.split(
    )[1] == "urlmirror":
        await message.edit("Downloading...")
        URL = message.text.split()[2]
        nama = URL.split("/")[-1]
        time_dl = await download_url(URL, nama)
        if "Downloaded" not in time_dl:
            await message.edit("Failed to download file, invaild url!")
            return
        await message.edit(f"Downloaded with {time_dl}.\nNow uploading...")
        upload = drive.CreateFile({
            "parents": [{
                "kind": "drive#fileLink",
                "id": drive_dir
            }],
            'title':
            nama
        })
        upload.SetContentFile("nana/downloads/" + nama)
        upload.Upload()
        upload.InsertPermission({
            'type': 'anyone',
            'value': 'anyone',
            'role': 'reader'
        })
        await message.edit(
            "Done!\nDownload link: [{}]({})\nDirect download link: [{}]({})".
            format(nama, upload['alternateLink'], nama, upload['downloadUrl']))
        os.remove("nana/downloads/" + nama)
    else:
        await message.edit(
            "Usage:\n-> `gdrive download <url/gid>`\n-> `gdrive upload <file>`\n-> `gdrive mirror <url/gid>`\n\nFor more information about this, go to your assistant."
        )
Example #29
0
0B0ZXk88koS2KUHZZZkVwd1RoVmc"""
    gensim_fileids = gensim_fileids.split('\n')

    gensim_languages = [
        'Bengali', 'Catalan', 'Chinese', 'Danish', 'Dutch', 'Esperanto',
        'Finnish', 'French', 'German', 'Hindi', 'Hungarian', 'Indonesian',
        'Italian', 'Japanese', 'Javanese', 'Korean', 'Malay', 'Norwegian',
        'Norwegian Nynorsk', 'Polish', 'Portuguese', 'Russian', 'Spanish',
        'Swahili', 'Swedish', 'Tagalog', 'Thai', 'Turkish', 'Vietnamese'
    ]

    gensim_lgs = [
        'bn', 'ca', 'zh', 'da', 'nl', 'eo', 'fi', 'fr', 'de', 'hi', 'hu', 'id',
        'it', 'ja', 'jv', 'ko', 'ms', 'no', 'nn', 'pl', 'pt', 'ru', 'es', 'sw',
        'sv', 'tl', 'th', 'tr', 'vi'
    ]

    # Set google Auth and instantiate drive
    gauth = googauth()
    drive = GoogleDrive(gauth)

    # Download all file ids from google
    gensim_retrieve(drive, gensim_fileids[:1], gensim_lgs[:1])

    # Unzip all files
    gensim_unzip(gensim_lgs[:1])

    # Pickle objects for later
    pickle_rw(('gensim_languages', gensim_languages),
              ('gensim_lgs', gensim_lgs))
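
The `gensim_retrieve` helper called above is not included in this fragment. A hedged sketch of what such a download step could look like with the same PyDrive calls is below; saving each file as `<language code>.zip` is an assumption based on the `gensim_unzip` call that follows it.

def gensim_retrieve(drive, fileids, lgs):
    # Download each Drive file id and save it locally under its language code.
    for file_id, lg in zip(fileids, lgs):
        drive_file = drive.CreateFile({'id': file_id})
        drive_file.GetContentFile(lg + '.zip')
        print('Retrieved %s -> %s.zip' % (file_id, lg))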
Example #30
0
 def __init__(self):
     gauth = GoogleAuth()
     scope = ['https://www.googleapis.com/auth/drive']
     gauth.credentials = ServiceAccountCredentials.from_json_keyfile_name(settings.CLIENT_SECRET_FILE_SERVICEACCOUNT, scope)
     self.drive = GoogleDrive(gauth)
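
A minimal usage sketch for this service-account wrapper, with the missing imports spelled out; the class name, the `upload` method and the key-file path are assumptions for illustration, while the PyDrive calls (CreateFile, SetContentFile, Upload) are the same ones used throughout the examples above.

import os
from oauth2client.service_account import ServiceAccountCredentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive


class DriveClient:
    def __init__(self, keyfile_path):
        # keyfile_path points at a service-account JSON key file (hypothetical path).
        gauth = GoogleAuth()
        scope = ['https://www.googleapis.com/auth/drive']
        gauth.credentials = ServiceAccountCredentials.from_json_keyfile_name(keyfile_path, scope)
        self.drive = GoogleDrive(gauth)

    def upload(self, local_path, title=None):
        # Uploads into the service account's own Drive root unless parents are set.
        drive_file = self.drive.CreateFile({'title': title or os.path.basename(local_path)})
        drive_file.SetContentFile(local_path)
        drive_file.Upload()
        return drive_file['id']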