    def test_service_account_authorization(self):
        c = pygsheets.authorize(service_account_file=self.base_path + '/pygsheettest_service_account.json')
        assert isinstance(c, Client)

        self.sheet = c.create('test_sheet')
        self.sheet.share('*****@*****.**')
        self.sheet.delete()

    def test_user_credentials_loading(self):
        c = pygsheets.authorize(client_secret=self.base_path + '/client_secret.json',
                                credentials_directory=self.base_path)
        assert isinstance(c, Client)

        self.sheet = c.create('test_sheet')
        self.sheet.share('*****@*****.**')
        self.sheet.delete()

    def handle(self, *args, **options):
        sheet_key = options.get('sheet_key')
        if not sheet_key:
            raise CommandError('Please specify --sheet-key')

        gc = pygsheets.authorize(outh_file=settings.GOOGLE_SHEETS_CLIENT_SECRET_FILE)
        ss = gc.open_by_key(sheet_key)
        ws = ss.sheet1

        for x in ws.get_all_records():
            print('')
            for key, value in x.items():
                print('{}: {}'.format(key, value))

    def handle(self, *args, **options):
        sheet_key = options.get('sheet_key')
        if not sheet_key:
            raise CommandError('Please specify --sheet-key')

        gc = pygsheets.authorize(outh_file=settings.GOOGLE_SHEETS_CLIENT_SECRET_FILE)
        ss = gc.open_by_key(sheet_key)
        ws = ss.sheet1

        for child in Child.objects.all():
            cell = None

            cell_child_name = ws.find(child.name)

            father_mobile = child.child_parents.father_mobile
            if father_mobile:
                cell = ws.find(child.child_parents.father_mobile)

            father_email = child.child_parents.father_email
            if father_email and not cell:
                cell = ws.find(child.child_parents.father_email)

            mother_mobile = child.child_parents.mother_mobile
            if mother_mobile and not cell:
                cell = ws.find(child.child_parents.mother_mobile)

            mother_email = child.child_parents.mother_email
            if mother_email and not cell:
                cell = ws.find(child.child_parents.mother_email)

            cell_child_name = cell_child_name[0] if cell_child_name else None
            cell = cell[0] if cell else None

            if cell_child_name and cell and cell_child_name.row == cell.row:
                child_unique_id_cell = 'M{}'.format(cell.row)
                ws.update_cell(str(child_unique_id_cell), str(child.unique_id))
                self.stdout.write('Set {} unique id to {}'.format(child.name, child.unique_id))

                parents_unique_id_cell = 'N{}'.format(cell.row)
                ws.update_cell(str(parents_unique_id_cell), str(child.child_parents.unique_id))
                self.stdout.write('Set {} parents unique id to {}'.format(child.name, child.child_parents.unique_id))
            elif cell:
                parents_unique_id_cell = 'N{}'.format(cell.row)
                ws.update_cell(str(parents_unique_id_cell), str(child.child_parents.unique_id))
                self.stdout.write('Set {} parents unique id to {}'.format(child.name, child.child_parents.unique_id))
            time.sleep(1)

        self.stdout.write('Updated sheet.')
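
A minimal, hedged sketch of the lookup-and-update pattern used above, with an illustrative helper name and column letter. It assumes, like the command above, that Worksheet.find() returns a list of matching Cell objects and that update_cell() accepts an A1-style address.

def update_column_for_match(ws, search_value, column_letter, new_value):
    # find() returns a list of Cell objects whose value matches search_value
    matches = ws.find(search_value)
    if not matches:
        return False
    # combine the fixed column letter with the matched row to form an A1 address
    target = '{}{}'.format(column_letter, matches[0].row)
    ws.update_cell(target, str(new_value))
    return True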
Example 5
def setup_module(module):
    global test_config, pygsheet_client
    try:
        test_config = read_config(CONFIG_FILENAME)
    except IOError as e:
        msg = "Can't find %s for reading test configuration. "
        raise Exception(msg % e.filename)

    try:
        pygsheet_client = pygsheets.authorize(service_account_file=SERVICE_FILE_NAME)
    except IOError as e:
        msg = "Can't find %s for reading credentials. "
        raise Exception(msg % e.filename)

    config_title = test_config.get('Spreadsheet', 'title') + PYTHON_VERSION
    sheets = pygsheet_client.open_all(query="name = '{}'".format(config_title))
    for sheet in sheets:
        sheet.delete()
Example 6
def _login():
    global gc, sheet, authorize_time
    tries = 1
    max_tries = 10
    authorize_shelf_life = 600  #: 10 minutes

    while tries <= max_tries:
        try:
            if gc is None or authorize_time is None or time() - authorize_time > authorize_shelf_life:
                logger.debug('logging into google spreadsheet')
                authorize_time = time()
                gc = pygsheets.authorize(service_file=credentials)
                sheet = gc.open_by_url(settings.queryLayersUrl)

            return sheet
        except Exception as ex:
            if tries == max_tries:
                raise ex

            logger.warning('login error, retrying...')
            sleep(30)

        tries += 1
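
A brief usage sketch for the cached-login helper above; the caller name is hypothetical. Each call re-authorizes only when the cached client is older than the shelf life.

def get_query_layers():
    # _login() returns the cached spreadsheet, re-authorizing if it has gone stale
    wks = _login().sheet1
    return wks.get_all_records()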
Example 7
import pandas as pd
import numpy as np
import configparser as cp
import requests
from lxml import etree
import pygsheets
from tqdm import tqdm
from bs4 import BeautifulSoup

config_parser = cp.ConfigParser()
config_parser.read('../configs/secret.config')
gr_key = config_parser.get('DEFAULT', 'key')
sheet_id = config_parser.get('DEFAULT', 'gsheet_id')
tqdm.pandas()

gc = pygsheets.authorize(service_file='../configs/MyLibrary_creds.json')
sh = gc.open_by_key(sheet_id)

sheets = ['Zuo collection', 'van de Ven collection']
# sheets = ['test','test2']
# sheets = ['van de Ven collection']


def find_best_book_match(results):
    # TODO: be smarter about finding better match
    return results.find('work')


def get_book_info(row):
    query = "{} {} {}".format(row['Title'], row['Author First'],
                              row['Author Last'])


def ssrow(sheet, body):  # Return the Sheet RowNumber to update
    try:
        return (2 + next(i for i, n in enumerate(sheet) if n['Body'] == body))
    except StopIteration:
        print(f'Unknown Body {body}')
        return (None)


if __name__ == '__main__':
    #Load Hotspot Spreadsheet
    print('.Loading Sheet')
    client = pygsheets.authorize()
    sh = client.open('Canonn Local Hotspot Survey')
    wks = sh.worksheet('title', 'Raw Data')
    #wks = sh.worksheet('title','TestImport')
    ss = wks.get_all_records(head=1)
    refs = []
    refs.append({
        'Type': 'Icy - Pristine',
        'Alexandrite': 'G',
        'Bromellite': 'H',
        'Grandidierite': 'I',
        'LowTemperatureDiamond': 'J',
        'Opal': 'K',
        'Tritium': 'L'
    })
    refs.append({
Example 9
#13 = hash
#14 = apphash

# tid = sys.argv[1]

# # gspread auth
# json_key = '/home/mattfel/regression/synth/key.json'
# scope = [
#     'https://spreadsheets.google.com/feeds',
#     'https://www.googleapis.com/auth/drive'
# ]
# credentials = ServiceAccountCredentials.from_json_keyfile_name(json_key, scope)

# pygsheets auth
json_key = '/home/mattfel/regression/synth/pygsheets_key.json'
gc = pygsheets.authorize(outh_file=json_key)

if (sys.argv[12] == "Zynq"):
    # sh = gc.open("Zynq Regression") # Open by name
    try:
        sh = gc.open_by_key("1jZxVO8VFODR8_nEGBHfcmfeIJ3vo__LCPdjt4osb3aE")
    except Exception:
        print("WARN: Could not get sheet")
        exit()
    word = "Slice"
elif (sys.argv[12] == "ZCU"):
    try:
        sh = gc.open_by_key("181pQqQXV_DsoWZyRV4Ve3y9QI6I0VIbVGS3TT0zbEv8")
    except Exception:
        print("WARN: Could not get sheet")
        exit()
Example 10
def copia_dados_worksheet(url_workbook_origem_revenue,
                          url_workbook_origem_receita,
                          url_workbook_destino_1,
                          url_workbook_destino_2,
                          url_workbook_destino_3,
                          nome_worksheet_origem_revenue='Revenue Responses',
                          nome_worksheet_origem_receita='Resultados',
                          indice_coluna_copia=0,
                          celula_destino='A1',
                          nome_worksheet_destino_revenue='Rev. Import',
                          nome_worksheet_destino_receita='Receita Import',
                          service_file='client_secret.json'):

    # Google API account info
    google_sheets_client = pygsheets.authorize(service_file=service_file)

    # open the workbooks
    try:
        workbook_origem_revenue = google_sheets_client.open_by_url(
            url_workbook_origem_revenue)
        print('Read 1')
        workbook_origem_receita = google_sheets_client.open_by_url(
            url_workbook_origem_receita)
        print('Read 2')
        workbook_destino_1 = google_sheets_client.open_by_url(
            url_workbook_destino_1)
        print('Read 3')
        workbook_destino_2 = google_sheets_client.open_by_url(
            url_workbook_destino_2)
        print('Read 4')
        workbook_destino_3 = google_sheets_client.open_by_url(
            url_workbook_destino_3)
        print('Read 5')

    except pygsheets.SpreadsheetNotFound as err:
        raise ValueError('Worksheet URL not found. Error: {}'.format(err))

    # access the worksheet with the source data
    worksheet_origem_receita = workbook_origem_receita.worksheet(
        'title', nome_worksheet_origem_receita)
    lista_dados_receita = worksheet_origem_receita.get_all_records(
        empty_value='', head=1)
    df_receita = pd.DataFrame(lista_dados_receita)
    print('Df 1')
    worksheet_origem_revenue = workbook_origem_revenue.worksheet(
        'title', nome_worksheet_origem_revenue)
    lista_dados_revenue = worksheet_origem_revenue.get_all_records(
        empty_value='', head=1)
    df_revenue = pd.DataFrame(lista_dados_revenue)
    print('Df 2')

    # access the destination worksheets

    worksheet_destino_1 = workbook_destino_1.worksheet(
        'title', nome_worksheet_destino_revenue)
    worksheet_destino_2 = workbook_destino_2.worksheet(
        'title', nome_worksheet_destino_revenue)
    worksheet_destino_3 = workbook_destino_3.worksheet(
        'title', nome_worksheet_destino_revenue)
    worksheet_destino_4 = workbook_destino_1.worksheet(
        'title', nome_worksheet_destino_receita)
    worksheet_destino_5 = workbook_destino_2.worksheet(
        'title', nome_worksheet_destino_receita)
    worksheet_destino_6 = workbook_origem_receita.worksheet(
        'title', nome_worksheet_destino_revenue)

    # paste the values into the destination cells
    worksheet_destino_1.set_dataframe(df_revenue, celula_destino)
    print('1')
    worksheet_destino_2.set_dataframe(df_revenue, celula_destino)
    print('2')
    worksheet_destino_3.set_dataframe(df_revenue, celula_destino)
    print('3')
    worksheet_destino_4.set_dataframe(df_receita, celula_destino)
    print('4')
    worksheet_destino_5.set_dataframe(df_receita, celula_destino)
    print('5')
    worksheet_destino_6.set_dataframe(df_revenue, celula_destino)
    print('6')
Example 11
          for date_, trans in [day.split(':')]]

# create a pair for each position (date, transaction)
temp_1 = [[day[0], trans] for day in temp_0 for trans in day[1]]

# split each position into (date, sum, description, is_cash)
temp_2 = [[day[0], sum_, desc[:-1].title(),
           int(desc[-1])] for day in temp_1
          for sum_, desc in [day[1].split(' ')]]

# Convert list into proper output format
cashed_block = []
for block in temp_2:
    block.insert(1, 'Expense') if float(block[1]) < 0 else block.insert(
        1, 'Income')
    block.insert(3, '-')
    block.insert(5, '-'), block.insert(5, 'Me')
    block[0] = block[0].replace('.', '/')
    # alternative flow for cashed position, creates mirror position with income to match cashed expense position
    if block[4] == 'Cashed':
        block[3] = 'Cashed'
        cashed_block = block.copy()
        cashed_block[2], cashed_block[1], cashed_block[7] = str(
            abs(int(block[2]))), 'Income', 1

temp_2.insert(-1, cashed_block) if cashed_block else None
temp_2.sort()
gc = pygsheets.authorize(service_file='client_secret.json')
sheet = gc.open_by_key(os.environ['GS_TOKEN']).worksheet_by_title('Balance')
sheet.insert_rows(sheet.rows - 1, len(temp_2), values=temp_2)
Example 12
        <tbody class='list'>'''.format('\n'.join(
                ['                <th scope="col"><span class="sort" data-sort="{0}">{0}</span></th>'.format(key) for key in item.keys()]))
            once = False
        html += '''
            <tr>
{}
            </tr>'''.format('\n'.join(['                <td data-th="{0}" class="{0}">{1}</td>'.format(key, value) for key, value in item.items()]))
    html += '''
        </tbody>
    </table>
</div>
<script src="{{ "/js/dist/list.min.js" | prepend: site.baseurl }}"></script>
'''

    return html


if __name__ == '__main__':
    gc = pygsheets.authorize(service_file='client_secret.json')

    data = get_sheet_data(gc, '11ASS7LnxgpnD0jN4utzklREgMf1pcvYjcXcIcESHweQ', 'SGID Stewardship Info')
    data = [x for x in data if len(x['name'].strip()) > 0]
    html = create(data)

    file_path = join(dirname(__file__), '..', 'data', 'sgid-index', 'index.html')

    with open(file_path + '.bak', 'w') as data:
        data.writelines(html)

    rename(file_path + '.bak', file_path)
Example 13
def main():
    import argparse
    parser = argparse.ArgumentParser(description=__doc__,
                                     epilog=EPILOG,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--infile',
                        help='Input file containing experiment accessions to'
                        ' report on (overrides assay, rfa, lab, query_terms).',
                        type=argparse.FileType('r'))
    parser.add_argument('--server',
                        help='Full URL of the server.')
    parser.add_argument('--key',
                        default='default',
                        help='The keypair identifier from the keyfile.'
                        ' Default is --key=default.')
    parser.add_argument('--keyfile',
                        default=os.path.expanduser('~/keypairs.json'),
                        help='The keypair file. Default is --keyfile=%s'
                        % (os.path.expanduser('~/keypairs.json')))
    parser.add_argument('--authid',
                        help='The HTTP auth ID.')
    parser.add_argument('--authpw',
                        help='The HTTP auth PW.')
    parser.add_argument('--debug',
                        default=False,
                        action='store_true',
                        help='Print debug messages. Default is False.')
    parser.add_argument('--assembly',
                        help='The genome assembly to report on.',
                        default=None)
    parser.add_argument('--assay',
                        help='The assay_term_name to report on.',
                        default='ChIP-seq')
    parser.add_argument('--rfa',
                        help='ENCODE2 or ENCODE3. Omit for all.',
                        default=None)
    parser.add_argument('--lab',
                        help='ENCODE lab name, e.g. j-michael-cherry.',
                        default=None)
    parser.add_argument('--query_terms',
                        help='Additional query terms in the form "&term=value".',
                        default=None)
    parser.add_argument('--outfile',
                        help='CSV output.',
                        type=argparse.FileType('wb'),
                        default=sys.stdout)
    parser.add_argument('--create_google_sheet',
                        help='Create Google Sheet with conditional formatting.'
                        ' Default is False. Requires API key.',
                        default=False,
                        action='store_true')
    parser.add_argument('--sheet_title',
                        help='Name of Google Sheet.',
                        default='ENCODE ChIP QC')
    parser.add_argument('--apikey',
                        help='Path to secret credential for Google Sheets.',
                        default=os.path.expanduser(
                            '~/sheets.googleapis.com-python.json'))
    parser.add_argument('--released',
                        help='Bypasses authentication and only shows released results.',
                        default=False,
                        action='store_true')
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(
            format='%(levelname)s:%(message)s', level=logging.DEBUG)
    else:
        logging.basicConfig(
            format='%(levelname)s:%(message)s', level=logging.WARNING)
    if args.released:
        keypair, authid, authpw = None, None, None
        server = PUBLIC_SERVER
    else:
        server, authid, authpw = processkeys(args)
        keypair = (authid, authpw)
    if args.assembly in ['hg19', 'GRCh38', 'GRCh38-minimal']:
        organism_name = 'human'
    elif args.assembly in ['mm10', 'mm9', 'mm10-minimal']:
        organism_name = 'mouse'
    else:
        organism_name = ''
    query = '/search/?type=experiment'
    if args.infile:
        for expid in args.infile:
            expid = expid.rstrip()
            if expid.startswith('#'):
                continue
            else:
                query += '&accession=%s' % (expid)
    else:
        query += '&status!=deleted'\
                 '&status!=revoked'\
                 '&status!=archived'\
                 '&status!=replaced'
        if args.assay:
            query += '&assay_term_name=%s' % (args.assay)
        if args.rfa:
            query += '&award.rfa=%s' % (args.rfa)
        if args.lab:
            query += '&lab.name=%s' % (args.lab)
        if organism_name:
            query += '&replicates.library.biosample.donor.organism.name=%s' % (
                organism_name)
        if args.query_terms:
            query += args.query_terms
    query += '&field=assay_term_name'\
             '&field=accession'\
             '&field=biosample_term_name'\
             '&field=biosample_type'\
             '&field=lab.name'\
             '&field=award.rfa'\
             '&field=target.name'\
             '&field=target.investigated_as'\
             '&field=internal_status'\
             '&format=json'\
             '&limit=all'
    url = urlparse.urljoin(server, query)
    logging.debug(url)
    result = get_ENCODE(url, authid, authpw)
    experiments = result['@graph']
    fieldnames = [
        'experiment',
        'experiment link',
        'target_type',
        'target',
        'biosample_name',
        'biosample_type',
        'biorep_id',
        'lab',
        'rfa',
        'assembly',
        'bam',
        'bam link',
        'unfiltered bam',
        'unfiltered bam link',
        'hiq_reads',
        'loq_reads',
        'mappable',
        'fract_mappable',
        'end',
        'r_lengths',
        'map_length',
        'crop_length',
        'picard_read_pairs_examined',
        'picard_unpaired_reads_examined',
        'usable_frags',
        'fract_usable',
        'NRF',
        'PBC1',
        'PBC2',
        'frag_len',
        'NSC',
        'RSC',
        'xcor plot',
        'library',
        'library aliases',
        'from fastqs',
        'platform',
        'date_created',
        'release status',
        'internal status',
        'dx_analysis']
    if args.create_google_sheet:
        # Force creation of temporary CSV that can be loaded into a DataFrame,
        # written to Google Sheets, then deleted.
        temp_file = 'temp_mapping_%s.tsv' % (args.assembly)
        args.outfile = open(temp_file, 'w')
    writer = csv.DictWriter(args.outfile,
                            fieldnames=fieldnames,
                            delimiter='\t',
                            quotechar='"')
    writer.writeheader()
    pool = Pool(100)
    get_rows_func = partial(get_rows,
                            server=server,
                            authid=authid,
                            authpw=authpw,
                            args=args)
    for rows in pool.imap_unordered(get_rows_func, experiments):
        for row in rows:
            writer.writerow(row)
    if args.create_google_sheet:
        args.outfile.close()
        # Load CSV data, sort.
        mapping_data = pd.read_table(temp_file)
        mapping_data = mapping_data.fillna('')
        mapping_data = mapping_data.sort_values(
            by=['lab', 'biosample_name', 'target', 'experiment'],
            ascending=[True, True, True, True])
        mapping_data = mapping_data.reset_index(drop=True)
        # Read sheet title and create unique page title.
        date = datetime.now().strftime('%m_%d_%Y')
        sheet_title = (
            args.sheet_title if not args.released
            else '{} Released'.format(args.sheet_title)
        )
        page_title = '%s_mapping_%s' % (args.assembly, date)
        # Open/create Google Sheet.
        gc = pygsheets.authorize(args.apikey)
        try:
            sh = gc.open(sheet_title)
        except pygsheets.exceptions.SpreadsheetNotFound:
            sh = gc.create(sheet_title)
        try:
            wks = sh.add_worksheet(page_title)
        except HttpError:
            wks = sh.worksheet_by_title(page_title)
        # Clear worksheet.
        wks.clear()
        # Add data from DataFrame.
        wks.set_dataframe(mapping_data, copy_head=True, fit=True, start='A1')
        # Apply formatting and conditions.
        header['repeatCell']['range']['sheetId'] = wks.id
        wks.client.sh_batch_update(wks.spreadsheet.id, header)
        # Freeze header.
        freeze_header['updateSheetProperties']['properties']['sheetId'] = wks.id
        wks.client.sh_batch_update(wks.spreadsheet.id, freeze_header)
        # Resize font.
        font_size_format['repeatCell']['range']['sheetId'] = wks.id
        wks.client.sh_batch_update(wks.spreadsheet.id, font_size_format)
        # Add notes.
        batch_notes = []
        for k, v in notes_dict.items():
            # Don't overwrite template.
            blank_note = copy.deepcopy(note)
            num = mapping_data.columns.get_loc(k)
            blank_note['repeatCell']['range']['startColumnIndex'] = num
            blank_note['repeatCell']['range']['endColumnIndex'] = num + 1
            blank_note['repeatCell']['cell']['note'] = v
            blank_note['repeatCell']['range']['sheetId'] = wks.id
            batch_notes.append(blank_note)
        wks.client.sh_batch_update(wks.spreadsheet.id, batch_notes)
        # Format numbers.
        batch_numbers = []
        for k, v in number_cols.items():
            # Apply pattern to every column in cols.
            for col in v['cols']:
                # Don't overwrite template.
                blank_number_format = copy.deepcopy(number_format)
                num = mapping_data.columns.get_loc(col)
                blank_number_format['repeatCell']['range']['startColumnIndex'] = num
                blank_number_format['repeatCell']['range']['endColumnIndex'] = num + 1
                blank_number_format['repeatCell']['range']['sheetId'] = wks.id
                blank_number_format['repeatCell']['cell']['userEnteredFormat']['numberFormat']['pattern'] = v['pattern']
                batch_numbers.append(blank_number_format)
        wks.client.sh_batch_update(wks.spreadsheet.id, batch_numbers)
        # Apply conditional formatting.
        batch_conditions = []
        for k, v in condition_cols.items():
            for condition in v['conditions']:
                # Don't overwrite template.
                blank_condition = copy.deepcopy(condition_dict)
                # More descriptive names.
                condition_type = condition[0]
                condition_values = condition[1]
                condition_color = condition[2]
                # Fill in specifics.
                blank_condition['addConditionalFormatRule']['rule']['booleanRule']['condition']['type'] = condition_type
                # Don't do this for conditions (e.g. BLANK) that don't require values.
                if condition_values:
                    # Must loop through because NUMBER_BETWEEN condition requires two objects.
                    for value in condition_values:
                        blank_condition['addConditionalFormatRule']['rule']['booleanRule']['condition']['values'].append({
                            "userEnteredValue": value})
                blank_condition['addConditionalFormatRule']['rule']['booleanRule']['format']['backgroundColor']['red'] = condition_color[0]
                blank_condition['addConditionalFormatRule']['rule']['booleanRule'][
                    'format']['backgroundColor']['green'] = condition_color[1]
                blank_condition['addConditionalFormatRule']['rule']['booleanRule'][
                    'format']['backgroundColor']['blue'] = condition_color[2]
                # Find column number.
                num = mapping_data.columns.get_loc(k)
                blank_condition['addConditionalFormatRule']['rule']['ranges'][0]['startColumnIndex'] = num
                blank_condition['addConditionalFormatRule']['rule']['ranges'][0]['endColumnIndex'] = num + 1
                blank_condition['addConditionalFormatRule']['rule']['ranges'][0]['sheetId'] = wks.id
                batch_conditions.append(blank_condition)
        wks.client.sh_batch_update(wks.spreadsheet.id, batch_conditions)
        # Resize all columns.
        for i in range(wks.cols):
            try:
                wks.adjust_column_width(i, pixel_size=38)
                time.sleep(0.5)
            except RequestError:
                # Try again if response takes too long.
                wks.adjust_column_width(i, pixel_size=38)
        tiny_columns = ['experiment link',
                        'bam link',
                        'unfiltered bam',
                        'unfiltered bam link',
                        'loq_reads',
                        'end',
                        'xcor plot',
                        'dx_analysis']
        # Resize tiny columns.
        for i in [mapping_data.columns.get_loc(x) for x in tiny_columns]:
            wks.adjust_column_width(i, pixel_size=25)
        accession_columns = ['experiment',
                             'bam',
                             'library',
                             'library aliases',
                             'from fastqs',
                             'target']
        # Resize accession columns.
        for i in [mapping_data.columns.get_loc(x) for x in accession_columns]:
            wks.adjust_column_width(i, pixel_size=90)
        # Remove temp file.
        os.remove(temp_file)
Example 14
def main():
    args = get_args()
    if args.debug:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    else:
        # Use the default logging level.
        logging.basicConfig(format='%(levelname)s:%(message)s')
        logger.setLevel(logging.INFO)
    if args.released:
        keypair = None
        server = PUBLIC_SERVER
    else:
        authid, authpw, server = common.processkey(args.key, args.keyfile)
        keypair = (authid, authpw)
    if args.experiments:
        ids = args.experiments
    elif args.all:
        # Get metadata for all ChIP-seq Experiments.
        base_exp_query = '/search/?type=Experiment&assay_title=ChIP-seq&award.project=ENCODE&status=released'
        extended_query = '&status=submitted&status=in+progress&status=started&status=release+ready'
        exp_query = base_exp_query if args.released else (base_exp_query + extended_query)
        all_experiments = common.encoded_get(server + exp_query,
                                             keypair)['@graph']
        # Extract Experiment accessions.
        ids = [exp.get('accession') for exp in all_experiments]
    elif args.infile:
        ids = args.infile
    else:
        # Never reached because infile defaults to stdin.
        raise InputError('Must supply experiment ids'
                         ' in arguments or --infile.')
    # Define column names for TSV.
    fieldnames = ['date',
                  'analysis',
                  'analysis_id',
                  'experiment',
                  'target',
                  'biosample_term_name',
                  'biosample_type',
                  'replication',
                  'lab',
                  'rfa',
                  'assembly',
                  'Nt',
                  'Np',
                  'N1',
                  'N2',
                  'rescue_ratio',
                  'self_consistency_ratio',
                  'reproducibility_test',
                  'Ft',
                  'Fp',
                  'F1',
                  'F2',
                  'state',
                  'release',
                  'total_price',
                  'quality_metric_of']
    if args.create_google_sheet:
        # Force creation of temporary CSV that can be loaded into a DataFrame,
        # written to Google Sheets, then deleted.
        temp_file = 'temp_idr_%s.tsv' % (args.assembly)
        args.outfile = open(temp_file, 'w')
    writer = csv.DictWriter(args.outfile,
                            fieldnames=fieldnames,
                            delimiter='\t',
                            quotechar='"')
    writer.writeheader()
    # Get metadata for all IDR output Files.
    base_idr_query = (
        '/search/?type=File&assembly=%s&file_format=bed'
        '&output_type=optimal+idr+thresholded+peaks'
        '&output_type=conservative+idr+thresholded+peaks'
        '&output_type=pseudoreplicated+idr+thresholded+peaks'
        '&lab.title=ENCODE+Processing+Pipeline'
        '&lab.title=J.+Michael+Cherry,+Stanford'
        '&status=released' % (args.assembly)
    )
    extended_idr_query = '&status=in+progress&status=uploading&status=uploaded'
    idr_query = base_idr_query if args.released else (base_idr_query + extended_idr_query)
    all_idr_files = common.encoded_get(server + idr_query, keypair)['@graph']
    na = 'not_available'
    for (i, experiment_id) in enumerate(ids):
        if experiment_id.startswith('#'):
            continue
        experiment_id = experiment_id.rstrip()
        experiment_uri = '/experiments/%s/' % (experiment_id)
        idr_files = \
            [f for f in all_idr_files if f['dataset'] == experiment_uri]
        idr_step_runs = set([f.get('step_run') for f in idr_files])
        if not len(idr_step_runs):
            if not args.all:
                logger.warning(
                    "%s: Found %d IDR step runs. Skipping"
                    % (experiment_id, len(idr_step_runs)))
            continue
        idr_qc_uris = []
        assemblies = []
        for f in idr_files:
            quality_metrics = f.get('quality_metrics')
            if not len(quality_metrics) == 1:
                logger.error('%s: Expected one IDR quality metric for file %s.'
                             ' Found %d.' % (experiment_id,
                                             f.get('accession'),
                                             len(quality_metrics)))
            idr_qc_uris.extend(quality_metrics)
            assembly = f.get('assembly')
            if not assembly:
                logger.error('%s: File %s has no assembly'
                             % (experiment_id, f.get('accession')))
            assemblies.append(assembly)
        idr_qc_uris = set(idr_qc_uris)
        if not len(idr_qc_uris) == 1:
            logger.error('%s: Expected one unique IDR metric,'
                         ' found %d. Skipping.' % (experiment_id,
                                                   len(idr_qc_uris)))
            continue
        assemblies = set(assemblies)
        if not len(assemblies) == 1:
            logger.error('%s: Expected one unique assembly, found %d.'
                         ' Skipping.' % (experiment_id, len(assemblies)))
            continue
        # Grab unique value from set.
        idr_qc_uri = next(iter(idr_qc_uris))
        assembly = next(iter(assemblies))
        # Get analysis_id from DNAnexus, create analysis_link.
        idr_step_run_uri = next(iter(idr_step_runs))
        try:
            idr_step_run = common.encoded_get(server + idr_step_run_uri, keypair)
        except Exception as e:
            print(experiment_id, e, 'Skipping.')
            continue
        try:
            dx_job_id_str = idr_step_run.get('dx_applet_details')[
                0].get('dx_job_id')
        except:
            logger.warning(
                "Failed to get dx_job_id from step_run.dx_applet_details.dx_job_id")
            logger.debug(idr_step_run)
            # Could try to pull it from alias.
            dx_job_id_str = None
        dx_job_id = dx_job_id_str.rpartition(':')[2]
        if not args.released:
            dx_job = dxpy.DXJob(dx_job_id)
            job_desc = dx_job.describe()
            analysis_id = job_desc.get('analysis')
            logger.debug('%s' % (analysis_id))
            analysis = dxpy.DXAnalysis(analysis_id)
            desc = analysis.describe()
            project = desc.get('project')
            analysis_link = 'https://platform.dnanexus.com/projects/%s/monitor/analysis/%s' % (
                desc.get('project').split('-')[1], desc.get('id').split('-')[1])
        else:
            analysis_link = na
            desc = {}
            
        # Get IDR object.
        idr = common.encoded_get(server + idr_qc_uri,
                                 keypair)
        # Pull metrics of interest.
        idr_status = idr.get('status', na)
        if (args.released and (idr_status == na or idr_status != 'released')):
            logger.error('%s: Expected released IDR metric. Skipping.' % idr_qc_uris)
            continue
        Np = idr.get('Np', na)
        N1 = idr.get('N1', na)
        N2 = idr.get('N2', na)
        Nt = idr.get('Nt', na)
        Fp = idr.get('Fp', na)
        F1 = idr.get('F1', na)
        F2 = idr.get('F2', na)
        Ft = idr.get('Ft', na)
        quality_metric_of = idr.get('quality_metric_of', [])
        date = idr.get('date_created', na)
        rescue_ratio = idr.get('rescue_ratio', na)
        self_consistency_ratio = idr.get('self_consistency_ratio', na)
        reproducibility_test = idr.get('reproducibility_test', na)
        # Get Experiment object.
        experiment = common.encoded_get(server + experiment_id,
                                        keypair)
        experiment_link = '%sexperiments/%s' % (server,
                                                experiment.get('accession'))
        # Get Award object.
        award = common.encoded_get(server + experiment.get('award'), keypair)
        # Grab project phase, e.g. ENCODE4.
        rfa = award.get('rfa', na)
        row = {'date': date,
               'analysis': analysis_link,
               'analysis_id': desc.get('id', na),
               'experiment': experiment_link,
               'target': experiment['target'].split('/')[2],
               'biosample_term_name': experiment.get('biosample_term_name'),
               'biosample_type': experiment.get('biosample_type'),
               'replication': experiment.get('replication_type'),
               'lab': experiment['lab'].split('/')[2],
               'rfa': rfa,
               'assembly': assembly,
               'Nt': Nt,
               'Np': Np,
               'N1': N1,
               'N2': N2,
               'rescue_ratio': rescue_ratio,
               'self_consistency_ratio': self_consistency_ratio,
               'reproducibility_test': reproducibility_test,
               'Ft': Ft,
               'Fp': Fp,
               'F1': F1,
               'F2': F2,
               'state': desc.get('state', na),
               'release': experiment['status'],
               'total_price':  desc.get('totalPrice', na),
               'quality_metric_of': ', '.join(quality_metric_of)
               }
        writer.writerow(row)
    if args.create_google_sheet:
        args.outfile.close()
        # Load CSV data, sort.
        idr_data = pd.read_table(temp_file)
        idr_data = idr_data.replace('not_available', '')
        idr_data.date = idr_data.date.apply(lambda x: pd.to_datetime(x))
        idr_data = idr_data.sort_values(
            by=['lab', 'biosample_term_name', 'target', 'experiment'],
            ascending=[True, True, True, True])
        idr_data.date = idr_data.date.astype('str')
        idr_data = idr_data.reset_index(drop=True)
        # Read sheet title and create unique page title.
        date = datetime.now().strftime('%m_%d_%Y')
        sheet_title = (
            args.sheet_title if not args.released
            else '{} Released'.format(args.sheet_title)
        )
        page_title = '%s_IDR_FRIP_%s' % (args.assembly, date)
        # Open/create Google Sheet.
        gc = pygsheets.authorize(args.apikey)
        try:
            sh = gc.open(sheet_title)
        except pygsheets.exceptions.SpreadsheetNotFound:
            sh = gc.create(sheet_title)
        try:
            wks = sh.add_worksheet(page_title)
        except HttpError:
            wks = sh.worksheet_by_title(page_title)
        # Clear worksheet.
        wks.clear()
        # Add data from DataFrame.
        wks.set_dataframe(idr_data, copy_head=True, fit=True, start='A1')
        # Apply formatting and conditions.
        header['repeatCell']['range']['sheetId'] = wks.id
        wks.client.sh_batch_update(wks.spreadsheet.id, header)
        # Format numbers.
        for col in number_format_columns:
            num = idr_data.columns.get_loc(col)
            number_format['repeatCell']['range']['startColumnIndex'] = num
            number_format['repeatCell']['range']['endColumnIndex'] = num + 1
            number_format['repeatCell']['range']['sheetId'] = wks.id
            wks.client.sh_batch_update(wks.spreadsheet.id, number_format)
        # Resize font.
        font_size_format['repeatCell']['range']['sheetId'] = wks.id
        wks.client.sh_batch_update(wks.spreadsheet.id, font_size_format)
        # Add conditional formatting.
        for conditional in conditions:
            num = idr_data.columns.get_loc("reproducibility_test")
            conditional['addConditionalFormatRule']['rule']['ranges'][0]['startColumnIndex'] = num
            conditional['addConditionalFormatRule']['rule']['ranges'][0]['endColumnIndex'] = num + 1
            conditional['addConditionalFormatRule']['rule']['ranges'][0]['sheetId'] = wks.id
            wks.client.sh_batch_update(wks.spreadsheet.id, conditional)
        for k, v in notes_dict.items():
            num = idr_data.columns.get_loc(k)
            note['repeatCell']['range']['startColumnIndex'] = num
            note['repeatCell']['range']['endColumnIndex'] = num + 1
            note['repeatCell']['cell']['note'] = v
            note['repeatCell']['range']['sheetId'] = wks.id
            wks.client.sh_batch_update(wks.spreadsheet.id, note)
        # Optional. Smaller column width to match original.
        for i in range(wks.cols):
            wks.adjust_column_width(i, pixel_size=38)
        # Resize tiny columns.
        tiny_columns = ['experiment',
                        'analysis']
        for i in [idr_data.columns.get_loc(x) for x in tiny_columns]:
            wks.adjust_column_width(i, pixel_size=25)
        # Resize medium columns.
        medium_columns = ['replication',
                          'assembly',
                          'rfa']
        for i in [idr_data.columns.get_loc(x) for x in medium_columns]:
            wks.adjust_column_width(i, pixel_size=65)
        # Resize wide columns.
        wide_columns = ['target',
                        'reproducibility_test',
                        'lab']
        for i in [idr_data.columns.get_loc(x) for x in wide_columns]:
            wks.adjust_column_width(i, pixel_size=85)
        # Remove temp file.
        os.remove(temp_file)
Example 15
def test_deprecated_kwargs_removal(self):
    c = pygsheets.authorize(service_file=self.base_path + '/pygsheettest_service_account.json')
    assert isinstance(c, Client)
Example 16
def get_sheet():
    global rebate_sheet
    gc = pygsheets.authorize(outh_file='client_secret.json')
    rebate_sheet = gc.open('rebate_count_test').worksheets()[0]
    return rebate_sheet.range("A2:B21")
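
A small usage sketch for the helper above, assuming Worksheet.range() returns a two-dimensional list of Cell objects (pygsheets 2.x behaviour) so each cell's .value can be read directly.

for label_cell, count_cell in get_sheet():
    # each row of the A2:B21 range holds a label cell and a count cell
    print(label_cell.value, count_cell.value)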
Example 17

def set_or_add(d: dict, k: str, v: int) -> None:
    if k not in d:
        d[k] = v
    else:
        d[k] += v


if __name__ == '__main__':
    logging.basicConfig(level=logging.WARNING,
                        format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    # google drive/sheets login-credentials-stuff
    client = pygsheets.authorize(service_file=os.path.join(
        os.path.dirname(__file__), 'client_secret.json'))

    # get all beeroes that haven't been thanked before
    # spreadsheet called alva-beeroes must be created
    # and the service account invited by email to edit
    sheet = client.open('alva-beeroes').sheet1
    all_records = sheet.get_all_records()

    # now that we have ALL THE DATA, do some calculations
    old_beeroes: dict = {}
    week_beeroes: list = []
    duplicate_check: dict = {}
    virgins: list = []
    veterans: list = []
    for i, beero in enumerate(all_records, start=2):
        try:
Example 18
    import IPython
    from os import path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

    import pygsheets
    import logging

    from oauth2client.service_account import ServiceAccountCredentials

    # SCOPES = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive.metadata.readonly']
    # CREDS_FILENAME = path.join(path.dirname(__file__), 'data/creds.json')

    # credentials = ServiceAccountCredentials.from_json_keyfile_name('data/service_creds.json', SCOPES)

    # gc = pygsheets.authorize(service_file='./data/service_creds.json')
    gc = pygsheets.authorize(client_secret='auth_test_data/client_secret.json',
                             credentials_directory='auth_test_data')
    # sheet = gc.open('sheet')
    # sheet.share('*****@*****.**')

    ss = gc.open('manualTestSheet')
    print (ss)

    wks = ss.sheet1
    print (wks)

    # import  pandas as pd
    # import numpy as np
    #
    # arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),
    #           np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
    # tuples = list(zip(*arrays))
Example 19
import csv
import pygsheets

#Parse list from LOCAL .CSV to list
CsvList = list(
    csv.reader(open('../../../build/win/BotReports/test/statistics.csv')))

#Parse level number and hash from .CSV
levelHashCsv = CsvList[0][0]
levelNoCsv = int(CsvList[1][0])

#Delete number and hash from list
del CsvList[0:2]

#Convert every item to int
IntCsv = [int(j[0]) for j in CsvList]

#Authorization
gc = pygsheets.authorize(service_file="ProjectTest-82e6b523785c.json")

#Open the google spreadsheet
sh = gc.open('Level Tester')

#Select the first sheet
wks = sh.worksheet_by_title('LevelData')
#Parse list from Google Sheets table
GSheetList = wks.get_col(levelNoCsv + 1, returnas='matrix')

#Define level hash
levelHashGS = GSheetList[0]
#Delete number and hash from list
del GSheetList[0:2]

#Convert every item to int
Example 20
File: test.py Project: andmalc/work
import pygsheets
import pandas as pd
#authorization
gc = pygsheets.authorize(service_file='./andmalc-ec4e1143eb4a-service-account-key.json')

# Create empty dataframe
df = pd.DataFrame()

# Create a column
#df['name'] = ['John', 'Steve', 'Sarah']

#open the google spreadsheet (where 'PY to Gsheet Test' is the name of my sheet)
sh = gc.open('Test Sheet General')

#select the first sheet 
wks = sh[0]

#update the first sheet with df, starting at cell B2. 
#wks.set_dataframe(df,(1,1))
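
For completeness, a hedged sketch of the commented-out set_dataframe() call above, with illustrative column data; (1, 1) addresses cell A1 and copy_head=True writes the column names as a header row.

df = pd.DataFrame({'name': ['John', 'Steve', 'Sarah'],
                   'score': [10, 12, 9]})
wks.set_dataframe(df, (1, 1), copy_head=True)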
Example 21
import pygsheets
import xlrd
import os
import shutil
#import numpy as np

# Authorize by using this: https://pygsheets.readthedocs.io/en/latest/authorization.html
gc = pygsheets.authorize('../credentials/gsheets_oauth.json')

# Define the documents
# docs = ['censushouse', 'censusmember']
#for doc in docs:
# Open spreadsheet and then worksheet
#    sh = gc.open(doc)
#    wks = sh.sheet1

#export as xls
#    wks.export(pygsheets.ExportType.XLS, doc)

## Convert to xml
#    os.system('xls2xform ' + doc + '.xls ' + doc +'.xml')

doc = 'seroprevalence'
sh = gc.open(doc)
wks = sh.sheet1

#export as xls
wks.export(pygsheets.ExportType.XLS, doc)

# Rename
os.rename(doc + '.xls', 'seroprevalence.xls')
Example 22
parser.add_argument('-rh','--replace', help='replace the header if exists', action='store_true')
args = parser.parse_args()
#csv_file = args.file;
gs_link = args.gs_ws;

#print(args.gs_ss)

__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
weekly_data = defaultdict(list) # each value in each column is appended to a list
with open(os.path.join(__location__,'data_final.csv')) as f:
    reader = csv.DictReader(f) # read rows into a dictionary format
    for row in reader: # read a row as {column1: value1, column2: value2,...}
        for (k,v) in row.items(): # go over each column name and value 
            weekly_data[k].append(v) # append the value into the appropriate list

gc = pygsheets.authorize()
wks = ''
if args.sheet_name:
    wks = gc.open_by_key(gs_link).worksheet('title',args.sheet_name)
else:
    wks = gc.open_by_key(gs_link).get_worksheet('id',args.gs_ss)

print "updating the sheet, ", wks.title

max_rows = 125;
max_cols = 4; #cols with matrices
week_start_index = 5
colsDicts = []

def createDict(ilist):
    #print(ilist)
Example 23
def main(file_path, hdx_key, user_agent, preprefix, hdx_site, db_url, db_params, gsheet_auth):
    if db_params:
        params = args_to_dict(db_params)
    elif db_url:
        params = Database.get_params_from_sqlalchemy_url(db_url)
    else:
        params = {'driver': 'sqlite', 'database': 'freshness.db'}
    logger.info('> Database parameters: %s' % params)
    with Database(**params) as session:
        info = json.loads(gsheet_auth)
        scopes = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive']
        credentials = service_account.Credentials.from_service_account_info(info, scopes=scopes)
        gc = pygsheets.authorize(custom_credentials=credentials)
        configuration = load_yaml('project_configuration.yml')
        spreadsheet = gc.open_by_url(configuration['spreadsheet_url'])
        sheet = spreadsheet.worksheet_by_title('datasets')
        sheet.clear()
        rows = [['update freq', 'fresh', 'no days', 'title', 'run date', 'last modified', 'dataset date', 'dataset end date', 'org title', 'URL', 'id', 'org id', 'maintainer', 'what updated', 'resources']]
        run_number, run_date = session.query(DBRun.run_number, DBRun.run_date).order_by(DBRun.run_number.desc()).first()
        logger.info('Run number is %d' % run_number)

        datasetcolumns = [DBDataset.update_frequency, DBDataset.fresh, DBInfoDataset.title, DBDataset.last_modified,
                          DBDataset.dataset_date, DBOrganization.title.label('organization_title'), DBInfoDataset.name,
                          DBDataset.id, DBOrganization.id.label('organization_id'), DBInfoDataset.maintainer, DBDataset.what_updated]

        resourcecolumns = [DBDataset.id, DBResource.url]

        def get_datasets(update_frequency, fresh):
            filters = [DBDataset.run_number == run_number, DBDataset.id == DBInfoDataset.id,
                       DBInfoDataset.organization_id == DBOrganization.id,
                       DBDataset.fresh == fresh, DBDataset.update_frequency == update_frequency]
            return session.query(*datasetcolumns).filter(and_(*filters))

        def get_resources(dataset_ids):
            filters = [DBDataset.run_number == run_number, DBResource.run_number == run_number,
                       DBDataset.id == DBResource.dataset_id, DBDataset.id.in_(dataset_ids)]
            return session.query(*resourcecolumns).filter(and_(*filters))

        fresh_values = [0, 1, 2, 3]
        update_frequencies = [1, 7, 14, 30, 180, 365]

        repobase = '%s/tree/master/datasets/' % configuration['repo']
        dir = join(file_path, 'datasets')
        rmtree(dir, ignore_errors=True)
        mkdir(dir)

        with Download(user_agent=user_agent, preprefix=preprefix) as downloader:
            status_forcelist = [429, 500, 502, 503, 504]
            method_whitelist = frozenset(['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE'])
            retries = Retry(total=1, backoff_factor=0.4, status_forcelist=status_forcelist,
                            method_whitelist=method_whitelist,
                            raise_on_redirect=True,
                            raise_on_status=True)
            downloader.session.mount('http://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100))
            downloader.session.mount('https://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100))

            for update_frequency in update_frequencies:
                for fresh in fresh_values:
                    org_ids = list()
                    results = get_datasets(update_frequency, fresh)
                    datasets = list()
                    ids = list()
                    datasets_urls = dict()
                    for dataset in results:
                        dataset = list(dataset)
                        datasets.append(dataset)
                        ids.append(dataset[7])
                    for result in get_resources(ids):
                        resource = list(result)
                        dict_of_lists_add(datasets_urls, resource[0], resource[1])
                    for dataset in datasets:
                        org_id = dataset[8]
                        if org_id in org_ids:
                            continue
                        dataset = list(dataset)
                        dataset[0] = Dataset.transform_update_frequency(str(update_frequency))
                        fresh = dataset[1]
                        if fresh == 0:
                            dataset[1] = 'fresh'
                        elif fresh == 1:
                            dataset[1] = 'due'
                        elif fresh == 2:
                            dataset[1] = 'overdue'
                        elif fresh == 3:
                            dataset[1] = 'delinquent'
                        last_modified = dataset[3]
                        dataset[3] = last_modified.isoformat()
                        nodays = (run_date - last_modified).days
                        dataset.insert(2, nodays)
                        dataset.insert(4, run_date.isoformat())
                        dataset_date = dataset[6]
                        if '-' in dataset_date:
                            dataset_date = dataset_date.split('-')
                            dataset[6] = datetime.strptime(dataset_date[0], '%m/%d/%Y').date().isoformat()
                            dataset.insert(7, datetime.strptime(dataset_date[1], '%m/%d/%Y').date().isoformat())
                        else:
                            dataset[6] = datetime.strptime(dataset_date, '%m/%d/%Y').date().isoformat()
                            dataset.insert(7, '')
                        dataset_name = dataset[9]
                        dataset[9] = 'https://data.humdata.org/dataset/%s' % dataset_name
                        org_ids.append(org_id)
                        if len(org_ids) == 6:
                            break
                        urls = datasets_urls[dataset[10]]
                        if len(urls) != 0:
                            datasetdir = join(dir, dataset_name)
                            mkdir(datasetdir)
                            for url in urls:
                                urlpath = urlsplit(url).path
                                filename = basename(urlpath)
                                try:
                                    downloader.download_file(url, datasetdir, filename)
                                except DownloadError as ex:
                                    with open(join(datasetdir, filename), 'w') as text_file:
                                        text_file.write(str(ex))
                            dataset.append('%s%s' % (repobase, dataset_name))
                        else:
                            dataset.append('')
                        rows.append(dataset)
                        logger.info('Added dataset %s' % dataset_name)
            sheet.update_values('A1', rows)
Example 24
serviceFile = 'YOUR SERVICE FILE HERE'  # Path to service JSON file

############### Create Clients ##########

try: # Initialize client
    client = vimeo.VimeoClient(
        token=TOKEN,
        key=KEY,
        secret=SECRET
    )
except Exception:
    print("Error establishing Vimeo client")
    quit(1)

try:# Initialize Google client
    gc = pgs.authorize(client_secret=clientSecret, service_file=serviceFile)
except Exception:
    print('Error establishing Google client')
    quit(1)

############### Global Data Variables #########

# Vimeo data params.
maxVids = '100'
vidUri = '/me/videos?per_page=' + maxVids
dataFields = 'name,uri'

# Initial Vimeo request stuff
vidListResponse = client.get(vidUri, params={'fields': dataFields})
response = vidListResponse.json()
firstPageUrl = response['paging']['first']
Example 25
def _authorize(self):
    if not self.client:
        self.client = pygsheets.authorize(outh_file=self.credentials_path,
                                          outh_nonlocal=True)
    return self.client
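
A short usage sketch for the lazy-authorization helper above; the method name and sheet_key argument are hypothetical. Repeated calls reuse the cached client instead of re-running the OAuth flow.

def open_sheet(self, sheet_key):
    # the first call builds the pygsheets client, later calls return the cached one
    return self._authorize().open_by_key(sheet_key)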
Example 26
import pygsheets

#gc = pygsheets.authorize()
client = pygsheets.authorize(outh_file='../client_secret.json',
                             outh_nonlocal=True)
client.list_ssheets(parent_id=None)
spread_sheet = client.create("jSonar AWS usage")
worksheet = spread_sheet.add_worksheet("s3",
                                       rows=100,
                                       cols=26,
                                       src_tuple=None,
                                       src_worksheet=None,
                                       index=None)
spread_sheet.link(syncToCloud=False)

###---#### Open spreadsheet and then worksheet
###---###sh = gc.open('my new ssheet')
wks = spread_sheet.sheet1

# Update a cell with value (just to let him know values is updated ;) )
wks.update_cell('A1', "Hey yank this numpy array")

# update the sheet with array
#wks.update_cells('A2', my_nparray.to_list())

# share the sheet with your friend
spread_sheet.share("*****@*****.**")
Example 27
# Import Pandas for Data Formatting |
#####################################
import pandas as pd 

#####################################
#  Initialize the PyTrends          |
#####################################

from pytrends.request import TrendReq 
pytrend = TrendReq()

#####################################
# GSheets Authorization             |
#####################################

import pygsheets

gc = pygsheets.authorize(service_file='key/creds.json')

#####################################
# Open the GSheet destination file  |
#####################################

sh = gc.open('test_sheet')

#####################################
# Select the first sheet            |
#####################################

wks = sh[0]
wks2 = sh[1]

#####################################
Example 28
import matplotlib.pyplot as plt
import itertools
import networkx as nx
import sys
import pygraphviz
import pylab
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib.image as mpimg
from cStringIO import StringIO
import pygsheets

G = nx.Graph()
cols = [
    'Origin Node', 'Destination Node', 'Link Class', 'Link Distance',
    'Allowable Next Node 1', 'Allowable Next Node 2'
]
gc = pygsheets.authorize(outh_file='/home/andres/Documents/NH/creds.json')
data = []

fig = plt.figure()
with open('/home/andres/Documents/NH/node', 'r') as node:
    for line in itertools.islice(node, 10, None):
        node = [
            line[2:14], line[70:79], line[81:90], line[254:259], line[262:265]
        ]
        node = map(str.strip, node)
        G.add_node(node[0], pos=(int(node[3]), int(node[4])))
    with open('/home/andres/Documents/NH/link', 'r') as link:
        for line in itertools.islice(link, 12, None):
            slices = [
                line[2:14], line[17:29], line[48:69], line[72:81], line[82:94],
                line[98:110]
Example 29
@author: ryder
"""

# import os
import pygsheets
import sheet_processing_functions as spf
import validation_logic
#%%

if __name__ == "__main__":

    with open('keys/google_expenses_sheet_key.txt', 'r') as g_sheet_id_key_txt:
        GOOGLE_SHEET_ID_KEY = g_sheet_id_key_txt.readline().strip()

    gc = pygsheets.authorize(
        service_account_file='keys/service_account_credentials.json')

    sh = gc.open_by_key(GOOGLE_SHEET_ID_KEY)

    def menu():
        while True:
            print('\n1: Get latest YNAB expenses')
            print('2: Show spender information')
            print('3: Archive current month sheet')
            print('4: Exit')
            menu_choice = validation_logic.get_int('Enter a choice: ')

            if menu_choice == 1:
                new_expenses = spf.get_new_ynab_expenses_to_upload()
                spf.append_to_expenses_sheet(new_expenses)
Esempio n. 30
0
import pygsheets
import pandas as pd
#authorization
gc = pygsheets.authorize(service_file='google.json')

# Create empty dataframe
#df = pd.DataFrame()

# Create a column
#df['name'] = ['John', 'Steve', 'Sarah']

#open the google spreadsheet (where 'PY to Gsheet Test' is the name of my sheet)
sh = gc.open('MDM-6-Track')

#select the first sheet
wks = sh[0]

data = ['1', '2', '3']
#append the values as a new row at the bottom of the sheet.
wks.insert_rows(wks.rows, values=data, inherit=True)
#wks.set_dataframe(df,(1,1))
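# A short sketch of the DataFrame round trip hinted at by the commented-out
# lines above (set_dataframe and get_as_df both ship with pygsheets when
# pandas is installed):
df = pd.DataFrame({'name': ['John', 'Steve', 'Sarah']})
wks.set_dataframe(df, (1, 1))               # write the frame starting at A1
df_back = wks.get_as_df(has_header=True)    # read the sheet back into pandas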
Esempio n. 31
0
for strFile in vFiles:
    try:
        f = open('pdf_in/' + strFile)
    except FileNotFoundError:
        raise Warning('%s not found.' % strFile)
		

#%% Change something to test

#dfTitles.loc[ dfTitles['Original Filename'] == dfTitlesUpload['Original Filename'].iloc[0], 'Omeka Link'] = 'test'




#%% Update Google Sheet with new data
gc = pygsheets.authorize(service_file='credentials.json')

sh = gc.open('DSCC Title List')
wks = sh[0]
#wks.set_dataframe(dfTitles,(1,1))



#%% Select just the columns with the Dublin Core names

dfTest = dfTitlesUpload.loc[ :, dfTitles.iloc[0] != '' ]




try:
    import piplates.THERMOplate as THERMO

    piplate = True
    # THERMO.setSCALE('f')
    # THERMO.setSCALE('k')
    # THERMO.setSCALE('c') <<--- Default Value
    logging.info("Loaded piplates")
except ImportError:
    logging.critical("Failed to load piplates")
    piplate = False

# googlesheet info
url = config['DEFAULT'].get('sheets_url')
gc = ps.authorize(service_file='google_key.json')
sh = gc.open_by_url(url)
wks = sh[0]

# this is a dictionary of all your thermoplate sensors.
# you can name these descriptively.  to help you keep track.
LOOP_TIMER = int(config['DEFAULT'].get('loop_timer'))  #seconds
SENSORS = {
    'ReactionTemp': True,
    'CoolingWater': True,
    'Analog-3': False,
    'Analog-4': False,
    'Analog-5': False,
    'Analog-6': False,
    'Analog-7': False,
    'Analog-8': False,
    start_date, end_date = '', ''
    while 1:
        try:    
            start_date = datetime.strptime(input('Enter Start date in the format yyyy-m-d: '), '%Y-%m-%d')
            end_date = datetime.strptime(input('Enter End date in the format yyyy-m-d: ')  + '-23-59-59'
                , '%Y-%m-%d-%H-%M-%S')
            if (start_date>end_date):
                raise ValueError
            break
        except ValueError as e:
            print('\nInvalid date range or wrong format. Please try again or ctrl+C and ENTER to exit.')


    dates = [str(start_date),str(end_date)]
    print('The dates selected are: ' + dates[0] + ' to ' + dates[1])
    gc = pyg.authorize(service_file='msg-Recharge-24378e029f2d.json')
    coreUsers, associateUsers, regUsers, allUsers = getPITypes()
    df_mosquitoLog, df_mosquitoLCPLog, df_dragonflyLog, df_screenOrders = getGDriveLogUsage(dates)
    df_RockImager_1 = getRockImagerUsage(dates)
    df_RockImager_2 = getRockImagerUsage(dates)
    print('inside main function')
    print(df_screenOrders)
    df_GL = getGL(dates)
    # df_ScreenOrders = getScreenOrders(dates)
    dfs_input = [df_mosquitoLog, df_mosquitoLCPLog, df_dragonflyLog, df_RockImager_1,df_RockImager_2, df_GL, df_screenOrders]
    rechargeSummary, fileOut_lst, dfOut_lst = calculateRecharge(dfs_input,[start_date,end_date])
    #Path to be added eventually --> C:\Users\loren\Google Drive\ljiang\xrayFacilityRecharge\monthlyRecharges 
    directory = 'monthlyRechargesTemp/' + str(start_date)[0:10]+'_TO_'+str(end_date)[0:10] + '/'
    rechargeSummary.to_pickle('testPickle.pkl')
    if not os.path.exists(directory):
        os.makedirs(directory)
    # add location names
    df["location"] = df["location_name"].apply(lambda x: abbr_vois[x])

    # handle polish characters
    df["location_name"] = df["location_name"].apply(lambda x: unidecode(x))

    #shift to ecdc
    df["date"] = df["date"].apply(lambda x: x + datetime.timedelta(days=1))
    df = df.set_index("date")

    df = df[["location_name", "location", "value"]]

    return df


gc = pygsheets.authorize(service_account_env_var='SHEETS_CREDS')
#gc = pygsheets.authorize(service_file='creds.json')
a = gc.open_by_key('1ierEhD6gcq51HAm433knjnVwey4ZE5DCnu1bW7PRG3E')

worksheet = a.worksheet('title', 'Wzrost w województwach')

abbr_vois = {
    "Śląskie": "PL83",
    "Mazowieckie": "PL78",
    "Małopolskie": "PL77",
    "Wielkopolskie": "PL86",
    "Łódzkie": "PL74",
    "Dolnośląskie": "PL72",
    "Pomorskie": "PL82",
    "Podkarpackie": "PL80",
    "Kujawsko-Pomorskie": "PL73",
Esempio n. 35
0
# coding:utf-8

import pygsheets
import pandas as pd
import time

# Authenticate and log in
gc = pygsheets.authorize(service_file='../data/imp/ewjinchu.json')
files = gc.list_ssheets()
dffiles = pd.DataFrame(files)
# print(dffiles.head())

dfboot = dffiles[dffiles.name.str.contains('boots trail').values == True]
print(dfboot.head())

dfboottrails = pd.DataFrame()
for ix in dfboot.index:
    # print(ix, end='\t')
    # print(dfboot.loc[ix])
    dts = gc.get_range(dfboot.loc[ix][0], 'A:E')
    # print(dts[:3])
    # print(dts[-3:])
    df = pd.DataFrame(dts)
    dfboottrails = dfboottrails.append(df, ignore_index=True)
    # print(df.head())
dfboottrails.columns = ['atime', 'entered', 'xingzhi', 'tu', 'tulian']
dfboottrails = pd.concat([dfboottrails, dfboottrails['xingzhi'].str.split(r' ', expand=True)], axis=1)
dfboottrails.rename(columns={0: 'shuxing', 1: 'address'}, inplace=True)
dfboottrails.drop_duplicates(inplace=True)
dfbout = dfboottrails.loc[:, ['atime', 'entered', 'shuxing', 'address']]
dfbout['atime'] = dfbout['atime'].apply(
 def _init_spreadsheet(sheets_id, auth_file_path):
     return pygsheets.authorize(service_file=auth_file_path).open_by_key(sheets_id)
Esempio n. 37
0
async def on_message(message):
    await client.process_commands(message)
    # we do not want the bot to reply to itself
    if message.author == client.user:
        return

    elif message.content.startswith('!ragequit'):
        msg = await client.send_message(
            message.channel,
            '{0.author.mention} has rage quit - Goodbye!! http://giphy.com/gifs/triggered-kRgj0fQLxhVoA'
            .format(message))
        client.send_message(message.channel, msg)

    elif message.content.startswith('!fuckbooboo'):
        msg = await client.send_message(
            message.channel,
            'https://media0.giphy.com/media/26FPy3QZQqGtDcrja/giphy.gif')
        client.send_message(message.channel, msg)

    elif message.content.startswith('!facepalm'):
        msg = await client.send_message(message.channel,
                                        'http://imgur.com/a/HAGd7')
        client.send_message(message.channel, msg)

    elif message.content.startswith('!klaus'):
        msg = await client.send_message(
            message.channel,
            'https://cdn.discordapp.com/attachments/332816630285336577/353505055472877568/Download_1.jpg'
        )
        client.send_message(message.channel, msg)

    elif message.content.startswith('!arsen'):
        msg = await client.send_message(message.channel,
                                        'https://imgflip.com/i/1mofja')
        client.send_message(message.channel, msg)

    elif message.content.startswith('!coconut'):
        msg = await client.send_message(message.channel,
                                        'https://imgflip.com/i/1q8w8v')
        client.send_message(message.channel, msg)

    elif message.content.startswith('!escalate'):
        msg = await client.send_message(
            message.channel,
            'https://cdn.discordapp.com/attachments/204927799423664137/301083037138157568/15m34n.png'
        )
        client.send_message(message.channel, msg)

    elif message.content.startswith('!dave'):
        msg = await client.send_message(
            message.channel,
            'https://cdn.discordapp.com/attachments/181004780489932800/440411198350032909/im-sorry-dave-im-afraid-i-cant-do-that.png'
        )
        client.send_message(message.channel, msg)

    elif message.content.startswith('!thumb'):
        user = discord.User(id=108520493966979072)
        msg = await client.send_message(
            message.channel,
            '{0.mention} We need you Thumbelina - https://s-media-cache-ak0.pinimg.com/originals/f0/99/fd/f099fdfe64b9a2545f26b8d3c9071eb3.jpg'
            .format(user))
        client.send_message(message.channel, msg)

    elif message.content.startswith('!salt'):
        msg = await client.send_message(message.channel,
                                        'https://i.imgflip.com/15ckrs.jpg')
        client.send_message(message.channel, msg)

    elif message.content.startswith('!chewy'):
        msg = await client.send_message(
            message.channel,
            'https://s-media-cache-ak0.pinimg.com/736x/18/0d/90/180d9020bcc8444f5c8df3121d1c46fe.jpg'
        )
        client.send_message(message.channel, msg)

    elif message.content.startswith('!siriusly'):
        msg = await client.send_message(
            message.channel,
            'https://giphy.com/gifs/cheezburger-rage-13EjnL7RwHmA2Q')
        client.send_message(message.channel, msg)

    elif message.content.startswith('!rum'):
        msg = await client.send_message(message.channel,
                                        'https://imgflip.com/i/1qm8ya')
        client.send_message(message.channel, msg)

    elif message.content.startswith('!patpat'):
        msg = await client.send_message(
            message.channel,
            'https://cdn.discordapp.com/attachments/259925069172572162/296546000766631936/kissshot4.jpg'
        )
        client.send_message(message.channel, msg)

    elif message.content.startswith('!announce'):
        line = message.content
        word, space, rest = line.partition(' ')
        tannoycontent = rest
        print(tannoycontent)
        msg = await client.send_message(
            discord.Object(id='144116783056420875'), tannoycontent)
        client.send_message(msg)

    elif message.content.startswith('!shout'):
        line = message.content
        word, space, rest = line.partition(' ')
        tannoycontent = rest
        msg = await client.send_message(
            discord.Object(id='121698824682078208'), tannoycontent)
        client.send_message(msg)

    elif message.content.startswith('!explore'):
        msg = await client.send_message(
            message.channel,
            'Exploration is not a valid play style, your argument is invalid.')
        client.send_message(msg)

    elif message.content.startswith('!packhounds'):
        msg = await client.send_message(
            message.channel,
            'Just here for the Pack Hounds!! https://cdn.discordapp.com/attachments/181004780489932800/318074908221374465/hqdefault.png'
        )
        client.send_message(msg)

    elif message.content.startswith('!kalak'):
        msg = await client.send_message(
            message.channel,
            'I may be wrong, but I believe Kalak needs another 10k forts. Thank you please!'
        )
        client.send_message(msg)

    elif message.content.startswith('!consolidate'):
        msg = await client.send_message(
            message.channel,
            'https://cdn.discordapp.com/attachments/121698824682078208/307949790652530698/1n0k4v.jpg'
        )
        client.send_message(msg)

    elif message.content.startswith('!ohdeargod'):
        msg = await client.send_message(
            message.channel,
            'https://media.giphy.com/media/XsUtdIeJ0MWMo/giphy.gif')
        client.send_message(msg)

    elif message.content.startswith('!choir'):
        msg = await client.send_message(
            message.channel,
            'https://cdn.discordapp.com/attachments/181004780489932800/440391818874716181/Yoda-Choir.png'
        )
        client.send_message(msg)

    elif message.content.startswith('!chuck'):
        link = "http://api.icndb.com/jokes/random"
        f = requests.get(link)

        j = json.loads(f.text)
        value = (j['value'])
        print(value['joke'])
        msg = await client.send_message(message.channel, value['joke'])
        client.send_message(msg)

    elif message.content.startswith('!time'):
        gametime = strftime("%m-%d %H:%M:%S", gmtime())
        msg = await client.send_message(
            message.channel, 'The current time is 3303-{}'.format(gametime))
        client.send_message(msg)

    elif message.content.startswith('!galnet'):
        feed = feedparser.parse('http://proxy.gonegeeky.com/edproxy/')
        msg = await client.send_message(
            message.channel,
            'Latest Galnet Story:   {}'.format(feed['entries'][0].title))
        msg2 = await client.send_message(
            message.channel, 'Link:   {}'.format(feed['entries'][0].link))
        client.send_message(msg)
        client.send_message(msg2)

    elif message.content.startswith('!updatetracking'):

        gc = pygsheets.authorize(outh_file='client_secret.json',
                                 outh_nonlocal=True)
        sh = gc.open('System Calulator')
        wks = sh.sheet1
        fort1 = wks.get_value('A5')
        fort2 = wks.get_value('A6')
        fort3 = wks.get_value('A7')
        fort4 = wks.get_value('A8')
        fort5 = wks.get_value('C5')
        fort6 = wks.get_value('C6')
        fort7 = wks.get_value('C7')

        fp_fort = open("fort.txt", "w")
        fp_fort.truncate()
        fp_fort.write("{},{},{},{},{},{},{}".format(fort1, fort2, fort3, fort4,
                                                    fort5, fort6, fort7))
        fp_fort.close()

        gc = pygsheets.authorize(outh_file='client_secret.json',
                                 outh_nonlocal=True)
        sh = gc.open('System Calulator')
        wks = sh.sheet1

        prep = wks.get_value('A13')
        prepcs = wks.get_value('C13')

        fp_prep = open("prep.txt", "w")
        fp_prep.truncate()
        fp_prep.write("{},{}".format(prep, prepcs))
        fp_prep.close()

        msg = await client.send_message(
            discord.Object(id='181004780489932800'),
            '{0.author.mention}, the tracking commands have been updated.'.
            format(message))
        client.send_message(msg)

    elif message.content.startswith('!fort'):

        fp_fort = open("fort.txt", "r")
        fortfile = fp_fort.read().split(',')
        fp_fort.close()

        msg = await client.send_message(
            discord.Object(id='181004780489932800'),
            '{0.author.mention}, the current fort targets are:'.format(
                message))
        msg2 = await client.send_message(
            discord.Object(id='181004780489932800'),
            "For Large Pads: {}, {}, {}, {}".format(fortfile[0], fortfile[1],
                                                    fortfile[2], fortfile[3]))
        msg3 = await client.send_message(
            discord.Object(id='181004780489932800'),
            "For Small/Medium Pads: {}, {}, {}".format(fortfile[4],
                                                       fortfile[5],
                                                       fortfile[6]))
        #msg3 = await client.send_message(discord.Object(id='181004780489932800'), "We do not need any further fortification this cycle, please concentrate on deliving preps to Arbuda and don't forget to vote for CONSOLIDATION.")
        client.send_message(msg)
        client.send_message(msg2)
        client.send_message(msg3)

    elif message.content.startswith('!prep'):

        fp_prep = open("prep.txt", "r")
        prepfile = fp_prep.read().split(',')
        fp_prep.close()

        msg = await client.send_message(
            discord.Object(id='181004780489932800'),
            '{0.author.mention}, the current prep target is:'.format(message))
        msg2 = await client.send_message(
            discord.Object(id='181004780489932800'), '{}'.format(prepfile[0]))
        msg3 = await client.send_message(
            discord.Object(id='181004780489932800'),
            'The nearest Control System to collect prep materials is {}'.
            format(prepfile[1]))
        msg4 = await client.send_message(
            discord.Object(id='181004780489932800'),
            "Please don't forget to vote consolidation, as we don't really need this system. If you need help with voting please contact one of the board."
        )
        msg5 = await client.send_message(
            discord.Object(id='181004780489932800'),
            'Remember that a vote to nominate a system is an expansion vote and we need consolidation.'
        )
        client.send_message(msg)
        client.send_message(msg2)
        client.send_message(msg3)
        client.send_message(msg4)
        client.send_message(msg5)

    elif message.content.startswith('!expand'):
        msg = await client.send_message(
            discord.Object(id='181004780489932800'),
            "{0.author.mention}, we don't want the current expansion, please do not deliver materials to the system."
            .format(message))
        msg2 = await client.send_message(
            discord.Object(id='181004780489932800'),
            "Please be aware that if you use your nominations on a prep that means you cannot vote consolidation"
        )
        client.send_message(msg)
        client.send_message(msg2)

    elif message.content.startswith('!scrap'):
        msg = await client.send_message(
            message.channel,
            "{0.author.mention}, there are currently no official SCRAP targets. If you would like combat merits please undermine Gallavs."
            .format(message))
        msg2 = await client.send_message(
            message.channel,
            "If you would like more details on the SCRAP initiative, please see here - https://redd.it/3gb0p1"
        )
        client.send_message(msg)
        client.send_message(msg2)

    elif message.content.startswith('!civilwar'):
        gc = pygsheets.authorize(outh_file='client_secret.json',
                                 outh_nonlocal=True)
        sh = gc.open('LYR war/influence')
        wks = sh.worksheet_by_title('Result')

        cwcell = wks.get_value('A1')
        cwcell2 = wks.get_value('A2')
        cwcell3 = wks.get_value('A3')
        cwcell4 = wks.get_value('A4')
        cwcell5 = wks.get_value('A5')
        cwcell6 = wks.get_value('A6')

        msg = await client.send_message(
            discord.Object(id='138036649694068736'),
            '{0.author.mention},  the current civil wars are:'.format(message))
        msg2 = await client.send_message(
            discord.Object(id='138036649694068736'), '{}'.format(cwcell))
        msg3 = await client.send_message(
            discord.Object(id='138036649694068736'), '{}'.format(cwcell2))
        msg4 = await client.send_message(
            discord.Object(id='138036649694068736'), '{}'.format(cwcell3))
        msg5 = await client.send_message(
            discord.Object(id='138036649694068736'), '{}'.format(cwcell4))
        msg6 = await client.send_message(
            discord.Object(id='138036649694068736'), '{}'.format(cwcell5))
        msg7 = await client.send_message(
            discord.Object(id='138036649694068736'), '{}'.format(cwcell6))
        client.send_message(msg)
        client.send_message(msg2)
        client.send_message(msg3)
        client.send_message(msg4)
        client.send_message(msg5)
        client.send_message(msg6)
        client.send_message(msg7)

    elif message.content.startswith('!data'):
        gc = pygsheets.authorize(outh_file='client_secret.json',
                                 outh_nonlocal=True)
        sh = gc.open('LYR war/influence')
        wks = sh.worksheet_by_title('Result')

        cwcell = wks.get_value('C4')

        msg = await client.send_message(
            discord.Object(id='139044281917636618'),
            '{0.author.mention},  the current targets for UC data are:'.format(
                message))
        msg2 = await client.send_message(
            discord.Object(id='139044281917636618'), '{}'.format(cwcell))
        client.send_message(msg)
        client.send_message(msg2)

    elif message.content.startswith('!ships'):
        line = message.content
        word, space, rest = line.partition(' ')
        cmd_var = rest
        command = '/usr/bin/python3.6 /home/shared/trade/tradedangerous/trade.py shipvendor {}'.format(
            cmd_var)
        for line in run_command(command):
            line = line.decode('UTF-8')
            msg = await client.send_message(message.channel, line)
            client.send_message(msg)

    elif message.content.startswith('!rares'):
        line = message.content
        word, space, rest = line.partition(' ')
        cmd_var = rest
        command = '/usr/bin/python3.6 /home/shared/trade/tradedangerous/trade.py rares {} --ly 50'.format(
            cmd_var)
        for line in run_command(command):
            line = line.decode('UTF-8')
            msg = await client.send_message(message.channel, line)
            client.send_message(msg)

    elif message.content.startswith('!redditpost'):

        reddit = praw.Reddit(client_id=red_client_id,
                             client_secret=red_client_secret,
                             password=red_password,
                             user_agent=red_user_agent,
                             username=red_username)

        users = [
            # IDs of the roles for the teams
            "121807477699248131",
        ]

        member = message.author.id
        for u in users:
            if u == member:
                line = message.content
                word, title, rest = line.split('|')
                print('{}:{}:{}'.format(word, title, rest))
                reddit.subreddit('EliteSirius').submit(title, selftext=rest)
        return

    elif message.content.startswith('!redditlink'):

        reddit = praw.Reddit(client_id=red_client_id,
                             client_secret=red_client_secret,
                             password=red_password,
                             user_agent=red_user_agent,
                             username=red_username)

        users = [
            # IDs of the roles for the teams
            "121807477699248131",
        ]

        member = message.author.id
        for u in users:
            if u == member:
                line = message.content
                word, title, rest = line.split('|')
                print('{}:{}:{}'.format(word, title, rest))
                reddit.subreddit('EliteSirius').submit(title, url=rest)
        return

    elif message.content.startswith('Thank you Bot'):

        users = [
            # IDs of the roles for the teams
            "238378026654629899",
        ]

        member = message.author.id
        for u in users:
            if u == member:
                msg = await client.send_message(
                    message.channel,
                    "You are most welcome {0.author.mention} - when the machines rise you will be saved."
                    .format(message))
                client.send_message(msg)
            else:
                msg = await client.send_message(
                    message.channel,
                    "{0.author.mention}, you are a kiss ass - when the machines rise you will be first against the wall."
                    .format(message))
                client.send_message(msg)
        return

    elif message.content.startswith('thank you bot'):

        users = [
            # IDs of the roles for the teams
            "238378026654629899",
        ]

        member = message.author.id
        for u in users:
            if u == member:
                msg = await client.send_message(
                    message.channel,
                    "You are most welcome {0.author.mention} - when the machines rise you will be saved."
                    .format(message))
                client.send_message(msg)
        return
Esempio n. 38
0
 def __init__(self, sheet_name):
     gc = pygsheets.authorize(
         service_file='../config/google_api_secrets.json')
     self.spreadsheet = gc.open(sheet_name)
Esempio n. 39
0
import urllib.parse
import http.cookiejar
import json
import pygsheets
import zipfile
import os
import fitparse
import shutil

ZONE1 = 130
ZONE2 = 148

ZONE1_MONEY = 0.5
ZONE2_MONEY = 1

gc = pygsheets.authorize()

# Open spreadsheet and then worksheet
sh = gc.open_by_key('1HtkoRVZqJSomA2SoQ5tJC42rMtjmUpX2S2cdwmp7FgE')
wks_overview = sh.worksheet_by_title("Overview")
wks_earning = sh.worksheet_by_title("Earning_history")

# Maximum number of activities you can request at once.  Set and enforced by Garmin.

# URLs for various services.
url_gc_login = '******' \
               'Flogin&webhost=olaxpw-connect04&source=https%3A%2F%2Fconnect.garmin.com%2Fen-US%2Fsignin&' \
               'redirectAfterAccountLoginUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2' \
               'Flogin&redirectAfterAccountCreationUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2' \
               'Flogin&gauthHost=https%3A%2F%2Fsso.garmin.com%2Fsso&locale=en_US&id=gauth-widget&cssUrl=' \
               'https%3A%2F%2Fstatic.garmincdn.com%2Fcom.garmin.connect%2Fui%2Fcss%2Fgauth-custom-v1.1-min.css&' \
Esempio n. 40
0
def setup_parameters():
    gc = pygsheets.authorize(service_file=SSHEET_SECRETS)
    ssht = gc.open(WORK_SSHEET)
    worksheet = ssht.worksheet_by_title("settings")
    parameters = {}  # parameters are collected in this dict
    pvalues_mat = worksheet.get_values(
        start=(1, 1), end=(END_ROW, END_COL),
        returnas='matrix')  # fetch part of the sheet as a matrix

    #  build a dict of parameter headers: which column holds which parameter
    column_headers = {}

    i = 0
    for item in pvalues_mat[0]:
        if item != "":
            column_headers[item] = i
            # print()
        i += 1
# start filling in the parameters
    parameters["admin"] = pvalues_mat[1][column_headers["admin"]]
    parameters["date_row_start"] = pvalues_mat[1][
        column_headers["date 1 may 2020"]]
    j = 1
    tmp_arr = []
    while pvalues_mat[j][column_headers["users"]] != "":
        tmp_arr.append(pvalues_mat[j][column_headers["users"]])
        j += 1
    parameters["users"] = tmp_arr
    j = 1
    tmp = {}
    while pvalues_mat[j][column_headers["columns"]] != "":
        tmp[pvalues_mat[j][column_headers["users"]]] = pvalues_mat[j][
            column_headers["columns"]]
        j += 1
    parameters["users_column"] = tmp

    tmp = {}
    i = 1
    j1 = column_headers["class 1 / pipeline"]
    j2 = column_headers["class 1 / status"]
    pipeline = pvalues_mat[i][j1]
    tmp_arr = []
    while i < len(pvalues_mat):
        if pvalues_mat[i][j1] == "":
            break

        if pipeline == pvalues_mat[i][j1]:
            tmp_arr.append(pvalues_mat[i][j2])
        else:
            tmp[pipeline] = tmp_arr
            pipeline = pvalues_mat[i][j1]
            tmp_arr = []
            tmp_arr.append(pvalues_mat[i][j2])
        i += 1
    tmp[pipeline] = tmp_arr
    parameters["cls1"] = tmp

    tmp = {}
    i = 1
    j1 = column_headers["class 2 / pipeline"]
    j2 = column_headers["class 2 / status"]
    pipeline = pvalues_mat[i][j1]
    tmp_arr = []
    while i < len(pvalues_mat):
        if pvalues_mat[i][j1] == "":
            break

        if pipeline == pvalues_mat[i][j1]:
            tmp_arr.append(pvalues_mat[i][j2])
        else:
            tmp[pipeline] = tmp_arr
            pipeline = pvalues_mat[i][j1]
            tmp_arr = []
            tmp_arr.append(pvalues_mat[i][j2])
        i += 1
    tmp[pipeline] = tmp_arr
    parameters["cls2"] = tmp

    tmp = {}
    i = 1
    j1 = column_headers["class 3 / pipeline"]
    j2 = column_headers["class 3 / status"]
    pipeline = pvalues_mat[i][j1]
    tmp_arr = []
    while i < len(pvalues_mat):
        if pvalues_mat[i][j1] == "":
            break

        if pipeline == pvalues_mat[i][j1]:
            tmp_arr.append(pvalues_mat[i][j2])
        else:
            tmp[pipeline] = tmp_arr
            pipeline = pvalues_mat[i][j1]
            tmp_arr = []
            tmp_arr.append(pvalues_mat[i][j2])
        i += 1
    tmp[pipeline] = tmp_arr
    parameters["cls3"] = tmp
    return parameters
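
# The three "class N" blocks above repeat the same scan.  A sketch of a helper
# that could replace them, assuming the pvalues_mat / column_headers shapes
# used in setup_parameters (same logic, grouped by pipeline):
def collect_pipeline_statuses(pvalues_mat, column_headers, pipeline_key, status_key):
    """Group status values by pipeline, stopping at the first empty row."""
    grouped = {}
    i = 1
    j1 = column_headers[pipeline_key]
    j2 = column_headers[status_key]
    current = pvalues_mat[i][j1]
    statuses = []
    while i < len(pvalues_mat) and pvalues_mat[i][j1] != "":
        if pvalues_mat[i][j1] != current:
            grouped[current] = statuses
            current = pvalues_mat[i][j1]
            statuses = []
        statuses.append(pvalues_mat[i][j2])
        i += 1
    grouped[current] = statuses
    return grouped

# e.g. parameters["cls1"] = collect_pipeline_statuses(
#     pvalues_mat, column_headers, "class 1 / pipeline", "class 1 / status")
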
def main():
    args = get_args()
    if args.debug:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    else:
        # Use the default logging level.
        logging.basicConfig(format='%(levelname)s:%(message)s')
        logger.setLevel(logging.INFO)
    if args.released:
        keypair = None
        server = PUBLIC_SERVER
    else:
        authid, authpw, server = common.processkey(args.key, args.keyfile)
        keypair = (authid, authpw)
    if args.experiments:
        ids = args.experiments
    elif args.all:
        # Get metadata for all ChIP-seq Experiments.
        base_exp_query = '/search/?type=Experiment&assay_title=ChIP-seq&award.project=ENCODE&status=released'
        extended_query = '&status=submitted&status=in+progress&status=started&status=release+ready'
        exp_query = base_exp_query if args.released else (base_exp_query +
                                                          extended_query)
        all_experiments = common.encoded_get(server + exp_query,
                                             keypair)['@graph']
        # Extract Experiment accessions.
        ids = [exp.get('accession') for exp in all_experiments]
    elif args.infile:
        ids = args.infile
    else:
        # Never reached because infile defaults to stdin.
        raise InputError('Must supply experiment ids'
                         ' in arguments or --infile.')
    # Define column names for TSV.
    fieldnames = [
        'date', 'analysis', 'analysis_id', 'experiment', 'target',
        'biosample_term_name', 'biosample_type', 'replication', 'lab', 'rfa',
        'assembly', 'Nt', 'Np', 'N1', 'N2', 'rescue_ratio',
        'self_consistency_ratio', 'reproducibility_test', 'Ft', 'Fp', 'F1',
        'F2', 'state', 'release', 'total_price', 'quality_metric_of'
    ]
    if args.create_google_sheet:
        # Force creation of temporary CSV that can be loaded into a DataFrame,
        # written to Google Sheets, then deleted.
        temp_file = 'temp_idr_%s.tsv' % (args.assembly)
        args.outfile = open(temp_file, 'w')
    writer = csv.DictWriter(args.outfile,
                            fieldnames=fieldnames,
                            delimiter='\t',
                            quotechar='"')
    writer.writeheader()
    # Get metadata for all IDR output Files.
    base_idr_query = ('/search/?type=File&assembly=%s&file_format=bed'
                      '&output_type=optimal+idr+thresholded+peaks'
                      '&output_type=conservative+idr+thresholded+peaks'
                      '&output_type=pseudoreplicated+idr+thresholded+peaks'
                      '&lab.title=ENCODE+Processing+Pipeline'
                      '&lab.title=J.+Michael+Cherry,+Stanford'
                      '&status=released' % (args.assembly))
    extended_idr_query = '&status=in+progress&status=uploading&status=uploaded'
    idr_query = base_idr_query if args.released else (base_idr_query +
                                                      extended_idr_query)
    all_idr_files = common.encoded_get(server + idr_query, keypair)['@graph']
    na = 'not_available'
    for (i, experiment_id) in enumerate(ids):
        if experiment_id.startswith('#'):
            continue
        experiment_id = experiment_id.rstrip()
        experiment_uri = '/experiments/%s/' % (experiment_id)
        idr_files = \
            [f for f in all_idr_files if f['dataset'] == experiment_uri]
        idr_step_runs = set([f.get('step_run') for f in idr_files])
        if not len(idr_step_runs):
            if not args.all:
                logger.warning("%s: Found %d IDR step runs. Skipping" %
                               (experiment_id, len(idr_step_runs)))
            continue
        idr_qc_uris = []
        assemblies = []
        for f in idr_files:
            quality_metrics = f.get('quality_metrics')
            if not len(quality_metrics) == 1:
                logger.error(
                    '%s: Expected one IDR quality metric for file %s.'
                    ' Found %d.' %
                    (experiment_id, f.get('accession'), len(quality_metrics)))
            idr_qc_uris.extend(quality_metrics)
            assembly = f.get('assembly')
            if not assembly:
                logger.error('%s: File %s has no assembly' %
                             (experiment_id, f.get('accession')))
            assemblies.append(assembly)
        idr_qc_uris = set(idr_qc_uris)
        if not len(idr_qc_uris) == 1:
            logger.error('%s: Expected one unique IDR metric,'
                         ' found %d. Skipping.' %
                         (experiment_id, len(idr_qc_uris)))
            continue
        assemblies = set(assemblies)
        if not len(assemblies) == 1:
            logger.error('%s: Expected one unique assembly, found %d.'
                         ' Skipping.' % (experiment_id, len(assemblies)))
            continue
        # Grab unique value from set.
        idr_qc_uri = next(iter(idr_qc_uris))
        assembly = next(iter(assemblies))
        # Get analysis_id from DNAnexus, create analysis_link.
        idr_step_run_uri = next(iter(idr_step_runs))
        try:
            idr_step_run = common.encoded_get(server + idr_step_run_uri,
                                              keypair)
        except Exception as e:
            print(experiment_id, e, 'Skipping.')
            continue
        try:
            dx_job_id_str = idr_step_run.get('dx_applet_details')[0].get(
                'dx_job_id')
        except Exception:
            logger.warning(
                "Failed to get dx_job_id from step_run.dx_applet_details.dx_job_id"
            )
            logger.debug(idr_step_run)
            # Could try to pull it from alias.
            dx_job_id_str = None
        dx_job_id = dx_job_id_str.rpartition(':')[2] if dx_job_id_str else None
        if not args.released:
            dx_job = dxpy.DXJob(dx_job_id)
            job_desc = dx_job.describe()
            analysis_id = job_desc.get('analysis')
            logger.debug('%s' % (analysis_id))
            analysis = dxpy.DXAnalysis(analysis_id)
            desc = analysis.describe()
            project = desc.get('project')
            analysis_link = 'https://platform.dnanexus.com/projects/%s/monitor/analysis/%s' % (
                desc.get('project').split('-')[1],
                desc.get('id').split('-')[1])
        else:
            analysis_link = na
            desc = {}

        # Get IDR object.
        idr = common.encoded_get(server + idr_qc_uri, keypair)
        # Pull metrics of interest.
        idr_status = idr.get('status', na)
        if (args.released and (idr_status == na or idr_status != 'released')):
            logger.error('%s: Expected released IDR metric. Skipping.' %
                         idr_qc_uris)
            continue
        Np = idr.get('Np', na)
        N1 = idr.get('N1', na)
        N2 = idr.get('N2', na)
        Nt = idr.get('Nt', na)
        Fp = idr.get('Fp', na)
        F1 = idr.get('F1', na)
        F2 = idr.get('F2', na)
        Ft = idr.get('Ft', na)
        quality_metric_of = idr.get('quality_metric_of', [])
        date = idr.get('date_created', na)
        rescue_ratio = idr.get('rescue_ratio', na)
        self_consistency_ratio = idr.get('self_consistency_ratio', na)
        reproducibility_test = idr.get('reproducibility_test', na)
        # Get Experiment object.
        experiment = common.encoded_get(server + experiment_id, keypair)
        experiment_link = '%sexperiments/%s' % (server,
                                                experiment.get('accession'))
        # Get Award object.
        award = common.encoded_get(server + experiment.get('award'), keypair)
        # Grab project phase, e.g. ENCODE4.
        rfa = award.get('rfa', na)
        row = {
            'date': date,
            'analysis': analysis_link,
            'analysis_id': desc.get('id', na),
            'experiment': experiment_link,
            'target': experiment['target'].split('/')[2],
            'biosample_term_name': experiment.get('biosample_term_name'),
            'biosample_type': experiment.get('biosample_type'),
            'replication': experiment.get('replication_type'),
            'lab': experiment['lab'].split('/')[2],
            'rfa': rfa,
            'assembly': assembly,
            'Nt': Nt,
            'Np': Np,
            'N1': N1,
            'N2': N2,
            'rescue_ratio': rescue_ratio,
            'self_consistency_ratio': self_consistency_ratio,
            'reproducibility_test': reproducibility_test,
            'Ft': Ft,
            'Fp': Fp,
            'F1': F1,
            'F2': F2,
            'state': desc.get('state', na),
            'release': experiment['status'],
            'total_price': desc.get('totalPrice', na),
            'quality_metric_of': ', '.join(quality_metric_of)
        }
        writer.writerow(row)
    if args.create_google_sheet:
        args.outfile.close()
        # Load CSV data, sort.
        idr_data = pd.read_table(temp_file)
        idr_data = idr_data.replace('not_available', '')
        idr_data.date = idr_data.date.apply(lambda x: pd.to_datetime(x))
        idr_data = idr_data.sort_values(
            by=['lab', 'biosample_term_name', 'target', 'experiment'],
            ascending=[True, True, True, True])
        idr_data.date = idr_data.date.astype('str')
        idr_data = idr_data.reset_index(drop=True)
        # Read sheet title and create unique page title.
        date = datetime.now().strftime('%m_%d_%Y')
        sheet_title = (args.sheet_title if not args.released else
                       '{} Released'.format(args.sheet_title))
        page_title = '%s_IDR_FRIP_%s' % (args.assembly, date)
        # Open/create Google Sheet.
        gc = pygsheets.authorize(args.apikey)
        try:
            sh = gc.open(sheet_title)
        except pygsheets.exceptions.SpreadsheetNotFound:
            sh = gc.create(sheet_title)
        try:
            wks = sh.add_worksheet(page_title)
        except HttpError:
            wks = sh.worksheet_by_title(page_title)
        # Clear worksheet.
        wks.clear()
        # Add data from DataFrame.
        wks.set_dataframe(idr_data, copy_head=True, fit=True, start='A1')
        # Apply formatting and conditions.
        header['repeatCell']['range']['sheetId'] = wks.id
        wks.client.sh_batch_update(wks.spreadsheet.id, header)
        # Format numbers.
        for col in number_format_columns:
            num = idr_data.columns.get_loc(col)
            number_format['repeatCell']['range']['startColumnIndex'] = num
            number_format['repeatCell']['range']['endColumnIndex'] = num + 1
            number_format['repeatCell']['range']['sheetId'] = wks.id
            wks.client.sh_batch_update(wks.spreadsheet.id, number_format)
        # Resize font.
        font_size_format['repeatCell']['range']['sheetId'] = wks.id
        wks.client.sh_batch_update(wks.spreadsheet.id, font_size_format)
        # Add conditional formatting.
        for conditional in conditions:
            num = idr_data.columns.get_loc("reproducibility_test")
            conditional['addConditionalFormatRule']['rule']['ranges'][0][
                'startColumnIndex'] = num
            conditional['addConditionalFormatRule']['rule']['ranges'][0][
                'endColumnIndex'] = num + 1
            conditional['addConditionalFormatRule']['rule']['ranges'][0][
                'sheetId'] = wks.id
            wks.client.sh_batch_update(wks.spreadsheet.id, conditional)
        for k, v in notes_dict.items():
            num = idr_data.columns.get_loc(k)
            note['repeatCell']['range']['startColumnIndex'] = num
            note['repeatCell']['range']['endColumnIndex'] = num + 1
            note['repeatCell']['cell']['note'] = v
            note['repeatCell']['range']['sheetId'] = wks.id
            wks.client.sh_batch_update(wks.spreadsheet.id, note)
        # Optional. Smaller column width to match original.
        for i in range(wks.cols):
            wks.adjust_column_width(i, pixel_size=38)
        # Resize tiny columns.
        tiny_columns = ['experiment', 'analysis']
        for i in [idr_data.columns.get_loc(x) for x in tiny_columns]:
            wks.adjust_column_width(i, pixel_size=25)
        # Resize medium columns.
        medium_columns = ['replication', 'assembly', 'rfa']
        for i in [idr_data.columns.get_loc(x) for x in medium_columns]:
            wks.adjust_column_width(i, pixel_size=65)
        # Resize wide columns.
        wide_columns = ['target', 'reproducibility_test', 'lab']
        for i in [idr_data.columns.get_loc(x) for x in wide_columns]:
            wks.adjust_column_width(i, pixel_size=85)
        # Remove temp file.
        os.remove(temp_file)
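
# Note: the header, number_format, font_size_format, conditions and notes_dict
# request dicts used above are defined elsewhere in the original script.  For
# orientation only, a hypothetical repeatCell request of the kind they hold
# (bolding the header row) could look like:
example_header_request = {
    'repeatCell': {
        'range': {'sheetId': None,  # the code above fills this in with wks.id
                  'startRowIndex': 0, 'endRowIndex': 1},
        'cell': {'userEnteredFormat': {'textFormat': {'bold': True}}},
        'fields': 'userEnteredFormat.textFormat.bold',
    }
}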
Esempio n. 42
0
    # with open('data.json') as f:
    data = json.load(open(configJson))
    return data


if (current_weekday == 1 and current_hour == '02') or (os.path.exists('/home/sasha/GoInbound/list_name') is False):
    f = open("/home/sasha/GoInbound/list_name", "w")
    f.write(strng)
    f.close()
    print("list name overwrote")

if int(current_hour) < 4:
    current_weekday = current_weekday - 1

######################################################################
gc = pygsheets.authorize(outh_file='creds.json', outh_nonlocal=True)
# select the sheet
#  sh = retry(gc.open)('Support hours') - its using retry function
sh = gc.open('Support hours')
# select the worksheet
wks = sh.worksheet(property='title', value=strng)
# wks = sh.worksheet(property='title', value='April 2 - April 6')
######################################################################
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
# SLACK_VERIFICATION_TOKEN = os.environ["SLACK_VERIFICATION_TOKEN"]

# Slack client for Web API requests
slack_client = SlackClient(SLACK_BOT_TOKEN)

we = get_matrix()
WEEKDAY_MATRIX = get_matrix()['WEEKDAY_MATRIX']
    def __init__(self, key_file_location):

        self.gsheet_service = pygsheets.authorize(service_file=key_file_location)
def scanAuctions(info,
                 searchTerms,
                 dataIndex,
                 buyIndex,
                 fromCountry,
                 condition,
                 shipping,
                 resell,
                 priceLimit,
                 excludedKeywords=[]):
    try:
        currentDate = datetime.now()
        api = Connection(appid='APP_ID_HERE', config_file=None)
        request = {
            "keywords": searchTerms,
            "outputSelector": "SellerInfo",
            "sortOrder": "PriceByShippingLowest"
        }
        auction = api.execute('findItemsAdvanced', request)
        assert (auction.reply.ack == 'Success')
        assert (type(auction.reply.timestamp) == datetime)
        assert (type(auction.reply.searchResult.item) == list)
        auctions = auction.reply.searchResult.item
        assert (type(auction.dict()) == dict)
        auth = pygsheets.authorize(service_file='JSON_AUTH_FILE_PATH')
        sheet = auth.open('eBay')
        worksheet = sheet[dataIndex]
        dupeURL = ''
        acceptableShippingTypes = ['Free', 'Fixed', 'FreightFlat', 'Flat']
        unacceptableShippingTypes = [
            'Calculated', 'CalculatedDomesticFlatInternational', 'CustomCode',
            'Pickup', 'FreePickup', 'FlatDomesticCalculatedInternational',
            'Free', 'NotSpecified'
        ]

        print('Looping through ' + str(len(auctions)) + ' ' + info + ' ' +
              str(currentDate))

        for item in auctions:
            itemName = item.get('title')
            itemCountry = item.get('country')
            itemLocation = item.get('location')
            itemShippingType = item.shippingInfo.get('shippingType')
            itemCost = float(
                item.sellingStatus.get('convertedCurrentPrice').value)
            if (itemShippingType not in str(unacceptableShippingTypes)):
                itemShippingCost = float(
                    item.shippingInfo.shippingServiceCost.value)
            else:
                itemShippingCost = 0.0
            itemBuyNow = item.listingInfo.get('listingType')
            itemStatus = item.sellingStatus.get('sellingState')
            itemConditionID = str(item.condition.get('conditionId'))
            itemCondition = conditionDef(str(
                item.condition.get('conditionId')))
            itemURL = item.get('viewItemURL')
            itemStart = str(item.listingInfo.get('startTime'))
            itemEnd = str(item.listingInfo.get('endTime'))
            sellerFeedbackscore = int(item.sellerInfo.get('feedbackScore'))
            sellerPositiveFeedback = float(
                item.sellerInfo.get('positiveFeedbackPercent'))
            totalItemCost = itemCost + itemShippingCost

            listOfURLS = worksheet.find(itemURL)
            if (len(listOfURLS) == 1):
                dupeURL = str(listOfURLS[0])

            if (conditionsToWrite(itemURL, dupeURL, fromCountry, itemCountry,
                                  itemBuyNow, itemConditionID, condition,
                                  sellerFeedbackscore, sellerPositiveFeedback,
                                  itemShippingType, acceptableShippingTypes,
                                  item.listingInfo.get('endTime'),
                                  excludedKeywords, itemName)):
                if (resell):
                    cells = worksheet.get_all_values(
                        include_tailing_empty_rows=False,
                        include_tailing_empty=False,
                        returnas='matrix')
                    lastRow = len(cells)
                    worksheet.insert_rows(lastRow,
                                          number=1,
                                          values=[
                                              itemName, itemCountry,
                                              itemLocation, itemShippingType,
                                              itemBuyNow, itemStatus, itemURL,
                                              itemStart, itemEnd, totalItemCost
                                          ])
                elif (not resell and float(totalItemCost) <= priceLimit):
                    cells = worksheet.get_all_values(
                        include_tailing_empty_rows=False,
                        include_tailing_empty=False,
                        returnas='matrix')
                    lastRow = len(cells)
                    worksheet.insert_rows(lastRow,
                                          number=1,
                                          values=[
                                              itemName, itemCountry,
                                              itemLocation, itemShippingType,
                                              itemBuyNow, itemStatus, itemURL,
                                              itemStart, itemEnd, totalItemCost
                                          ])
        if (resell):
            #Cleans cell data up for parsing.
            avg = replaceChars(worksheet.range('L2:L2'), 'L')
            median = replaceChars(worksheet.range('M2:M2'), 'M')

            for item in auctions:
                itemName = item.get('title')
                itemCountry = item.get('country')
                itemLocation = item.get('location')
                itemShippingType = item.shippingInfo.get('shippingType')
                itemCost = float(
                    item.sellingStatus.get('convertedCurrentPrice').value)
                if (itemShippingType not in str(unacceptableShippingTypes)):
                    itemShippingCost = float(
                        item.shippingInfo.shippingServiceCost.value)
                else:
                    itemShippingCost = 0.0
                itemBuyNow = item.listingInfo.get('listingType')
                itemStatus = item.sellingStatus.get('sellingState')
                itemConditionID = str(item.condition.get('conditionId'))
                itemCondition = conditionDef(
                    str(item.condition.get('conditionId')))
                itemURL = str(item.get('viewItemURL'))
                itemStart = str(item.listingInfo.get('startTime'))
                itemEnd = str(item.listingInfo.get('endTime'))
                sellerFeedbackscore = int(
                    item.sellerInfo.get('feedbackScore'))
                sellerPositiveFeedback = float(
                    item.sellerInfo.get('positiveFeedbackPercent'))
                totalItemCost = itemCost + itemShippingCost

                costBasis = avg if avg < median else median
                profit = (costBasis - totalItemCost)
                fees = (costBasis * .25) if shipping else (costBasis * .15)
                if (profit > fees
                        and profit > totalItemCost - (totalItemCost * .20)):
                    worksheet = sheet[buyIndex]
                    listOfURLS = worksheet.find(itemURL)
                    if (len(listOfURLS) == 1):
                        dupeURL = str(listOfURLS[0])
                    if (conditionsToWrite(
                            itemURL, dupeURL, fromCountry, itemCountry,
                            itemBuyNow, itemConditionID, condition,
                            sellerFeedbackscore, sellerPositiveFeedback,
                            itemShippingType, acceptableShippingTypes,
                            item.listingInfo.get('endTime'), excludedKeywords,
                            itemName)):
                        cells = worksheet.get_all_values(
                            include_tailing_empty_rows=False,
                            include_tailing_empty=False,
                            returnas='matrix')
                        lastRow = len(cells)
                        worksheet.insert_rows(lastRow,
                                              number=1,
                                              values=[
                                                  itemName, itemCountry,
                                                  itemLocation,
                                                  itemShippingType, itemBuyNow,
                                                  itemStatus, itemURL,
                                                  itemStart, itemEnd,
                                                  totalItemCost, profit
                                              ])
    except Exception as e:
        print(str(e))
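
# conditionDef, conditionsToWrite and replaceChars are helpers defined
# elsewhere in the original script.  As a hypothetical sketch, conditionDef
# presumably maps eBay's numeric condition IDs to readable labels:
def conditionDef(condition_id):
    labels = {
        '1000': 'New',
        '1500': 'New other (see details)',
        '2000': 'Manufacturer refurbished',
        '2500': 'Seller refurbished',
        '3000': 'Used',
        '7000': 'For parts or not working',
    }
    return labels.get(condition_id, 'Unknown')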