Example #1
    def _login(self, email: str, password: str, token: str = None):
        self.session.cookies.clear()
        self.session = Util.mount_standard_session(self.session)

        headers = {}

        body = {
            'email': email,
            'password': password,
        }

        if token:
            body['token'] = token

        response = self.session.post(
            url='{}/api/auth'.format(self.url),
            json=body,
            headers=headers,
        )

        Util.raise_detailed_error(response)

        self.user = response.json()
        # Persist the session so we can quickly run again without re-entering the 2FA token.
        self.save_session_to_disk()
Example #2
 def list(self):
     response = self.session.get(self.url + '?filter=',
                                 headers={
                                     'content-type': 'application/json'
                                 })
     Util.raise_detailed_error(response)
     return response
Example #3
 def get(self, job_id=None):
     if job_id is None:
         job_id = self.job_id
     url = '{}/{}'.format(self.url, job_id)
     response = self.session.get(url, headers={'content-type': 'application/json'})
     Util.raise_detailed_error(response)
     return response.json()
Example #4
    def download_original_file(self, entity_id: int,
                               destination_filename: str) -> str:
        """
        Download the originally uploaded file corresponding to a PipeBio document.
        Two requests are made:
        1. Request a signed url for this document (GET /api/v2/entities/:id/original)
        2. Download the data from that signed url (GET <result-from-step-1>)
        """
        # First request a signed url from PipeBio.
        signed_url_response = self.session.get(
            '{}/api/v2/entities/{}/original'.format(self.url, entity_id))

        # Did the signed-url request work ok?
        Util.raise_detailed_error(signed_url_response)

        # Parse the results to get the signed url.
        download_url = signed_url_response.json()['url']

        # Download the original file, streaming so large files are not held in memory.
        download_response = requests.get(download_url, stream=True)

        # Did the download request work ok?
        Util.raise_detailed_error(download_response)

        # Write the result to disk in chunks.
        with open(destination_filename, 'wb') as f:
            for chunk in download_response.iter_content(chunk_size=8192):
                f.write(chunk)

        return destination_filename
Example #5
    def create_file(self,
                    project_id: str,
                    parent_id: int,
                    name: str,
                    entity_type: EntityTypes = EntityTypes.SEQUENCE_DOCUMENT,
                    visible=False) -> dict:
        print('create_file for parent_id:' + str(parent_id) + ' name:' +
              str(name))

        payload = {
            'name': name,
            'type': entity_type.value,
            'visible': visible,
            'shareableId': project_id,
        }

        if parent_id is not None:
            payload['parentId'] = int(parent_id)

        response = self.session.post(
            '{}/api/v2/entities'.format(self.url),
            headers={'Content-type': 'Application/json'},
            data=json.dumps(payload),
        )
        print('create_file response:' + str(response.status_code))
        Util.raise_detailed_error(response)
        return response.json()
Example #6
 def create(self, entity_id: int, type: AttachmentType, data: Union[dict, List]):
     print('Creating attachment: entity_id={},kind={}'.format(entity_id, type.value))
     url = '{}/{}/attachments'.format(self.url, entity_id)
     json = {"data": data, "type": type.value}
     response = self.session.post(url, json=json)
     Util.raise_detailed_error(response)
     print('Creating attachment: response', response.status_code)
     return response.json()
Example #7
 def get(self, entity_id):
     response = self.session.get(
         '{}/api/v2/entities/{}'.format(self.url, entity_id),
         headers={'Content-type': 'Application/json'},
     )
     print('get response:' + str(response.status_code))
     Util.raise_detailed_error(response)
     return response.json()
Example #8
    def _save_web_tmp_db(self, sql, tpl):
        try:
            with DBConnect.get_connection() as con:
                with con.cursor() as cur:
                    cur.execute(sql, tpl)

        except Exception:
            Util.put('_save_web_tmp_db error',
                     traceback.format_exc() + '\r\n\r\n' + sql)
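A minimal usage sketch for _save_web_tmp_db, assuming a hypothetical web_tmp table and column names (neither is confirmed by these snippets): the SQL carries %s placeholders and the values travel in the parameter tuple, so the driver handles quoting and escaping.

    def insert_rows_sketch(self, arr_csv_container, csv_file):
        # Hypothetical caller; the table and column names are assumptions, not the real schema.
        sql = 'INSERT INTO web_tmp (name, telephone, csv_file) VALUES (%s, %s, %s)'
        for csv_container in arr_csv_container:
            # Each row reuses the parameterized helper above.
            self._save_web_tmp_db(sql, (csv_container.name, csv_container.telephone, csv_file))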
Example #9
    def list(self) -> List[dict]:
        url = '{}/api/v2/shareables'.format(self.url)

        response = self.session.get(url)

        print('ShareablesService:list - response:' + str(response.status_code))

        Util.raise_detailed_error(response)

        return response.json()['data']
Example #10
 def mark_file_visible(self, entity_summary: UploadSummary):
     print('marking visible:', entity_summary)
     response = self.session.patch(
         '{}/api/v2/entities/{}'.format(self.url, entity_summary.id),
         headers={'Content-type': 'Application/json'},
         data=json.dumps(entity_summary.to_json()),
     )
     print('mark_file_visible response:' + str(response.status_code))
     print('mark_file_visible text    :' + str(response.text))
     Util.raise_detailed_error(response)
     return response.json()
Example #11
 def start_import_job(self):
     """
     Enable the cloud-function to trigger a job run via the kubernetes job processing engine.
     :return:
     """
     response = self.session.patch(
         '{}/{}/import'.format(self.url, self.job_id),
         headers={'content-type': 'application/json'}
     )
     Util.raise_detailed_error(response)
     return response
Example #12
    def create(self, name: str) -> dict:
        url = '{}/api/v2/shareables'.format(self.url)

        response = self.session.post(
            url,
            data=json.dumps({'name': name}),
        )

        print('ShareablesService:create - response:' + str(response.status_code))

        Util.raise_detailed_error(response)

        return response.json()
Example #13
 def upsert(self, entity_id: int, type: AttachmentType, data: Union[dict, List], version: int = 1,
            ignore_version=True):
     """
     Create or update if exists.
     """
     print('Upserting attachment: entity_id={},type={},version={},ignore_version={}'.format(entity_id, type.value,
                                                                                            version,
                                                                                            ignore_version))
     url = '{}/{}/attachments'.format(self.url, entity_id)
     json = {"data": data, "version": version, "type": type.value, "ignoreVersion": ignore_version}
     response = self.session.put(url, json=json)
     Util.raise_detailed_error(response)
     print('Creating attachment: response', response.status_code)
Example #14
def lambda_handler(event, context):
    reload(sys)
    sys.setdefaultencoding("utf-8")

    Util.put('lambda_handler', str(event))

    # Change behavior depending on the event type.
    for event_record in event['Records']:
        if 's3' in event_record:  # S3 event
            key = Util.url_decode(event_record['s3']['object']['key'])
            Util.put('key', key)
            import s3_main
            s3_main.data_import(key)
        else:  # cron event
            pass
Example #15
    def upload_data_to_signed_url(self, absolute_file_location: str, signed_url: str, signed_headers):

        # 1. Start the signed-upload.
        # NOTE: Url and headers cannot be modified or the upload will fail.
        create_upload_response = self.session.post(signed_url, headers=signed_headers)
        Util.raise_detailed_error(create_upload_response)
        response_headers = create_upload_response.headers
        location = response_headers['Location']

        # 2. Upload bytes.
        with open(absolute_file_location, 'rb') as file:
            upload_response = self.session.put(location, data=file)
            Util.raise_detailed_error(upload_response)
            print('Upload response: ', upload_response.status_code)
            print('Upload response:', upload_response.text)
Example #16
def example_02a_download_result_as_tsv(document_id: int):
    """
    Download the raw file as a TSV.
    """
    client = PipebioClient()

    # Either login with hardcoded variables or use environment variables:
    # e.g.
    #       client.login(<my-email>, <my-password>, <my-token>)
    #   or:
    #       PIPE_EMAIL=<my-email> PIPE_PASSWORD=<my-password> PIPE_TOKEN=<my-token> python login.py
    client.login()

    # Display who we are logged in as.
    user = client.authentication.user
    print('\nLogged in as {} {}.\n'.format(user['firstName'], user['lastName']))

    # Set the download name and folder.
    destination_filename = "download.tsv"
    destination_location = Util.get_executed_file_location()
    absolute_location = os.path.join(destination_location, '..',
                                     f'Downloads/{destination_filename}')

    client.sequences.download(document_id, destination=absolute_location)

    return absolute_location
Example #17
    def list_entities(self, shareable_id: str):
        url = '{}/api/v2/shareables/{}/entities'.format(self.url, shareable_id)

        response = self.session.get(url)

        print('ShareablesService:list_entities - response:' +
              str(response.status_code))

        Util.raise_detailed_error(response)

        file = StringIO(response.text)
        reader = csv.DictReader(file, dialect='excel-tab')
        rows = []
        for row in reader:
            rows.append(row)
        return rows
Example #18
    def create_signed_upload(self, file_name: str, parent_id: int, project_id: str, organization_id: str) -> dict:
        data = {
            'name': file_name,
            'type': EntityTypes.SEQUENCE_DOCUMENT.value,
            'targetFolderId': parent_id,
            'shareableId': project_id,
            'ownerId': organization_id,
        }

        # if details is not None:
        #     # Details should be an array of detail objects.
        #     data['details'] = []
        #     for detail in details:
        #         data['details'].append(detail.to_json())

        response = self.session.post('{}/api/v2/signed-url'.format(self.base_url), json=data)

        Util.raise_detailed_error(response)

        return response.json()
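Examples #15 and #18 together describe a two-step signed upload: request a signed URL, POST to it to open the upload, then PUT the file bytes. A minimal sketch of how the two methods might be chained, assuming the signed-url response carries 'url' and 'headers' keys (an assumption, not confirmed by these snippets):

    def upload_file_sketch(self, file_name: str, absolute_file_location: str,
                           parent_id: int, project_id: str, organization_id: str):
        # Hypothetical glue code; the 'url' and 'headers' response keys are assumptions.
        # 1. Ask the API for a signed upload URL for the new document (Example #18).
        signed = self.create_signed_upload(file_name, parent_id, project_id, organization_id)
        # 2. Open the upload with a POST, then stream the file bytes with a PUT (Example #15).
        self.upload_data_to_signed_url(absolute_file_location, signed['url'], signed['headers'])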
Example #19
    def create(self,
               owner_id: str,
               shareable_id: str,
               job_type: JobType,
               name: str,
               input_entity_ids: List[int],
               params=None) -> str:
        """

        :param owner_id: - organization id owning this job
        :param shareable_id: - project in which the docuemnts are
        :param job_type:
        :param name: - helpful user facing name
        :param input_entity_ids: - document ids
        :param params: - specific to this job_type
        :return:
        """

        if params is None:
            params = {}

        response = self.session.post(self.url,
                                     headers={
                                         'content-type': 'application/json'
                                     },
                                     data=json.dumps({
                                         'name': name,
                                         'params': params,
                                         'shareableId': shareable_id,
                                         'ownerId': owner_id,
                                         'inputEntities': input_entity_ids,
                                         'type': job_type.value
                                     }))

        Util.raise_detailed_error(response)

        data = response.json()
        job_id = data['id']
        self.job_id = job_id
        return job_id
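Examples #19, #11 and #3 suggest a create / start / poll job lifecycle. A minimal driver sketch, assuming JobType has an IMPORT member, that the job payload returned by get() includes a 'status' field with terminal values such as 'COMPLETE' or 'FAILED', and that time is imported (all assumptions):

    def run_import_job_sketch(self, owner_id: str, shareable_id: str, name: str,
                              input_entity_ids: List[int]) -> dict:
        # Hypothetical polling loop; the JobType member and 'status' values are assumptions.
        job_id = self.create(owner_id, shareable_id, JobType.IMPORT, name, input_entity_ids)
        self.start_import_job()          # Example #11
        job = self.get(job_id)           # Example #3
        while job.get('status') not in ('COMPLETE', 'FAILED'):
            time.sleep(5)                # wait a few seconds between polls
            job = self.get(job_id)
        return job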
Example #20
    def save(cls, org_file_name, arr_csv_container, str_collaborator):
        u"csv_containerの配列をもらって、ファイルに保存、S3にUP、そのパスを返す"
        tmp_csv_file_name = org_file_name.replace('_org.', '.')
        tmp_csv_zip_file_name = tmp_csv_file_name.replace('.csv', '.zip')
        tmp_csv_file_path = '/tmp/' + tmp_csv_file_name
        tmp_csv_zip_file_path = tmp_csv_file_path.replace('.csv', '.zip')

        with open(tmp_csv_file_path, 'ab') as f:  # append mode creates the file if it does not exist
            # Values are wrapped in double quotes only when they contain characters
            # that would break the CSV format (newlines, commas, quotes, etc.).
            csv_writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
            # Write the header row.
            header_row_data = []
            header_row_data.append('店舗名')  # store name
            header_row_data.append('店舗電話番号')  # store phone number
            csv_writer.writerow(header_row_data)

            for i, csv_container in enumerate(arr_csv_container):  # iterate with index
                try:
                    row_data = []
                    row_data.append(csv_container.name)
                    row_data.append(csv_container.telephone)
                    csv_writer.writerow(row_data)

                except Exception:
                    Util.put('CsvController.save', traceback.format_exc())

        with zipfile.ZipFile(tmp_csv_zip_file_path, 'w',
                             zipfile.ZIP_DEFLATED) as zipFile:
            zipFile.write(tmp_csv_file_path, tmp_csv_file_name)

        s3 = boto3.resource('s3')
        bucket = s3.Bucket(Settings.get_s3_bucket())
        # Upload the converted result as a backup.
        with open(tmp_csv_zip_file_path, 'rb') as csv_up_data:
            bucket.put_object(Key=str_collaborator + '/' +
                              tmp_csv_zip_file_name,
                              Body=csv_up_data)

        return tmp_csv_file_name
Example #21
    def get_fields(self, entity_id: int, ignore_id=False) -> List[Column]:
        """
        Returns the fields for a document or 404 if there are no fields (e.g. it's a folder).
        :return:
        """
        response = self.session.get(
            '{}/api/v2/entities/{}/fields'.format(self.url, entity_id), )
        Util.raise_detailed_error(response)
        columns = []
        for field in response.json():
            if ignore_id and field['name'] == 'id':
                continue
            # Not all fields have a description, so check it is set before reading it.
            description = field['description'] if 'description' in field else None
            columns.append(
                Column(field['name'], TableColumnType[field['type']],
                       description))

        return columns
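A short usage sketch for get_fields, assuming the surrounding class is reachable as client.entities and that Column exposes name and type attributes (both assumptions inferred from the constructor call above):

    # Hypothetical caller; document_id and the Column attribute names are assumptions.
    for column in client.entities.get_fields(document_id, ignore_id=True):
        print(column.name, column.type)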
Example #22
def data_import(csv_path):
    reload(sys)
    sys.setdefaultencoding("utf-8")

    Util.put('Conversion started', csv_path)

    with open(csv_path) as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row

        arr_csv_cnt = []
        # Process one row at a time.
        for row in reader:
            # Store the values in the CSV container class.
            csv_cnt = CsvContainer()
            csv_cnt.name = row[0]
            csv_cnt.telephone = row[1]
            # ... more fields follow in the real implementation

            arr_csv_cnt.append(csv_cnt)

    return arr_csv_cnt
Example #23
def data_import(key):
    u'Main entry point for the S3 trigger'
    try:
        reload(sys)
        sys.setdefaultencoding("utf-8")

        Util.put('s3_main.data_import started', key)

        # Download from S3 and save under a new name.
        s3 = boto3.resource('s3')
        bucket = s3.Bucket(Settings.get_s3_bucket())
        code = 'netmarketing'  # hardcoded for now; should really be determined dynamically
        org_csv_file_name = '{cd}_{dt}_{hs}_org.csv'.format(
            cd=code,
            dt=datetime.now().strftime('%Y%m%d_%H%M%S'),
            hs=hashlib.sha224(key).hexdigest()[0:10]  # avoid collisions when multiple files are updated
        )
        org_csv_file_path = '/tmp/' + org_csv_file_name
        bucket.download_file(key, org_csv_file_path)

        # Convert the CSV according to each provider's spec and return an array of CSV data.
        arr_csv_container = []
        import collaborator.netmarketing as collabo  # the import logic should really vary per partner
        arr_csv_container = collabo.data_import(org_csv_file_path)

        # Upload to S3 as a CSV file; returns the file name.
        csv_file = CsvController.save(org_csv_file_name, arr_csv_container,
                                      code)

        # Save the data to the service DB.
        CsvController.insert_web_tmp_db(arr_csv_container, csv_file)

        # Back up the original S3 file, then delete it.
        org_zip_file_name = org_csv_file_name.replace('.csv', '.zip')
        org_zip_file_path = org_csv_file_path.replace('.csv', '.zip')
        with zipfile.ZipFile(org_zip_file_path, 'w',
                             zipfile.ZIP_DEFLATED) as zipFile:
            zipFile.write(org_csv_file_path, org_csv_file_name)

        with open(org_zip_file_path, 'rb') as org_up_data:
            bucket.put_object(Key=code + '/' + org_zip_file_name,
                              Body=org_up_data)
        s3.Object(Settings.get_s3_bucket(), key).delete()

        Util.put('s3_main.data_import finished', '[name] : ' + code)

    except Exception:
        # Log the error.
        import traceback
        Util.put('s3_main.data_import error', traceback.format_exc())
Example #24
def example_02c_download_result_to_biological_format(document_id):
    """
    Download the format in Genbank, Fasta, Fastq, Ab1 etc.
    """
    client = PipebioClient()

    # Either login with hardcoded variables or use environment variables:
    # e.g.
    #       client.login(<my-email>, <my-password>, <my-token>)
    #   or:
    #       PIPE_EMAIL=<my-email> PIPE_PASSWORD=<my-password> PIPE_TOKEN=<my-token> python login.py
    client.login()

    # Display who we are logged in as.
    user = client.authentication.user
    print('\nLogged in as {} {}.\n'.format(user['firstName'], user['lastName']))

    # Specify a target folder on this computer to download the file to.
    destination_folder = Util.get_executed_file_location()

    return client.export(document_id, ExportFormat.GENBANK.value,
                         destination_folder)
Example #25
def example_02d_download_original_file(document_id: int,
                                       destination_filename: str = None
                                       ) -> str:
    """
    Download the original, un-parsed file.
   """
    client = PipebioClient()

    # Either login with hardcoded variables or use environment variables.
    # PIPE_EMAIL=<my-email> PIPE_PASSWORD=<my-password> PIPE_TOKEN=<my-token> python login.py
    client.login()

    # Display who we are logged in as.
    user = client.authentication.user
    print('\nLogged in as {} {}.\n'.format(user['firstName'], user['lastName']))

    # Set the download name and folder.
    destination_filename = "download.tsv" if destination_filename is None else destination_filename
    destination_location = Util.get_executed_file_location()
    absolute_location = os.path.join(destination_location,
                                     f'../Downloads/{destination_filename}')

    return client.entities.download_original_file(document_id,
                                                  absolute_location)
Example #26
 def get(self, entity_id: str, type: AttachmentType, version: int = 1):
     url = '{}/{}/attachments/{}'.format(self.url, entity_id, type.value)
     response = self.session.get(url, json={'version': version})
     Util.raise_detailed_error(response)
     return response.json()
Example #27
 def __init__(self, url, session: requests.Session, job_id=None):
     self.base_url = url
     self.url = url + '/api/v2/jobs'
     self.session = Util.mount_standard_session(session)
     self.job_id = job_id
     self.status = 'RUNNING'
Example #28
 def __init__(self, url: str, session: requests.Session):
     self._url = url
     self.url = '{0}/api/v2'.format(url)
     self.session = Util.mount_standard_session(session)
     self.entities = Entities(url, session)