Example #1
 def upload_to_s3(cls,
                  *,
                  directory: DirectoryPath,
                  bucket: str = 'e2e-distributed-stress-tests'):
     s3_key = os.getenv('TFID') if 'TFID' in os.environ else str(
         uuid.uuid4())
     S3(bucket=bucket).add(path=directory, key=s3_key)
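A minimal usage sketch for the method above (the owning class name StressTestArtifacts is hypothetical; when the TFID environment variable is set it is used as the S3 key, otherwise a random UUID is generated):

    from pathlib import Path

    # StressTestArtifacts is a hypothetical owner class for the classmethod above.
    StressTestArtifacts.upload_to_s3(directory=Path('/tmp/test-results'))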
Example #2
def upload():
    photo = request.args.get("photo.jpg")  # e.g. photo.jpg
    user_id = request.args.get("user_id")
    # Upload the photo to S3.
    photo_url = S3.upload_photo(photo)
    # Write the URL to the database.
    database.query("INSERT INTO photos (user_id, url) VALUES ({user_id}, {photo_url})") # SQL
    database.write({ "photo_url": photo_url, "user_id": user_id }) # NoSQL
    # Retrieve the metadata.
    metadata = Analyze.analyze_photo(photo)
    # Write the metadata to the database.
    database.query("UPDATE photos SET metadata = {metadata} WHERE photo_url = {photo_url}") # SQL
    database.update({ "photo_url": photo_url, "user_id": user_id, "metadata": metadata }) # NoSQL
    return 'OK'
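The pseudocode above interpolates values straight into the SQL string; a parameterized variant is sketched below (the database object and its query signature are carried over from the pseudocode and are assumptions):

    # Placeholder-based insert; the database.query signature is assumed to accept bound parameters.
    database.query(
        "INSERT INTO photos (user_id, url) VALUES (%s, %s)",
        (user_id, photo_url),
    )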
Example #3
    def __init__(self, dataset_location='caltech-dataset/dataset'):
        self.dataset_location = dataset_location
        self.annotations = None

        self.anchors = Anchors([30, 60, 100, 200, 350], [0.41])
        CaltechDataset.LOSS_LAMBDA = 2 * float(CaltechDataset.OUTPUT_SIZE[0] * CaltechDataset.OUTPUT_SIZE[1] * self.anchors.num) / float(CaltechDataset.MINIBATCH_SIZE)

        self.epoch = 0
        self.training_minibatch = 0
        self.validation_minibatch = 0
        self.testing_minibatch = 0
        self.s3 = S3(boto3.client("s3"))
        self.s3.bucket = "caltech-dataset"

        # self.set_training([(0, 1, 975), (3, 8, 240), (3, 8, 262), (3, 8, 279), (3, 8, 280), (3, 8, 293), (3, 8, 294), (3, 8, 295), (3, 8, 299), (3, 8, 300), (3, 8, 306), (3, 8, 308), (3, 8, 309), (3, 8, 313), (3, 8, 314), (3, 8, 315), (3, 8, 316), (3, 8, 317), (3, 8, 318), (3, 8, 321), (3, 8, 322), (3, 8, 326), (3, 8, 327), (3, 8, 334), (3, 8, 335), (3, 8, 336), (3, 8, 342), (3, 8, 345), (3, 8, 347), (3, 8, 348), (3, 8, 349), (3, 8, 350), (3, 8, 351), (3, 8, 358), (3, 8, 359), (3, 8, 360), (3, 8, 361), (3, 8, 362), (3, 8, 363), (3, 8, 364), (3, 8, 365), (3, 8, 368), (3, 8, 369), (3, 8, 370), (3, 8, 371), (3, 8, 372), (3, 8, 373), (3, 8, 374), (3, 8, 380), (3, 8, 381), (3, 8, 382), (3, 8, 391), (3, 8, 392), (3, 8, 393), (3, 8, 396), (3, 8, 397), (3, 8, 398), (3, 8, 401), (3, 8, 404), (3, 8, 410), (3, 8, 420), (3, 8, 427), (3, 8, 429), (3, 8, 430), (3, 8, 431), (3, 8, 434), (3, 8, 437), (3, 8, 443), (3, 8, 444), (3, 8, 451), (3, 8, 455), (3, 8, 456), (3, 8, 457), (3, 8, 458), (3, 8, 459), (3, 8, 460), (3, 8, 462), (3, 8, 466), (3, 8, 467), (3, 8, 478), (3, 8, 479), (3, 8, 480), (3, 8, 492), (3, 8, 493), (3, 8, 514), (3, 8, 515)])
        self.discover_training()

        self.discover_testing()
Example #4
def download_file(s3_key, base_dir=None):
    """Downloads a file from S3

    Args:
        s3_key (str): S3 location of a file
        base_dir (str, optional): Directory to download files to. Defaults to current working dir.

    Returns:
        str: Path to downloaded file
    """

    assert s3_key.startswith("s3://")
    base_dir = base_dir if base_dir else os.getcwd()
    bucket, key = re.findall(r"s3://(.+?)/(.+)", s3_key)[0]
    S3().download_file(os.path.join(base_dir, key),
                       bucket,
                       key,
                       overwrite=True)
    return os.path.join(base_dir, key)
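A minimal usage sketch for download_file (bucket and key are placeholders; the local path mirrors the S3 key under base_dir):

    # "my-bucket" and the key are placeholder values.
    local_path = download_file("s3://my-bucket/data/input.csv", base_dir="/tmp")
    # local_path == "/tmp/data/input.csv"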
Example #5
def digital_recon(filename):
    if isinstance(filename, list):
        sdq_list = []
        item_list = []
        for file in filename:
            a, b = read_recon_file(file)
            sdq_list.extend(a)
            item_list.extend(b)
    else:
        sdq_list, item_list = read_recon_file(filename)

    # reconDynamo returns: SDQs loaded (count), SDQs not loaded (list), items loaded (count), items not loaded (list)
    a, b, c, d = reconDynamo(sdq_list, item_list)

    s3 = S3('dmf-prod')

    missing_sdqs = set()
    if len(sdq_list):
        sdq_bucket = s3['pine-dmf-sdqresponse']
        all_sdq_list = [
            str(i.key)[0:5] for i in sdq_bucket.bucket.objects.all()
        ]
        missing_sdqs = set(sdq_list) - set(all_sdq_list)
    print("Number of SDQ responses received: {}".format(len(sdq_list)))
    print("Number of SDQ responses missing: {}".format(len(missing_sdqs)))
    print(missing_sdqs)
    print("Number of SDQ responses loaded into DynamoDB: {}".format(a))
    print("Number of SDQs not loaded: {}".format(len(b)))
    print(b)
    print()

    missing_items = set()
    if len(item_list):
        item_bucket = s3['pine-dmf-itemresponse']
        all_item_list = [
            str(i.key)[0:5] for i in item_bucket.bucket.objects.all()
        ]
        missing_items = set(item_list) - set(all_item_list)
    print("Number of item responses received: {}".format(len(item_list)))
    print("Number of item responses missing: {}".format(len(missing_items)))
    print(missing_items)
    print("Number of Item responses loaded into DynamoDB: {}".format(c))
    print("Number of Items not loaded: {}".format(len(d)))
    print(d)
Example #6
 def load_merchants(self, acquirer: str) -> tuple:
     # Start with an empty frame so the return in `finally` also works on the error path.
     df = pd.DataFrame(columns=["merchantCode", "clientCode"])
     try:
         region = 'us-east-1'
         keys = {
             "AME": {
                 "region": "sa-east-1"
             }
         }
         if acquirer in keys:
             region = keys[acquirer]["region"]
         
         s3 = S3(p_bucket=f"concil-{environ.get('STAGE', 'dev')}-blackbox-{acquirer.lower()}", region_name=region)
         merchants = s3.get_object(p_key=f"establishment/CFR_ESTABELECIMENTOS_{acquirer.upper()}.csv")
         df = pd.read_csv(StringIO(merchants), sep=";")
         df = df.query("FILIAL_CODIGO != 'FILIAL_GERAL'", engine="python")
         df['ESTABELECIMENTO'] = df['ESTABELECIMENTO'].map(str)
         self.merchants = df
         # self.merchants.to_csv('test_establishments.csv', sep=',')
         df.rename(columns={"ESTABELECIMENTO": "merchantCode", "PFJ_CODIGO": "clientCode"}, inplace=True)
     except Exception as error:
         print(str(error))
     finally:
         # Runs on success and failure alike; on failure this returns an empty record list.
         return df[["merchantCode", "clientCode"]].to_dict(orient="records"), []
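A minimal usage sketch for the method above (loader is a hypothetical instance of the owning class; "AME" routes to the sa-east-1 bucket per the keys mapping):

    # loader is a hypothetical instance of the class that owns load_merchants.
    merchants, errors = loader.load_merchants("AME")
    print(len(merchants), "merchants loaded")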
Example #7
import datetime
import os
import time
from logging import getLogger
from pathlib import Path

import pandas as pd

from bitflyer_api import (cancel_child_order, get_balance, get_child_orders,
                          send_child_order)
from manage import CHILD_ORDERS_DIR, REF_LOCAL
from utils import df_to_csv, path_exists, read_csv, rm_file

if not REF_LOCAL:
    from aws import S3
    s3 = S3()

logger = getLogger(__name__)


class AI:
    """自動売買システムのアルゴリズム

    """
    def __init__(self,
                 latest_summary,
                 product_code,
                 min_size,
                 min_volume_short=2000,
                 min_volume_long=10000,
                 max_volume_short=10000,
Example #8
def lambda_handler(event, context):
    for _event in event['Records']:
        data = S3().read_object(_event['s3']['bucket']['name'],
                                _event['s3']['object']['key'])
        SentimentAnalysis().main(body=data)
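A minimal local invocation sketch, built from the S3 event record shape the handler reads (bucket and key values are placeholders):

    # Bucket name and object key below are placeholders.
    event = {
        'Records': [{
            's3': {
                'bucket': {'name': 'my-bucket'},
                'object': {'key': 'reviews/batch-001.json'},
            }
        }]
    }
    lambda_handler(event, context=None)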
Example #9
 def download_from_s3(cls,
                      *,
                      key: str,
                      local_directory: str = '.',
                      bucket: str = 'e2e-distributed-stress-tests'):
     S3(bucket=bucket).get(key=key, local_path=f'{local_directory}/{key}')
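A minimal usage sketch for the method above (the owning class name StressTestArtifacts is hypothetical, as in Example #1; key and local_directory are placeholders):

    # StressTestArtifacts is a hypothetical owner class; key and directory are placeholders.
    StressTestArtifacts.download_from_s3(key='run-artifacts.tar.gz', local_directory='/tmp')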
Example #10
from aws import S3
import datetime
import pandas as pd

RAWDATA_BUCKET = 'kpu-gradutation-team-rawdata-dev'

S3_CLIENT = S3()

dt_obj = datetime.datetime.now(datetime.timezone.utc)

print(dt_obj)

raw_files = [
    raw_file["Key"] for raw_file in S3_CLIENT.get_keys(
        Bucket=RAWDATA_BUCKET,
        Prefix="/kpu/rawdata/{:04d}/{:02d}/{:02d}/{:02d}".format(
            dt_obj.year, dt_obj.month, dt_obj.day, dt_obj.hour),
        is_sorted=False) if raw_file.get('Key', '').endswith('.csv')
]
print("raw file count : ", len(raw_files))
print(raw_files)

new_report_df = pd.DataFrame(
    columns=['timestamp_from', 'timestamp_to', 'status'])
merged_df = pd.concat(
    [
        pd.read_csv(
            S3_CLIENT.read_file(Bucket=RAWDATA_BUCKET, Key=raw_file)['Body'])
        for raw_file in raw_files
    ],
    join='outer',
    sort=False,
)