def __init__(self, project, bucket_name, tempdir='/tmp'):
     # connect to the cloud bucket
     os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.environ.get(
         'privatekey_path')
     self.client = storage.Client(project)
     self.bucket = self.client.get_bucket(bucket_name)
     self.tempdir = tempdir
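If the key file location is already known, the client can also be built from it directly instead of going through the environment variable; a minimal sketch with a hypothetical key path and bucket name:
from google.cloud import storage

# Hypothetical path and bucket name, for illustration only.
client = storage.Client.from_service_account_json('/path/to/privatekey.json')
bucket = client.get_bucket('my-bucket')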
Example #2
    def _join(self, filename='exit_code', delete=True):
        """Wait until all calculation are finished:
        This means wait until every bucket has an file with the name of the given filename

        :param str filename: The filename where the exit code is stored
        :param bool delete:  Should the instance be deleted after calculation
        """
        finished_tasks = []
        logger.info("Calculating..")
        gcs = storage.Client(self.project.project_id)
        while len(finished_tasks) < len(self.job.tasks):
            for t in [
                    task for task in self.job.tasks
                    if task not in finished_tasks
            ]:
                bucket = gcs.get_bucket(t.id)
                if bucket.get_blob(filename):
                    finished_tasks.append(t)
                    logger.info(
                        "Calculation is finished for instance: {}".format(
                            t.id))
                    if delete:
                        self._delete_instances([t])

            time.sleep(10)

        logger.info("Calculation is finished")
Example #3
def delete_user_image(user_id):
    # Check to see if the user exists
    u = User.get_by_id(user_id)
    if u is None:
        raise InvalidUsage('UserID does not match any existing user',
                           status_code=400)

    path = u.profile_picture_path
    if path is None:
        raise InvalidUsage('User has no profile picture.', status_code=400)

    # Create client for interfacing with Cloud Storage API
    client = storage.Client()
    bucket = client.get_bucket(global_vars.USER_IMG_BUCKET)

    bucket.delete_blob(path)

    u.profile_picture_path = None
    u.put()

    now = datetime.datetime.now()

    # Return response
    resp = jsonify({'picture_id deleted': path, 'date_deleted': now})
    resp.status_code = 200
    return resp
Example #4
def price(result):
    client = storage.Client()
    bucket = client.get_bucket('123iof')
    bucket.blob("updatedcsfc.xls")
    dfne = pd.read_excel('gs://123iof/updatedcfsc.xls')
    dfne = pd.DataFrame(dfne)
    dfne['ProductID'] = dfne["ProductID"].astype(str)
    currentdt = datetime.datetime.now()

    #serial_obj.close()
    global newlocdf
    newlocdf = dfne.loc[dfne['ProductID'] == str(result)]
    time = newlocdf['Time'].values
    temp = newlocdf['temperature'].values
    layout = go.Layout(title='Days vs Temperature',
                       xaxis=dict(title='Days'),
                       yaxis=dict(title='Temperature'))
    trace = go.Scatter(x=time, y=temp)

    data = [trace]

    fig = go.Figure(data=data, layout=layout)

    graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)

    return graphJSON
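Reading a gs:// path with pandas requires the gcsfs package; a minimal alternative sketch that stages the file locally first, reusing the bucket handle from the function above:
# Fallback if gcsfs is not installed: download to a temporary file, then read it.
blob = bucket.blob('updatedcfsc.xls')
blob.download_to_filename('/tmp/updatedcfsc.xls')
dfne = pd.read_excel('/tmp/updatedcfsc.xls')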
Example #5
def list_blobs_with_prefix(bucket_name, prefix, delimiter=None):
    """Lists all the blobs in the bucket that begin with the prefix.

    This can be used to list all blobs in a "folder", e.g. "public/".

    The delimiter argument can be used to restrict the results to only the
    "files" in the given "folder". Without the delimiter, the entire tree under
    the prefix is returned. For example, given these blobs:

        /a/1.txt
        /a/b/2.txt

    If you just specify prefix = '/a', you'll get back:

        /a/1.txt
        /a/b/2.txt

    However, if you specify prefix='/a' and delimiter='/', you'll get back:

        /a/1.txt

    """
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)

    blobs = bucket.list_blobs(prefix=prefix, delimiter=delimiter)

    print('Blobs:')
    for blob in blobs:
        print(blob.name)

    if delimiter:
        print('Prefixes:')
        for prefix in blobs.prefixes:
            print(prefix)
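Illustrative calls for the helper above; the bucket name and prefix are placeholders:
# everything under public/, recursively
list_blobs_with_prefix('my-bucket', prefix='public/')
# only the top-level "files" under public/; sub-"folders" are reported as prefixes
list_blobs_with_prefix('my-bucket', prefix='public/', delimiter='/')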
Example #6
def combine():
    client = storage.Client(PROJECT_ID)
    bucket = client.bucket(PROJECT_ID + ".appspot.com")
    blobs = bucket.list_blobs()

    os.system("rm /tmp/*")

    names = []

    for blob in blobs:
        if "output" in blob.name:
            names.append(blob.name)  # keep as str so the path concatenation below works in Python 3

    names.sort()

    with open('/tmp/combine.lst', 'w') as f1:
        for name in names:
            f1.write("file '/tmp/" + name + "'\n")
            download(name)

    logger.log_text("Worker: created combine list: /tmp/combine.lst")

    ret = os.system(
        "ffmpeg -f concat -safe 0 -i  /tmp/combine.lst -c copy /tmp/combined.mkv"
    )

    if ret:
        logger.log_text("Worker: combine failed: /tmp/combine.mkv - " +
                        str(ret).encode('utf-8'))
        return

    upload("combined.mkv")
Example #7
def setUpModule():
    Config.CLIENT = storage.Client()
    # %d rounds milliseconds to nearest integer.
    bucket_name = 'new%d' % (1000 * time.time(),)
    # In the **very** rare case the bucket name is reserved, this
    # fails with a ConnectionError.
    Config.TEST_BUCKET = Config.CLIENT.create_bucket(bucket_name)
Example #8
def get_fpl_data(req: Request) -> str:
    """Cloud function to save data from FPL website.

    Writes raw JSON to Google Cloud storage.

    Args:
      req: Flask Request automatically passed by gcp Functions. Is ignored.

    Returns:
      Filename that was written to Google Cloud storage

    """
    del req
    storage_client = storage.Client()

    logging.info("Beginning FPL data collection run.")

    response_content = _fpl_downloader()

    fpl_bucket = storage_client.bucket(FPL_BUCKET_NAME)

    fpl_blob_name = 'fpl_data_' + dt.datetime.utcnow().isoformat()
    new_blob = fpl_bucket.blob(fpl_blob_name)
    new_blob.upload_from_file(BytesIO(response_content))  # upload the full payload; a fixed size would truncate it

    logging.info(f"Data uploaded to {new_blob.name}.")

    return new_blob.name
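A minimal sketch of the matching read path, assuming a recent google-cloud-storage (download_as_bytes) and the same FPL_BUCKET_NAME constant:
def read_fpl_blob(blob_name):
    # Illustrative counterpart: pull a previously written FPL snapshot back out of the bucket.
    client = storage.Client()
    return client.bucket(FPL_BUCKET_NAME).blob(blob_name).download_as_bytes()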
Example #9
def update_file_cache(settings):
    '''Download JSON files from the GCP Storage bucket; return a list of the
    files that were downloaded/changed.'''
    etags = load_json(settings['cache.dir'] + '/gcp/etags.json')

    #FIXME
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = settings['creds.dir'] + \
                                                   "/" + \
                                                   settings['creds.gcp.json']
    credentials = GoogleCredentials.get_application_default()
    client = storage.Client(project=settings['creds.gcp.project'],
                            credentials=credentials)
    bucket = client.get_bucket('exported-billing')

    LOG.debug("Checking for new/changed files.")
    changed = []
    for obj in bucket.list_blobs():
        filename = settings['cache.dir'] + '/gcp/' + obj.name
        if not os.path.exists(filename) or \
                filename not in etags or \
                obj.etag != etags[filename]:

            try:
                LOG.debug("Etags for %s: %s == %s", obj.name, obj.etag,
                          etags[filename])
            except KeyError:
                LOG.debug("Etag missing: %s", obj.name)

            LOG.info("Downloading: %s", obj.name)
            obj.download_to_filename(filename)
            etags[filename] = obj.etag
            changed.append(os.path.basename(filename))
    save_json(settings['cache.dir'] + '/gcp/etags.json', etags)
    return changed
Example #10
def new_listing_image(listing_id):
    # user_id = request.form['user_id']
    # listing_id = request.form['listing_id']
    userfile = request.files['userfile']
    filename = userfile.filename

    # Check if listing exists
    l = Listing.get_by_id(listing_id)
    if l is None:
        raise InvalidUsage('Listing does not exist!', status_code=400)

    # Create client for interfacing with Cloud Storage API
    client = storage.Client()
    bucket = client.get_bucket(global_vars.LISTING_IMG_BUCKET)

    # Calculating size this way is not very efficient. Is there another way?
    userfile.seek(0, 2)
    size = userfile.tell()
    userfile.seek(0)

    # upload the item image
    path = str(listing_id) + '/' + filename
    image = bucket.blob(blob_name=path)
    image.upload_from_file(file_obj=userfile,
                           size=size,
                           content_type='image/jpeg')

    # Hacky way of making the image public..
    image.acl.all().grant_read()
    image.acl.save()

    resp = jsonify({'image_path': path, 'image_media_link': image.media_link})
    resp.status_code = 201
    return resp
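The ACL grant/save pair above can usually be replaced by a single call, as the next example also does; a one-line sketch (only valid when uniform bucket-level access is disabled on the bucket):
image.make_public()  # shortcut equivalent to the grant_read()/save() pair above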
Example #11
    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        if not serializer.is_valid():
            return FailedResponse(status_message='Invalid request')
        report = Report.objects.filter(id=request.data["report_id"]).first()
        if not report:
            return FailedResponse(status_message='Report not found')
        credentials_dict = json.loads(base64.b64decode(settings.GCP_AUTH))
        credentials = ServiceAccountCredentials.from_json_keyfile_dict(
            credentials_dict)
        client = storage.Client(credentials=credentials,
                                project=settings.GCP_PROJECT_ID)
        bucket = client.get_bucket(settings.GCP_BUCKET_NAME)
        image = base64.b64decode(serializer.data.get('photo'))

        record = ReportPhoto(user_id=request.user.id,
                             report_id=request.data["report_id"])
        record.save()

        blob = bucket.blob(f'images/{record.id}.jpg', chunk_size=262144)
        blob.upload_from_string(image)
        blob.make_public()

        record.public_url = blob.public_url
        record.save()
        return SuccessResponse(status_message='Success',
                               data={
                                   "photo_id": record.id,
                                   "public_url": blob.public_url
                               })
Example #12
def upload_gcp_func(file, filename):
    credentials_dict = {
        "type": "service_account",
        "project_id": "hackator",
        "private_key_id": "<enter your private key id>",
        "private_key": "<enter private key>",
        "client_email": "*****@*****.**",
        "client_id": "<enter client id>",
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://oauth2.googleapis.com/token",
        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
        "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/koushikhack44%40hackator.iam.gserviceaccount.com"
    }
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        credentials_dict)

    client = storage.Client(credentials=credentials, project='hackator')
    bucket = client.get_bucket('kbuckethack')
    blob = bucket.blob(filename)
    blob.upload_from_string(file)
Example #13
def CategoryGenerator(bucket, file_path):
    client = storage.Client()
    bucket = client.get_bucket(bucket)
    src_url_file = bucket.get_blob(file_path)
    src_url_string = src_url_file.download_as_string()
    for category_name in src_url_string.decode('utf-8').split('\r\n'):
        yield Category(category_name)
Example #14
def load(context, path, callback):
    if path.startswith('http'):  #_use_http_loader(context, url):
        logger.debug("[LOADER] load with http_loader")
        http_loader.load_sync(context,
                              path,
                              callback,
                              normalize_url_func=http_loader._normalize_url)
        return

    bucket_id = context.config.get("CLOUD_STORAGE_BUCKET_ID")
    project_id = context.config.get("CLOUD_STORAGE_PROJECT_ID")
    bucket = buckets[project_id].get(bucket_id, None)

    logger.debug("[LOADER] loading from bucket")

    if bucket is None:
        client = storage.Client(project_id)
        bucket = client.get_bucket(bucket_id)
        buckets[project_id][bucket_id] = bucket

    blob = bucket.get_blob(path)
    if blob:
        callback(blob.download_as_string())
    else:
        callback(blob)
Example #15
def upload_images():
    try:
        user_id = request.authorization.get('username')
        file = request.files['uploaded_file']
        filename = file.filename
        #folder = 'app/static/images/{0}/'.format(user_id)
        #completeName = folder+filename
        #dir = os.path.dirname(completeName)
        #if not os.path.exists(dir):
        #    os.makedirs(dir)
        #file.save(completeName)
        #file.close()
        completeName = "{0}-{1}".format(user_id, filename)
        # Create a Cloud Storage client.
        gcs = storage.Client()
        # Get the bucket that the file will be uploaded to.
        bucket = gcs.get_bucket(CLOUD_STORAGE_BUCKET)
        # Create a new blob and upload the file's content.
        blob = bucket.blob(completeName)
        blob.upload_from_string(file.read(), content_type=file.content_type)
        print "after file is uploaded::{0}".format(blob.public_url)
        # The public URL can be used to directly access the uploaded file via HTTP.
        return make_response(jsonify({'url': blob.public_url}), 200)
    except Exception as e:
        logging.error(str(e))
        abort(400)
Example #16
def testMoviePy():
    clips_array = []
    client = storage.Client('set-cloud-gaston')
    bucket = client.bucket('set-cloud-gaston.appspot.com')
    blob = bucket.blob("MM41916160062.mp4")
    tempFile = "/tmp/MM41916160062.mp4"

    print("testMoviePy - about to download {}".format(tempFile))

    with open(tempFile, 'wb') as f:  # binary mode: the blob is an MP4 file
        blob.download_to_file(f)
    print("testMoviePy - downloaded {}".format(tempFile))

    rgb = VideoFileClip(tempFile)
    color_bars_1 = rgb.subclip(0, 150)
    content = rgb.subclip(150, 1419)
    color_bars_2 = rgb.subclip(1419, 1517)
    print("testMoviePy - clip extraction done ")

    clips_array.append(content)
    print("testMoviePy - about to contatenate ")

    final = concatenate_videoclips(clips_array)
    print("testMoviePy - about to write video /tmp/MM41916160062_edited.mp4")

    final.write_videofile("/tmp/MM41916160062_edited.mp4")
    print("testMoviePy - /tmp/MM41916160062_edited.mp4 file created")

    blob = bucket.blob('MM41916160062_edited.mp4')
    print(
        "testMoviePy - about to upload to GC video as MM41916160062_edited.mp4"
    )
    blob.upload_from_file(open('/tmp/MM41916160062_edited.mp4', 'rb'))
    print("testMoviePy - done successfully")
Example #17
def setUpModule():
    Config.CLIENT = storage.Client()
    bucket_name = 'new' + unique_resource_id()
    # In the **very** rare case the bucket name is reserved, this
    # fails with a ConnectionError.
    Config.TEST_BUCKET = Config.CLIENT.bucket(bucket_name)
    retry_429(Config.TEST_BUCKET.create)()
Example #18
def transcode(messageAsString):
    #testMoviePy()
    print('inside def transcode(messageAsString) line 10')
    client = storage.Client('set-cloud-gaston')
    bucket = client.bucket('set-cloud-gaston.appspot.com')
    blob = bucket.blob(messageAsString)
    tempFile = "/tmp/" + messageAsString
    with open(tempFile, 'wb') as f:  # binary mode for the video download
        print("about to download {}".format(tempFile))
        blob.download_to_file(f)
        print("{} downloaded".format(tempFile))

    # os.system('rm /tmp/output.webm')

    command = '/usr/bin/avconv -i ' + tempFile + ' -c:v libvpx -crf 10 -b:v 1M -c:a libvorbis /tmp/output.webm'
    print('executing OS command')
    print(command)
    ret = os.system(command)
    if ret:
        sys.stderr.write("FAILED")
        print('OS command failed')
        return "Failed"
    print('OS command finished successfully')
    blob = bucket.blob('youtube_demo.webm')
    blob.upload_from_file(open('/tmp/output.webm', 'rb'))
    sys.stdout.write("SUCCESS")
    return "SUCCESS"
Example #19
 def _get_client(project_name):
     if project_name not in GS._clients:
         if storage is None:
             raise FileSystemNotSupported(
                 'Google Storage is not supported. Install "gcloud".')
         GS._clients[project_name] = storage.Client(project_name)
     return GS._clients[project_name]
Example #20
def start():
    # change this to point at your service-account credentials file
    os.environ[
        "GOOGLE_APPLICATION_CREDENTIALS"] = r"C:\Users\vince\Desktop\MyGoogleCloudService\linear-equator-253121-2b458fe691e7.json"

    # instantiate a client and look up the corresponding bucket
    client = storage.Client()
    bucketName = input("Enter Your Bucket:")
    try:
        bucket = client.get_bucket(bucketName)
    except Exception as e:
        print(e)
    bucketPath = input(
        "Enter your desired bucket path (format: '[path]/')\nIf no path, press Enter: "
    )
    # paths of all the audio files
    path = glob("../../data/audio/*.flac")
    #file name for google cloud
    fileNameGC = [
        bucketPath + name.replace("../../data/audio\\", "") for name in path
    ]

    #creating a tuple for the threadpool
    tuples = []
    for i in range(len(path)):
        tup = (fileNameGC[i], path[i])
        tuples.append(tup)
    for tup in tuples:
        printResult(upload(tup, bucket))
    print("All uploads are finished")
Example #21
def createdatabase():
    dfnew1 = dfnew[['Time', 'temperature', 'ItemName', 'price']]
    conn = sqlite3.connect('sense8.db')
    cursor = conn.cursor()
    print("Opened database successfully")
    try:
        dfnew1.to_sql('producttable', conn)
        dfnew.to_sql('sensortablefinalnew1', conn)

    except ValueError:
        print('already exists')
    sql = ''' INSERT INTO sensortablefinalnew1(ProductID,Time,temperature,ItemName,price,shelflife,calories,cholestrol_in_mg,pottasium,protiens,vitamin_A,vitamin_C,Calcium,Iron)
              VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
    #cur = conn.commit()
    #cur = conn.cursor()
    sql2 = ''' INSERT INTO producttable(ProductID,Time,temperature,ItemName,price)
                  VALUES(?,?,?,?,?)'''
    conn.executemany(sql, dfnew.to_records(index=True, ))
    conn.executemany(sql2, dfnew1.to_records(index=True, ))
    conn.commit()
    global dfsql
    dfsql = pd.read_sql_query("SELECT * FROM sensortablefinalnew1", conn)
    conn.close()
    dfsql.to_excel("cfscnew.xls")
    client = storage.Client()
    bucket = client.get_bucket('123iof')
    bucket.blob("newcfsc.xls")
    blob = bucket.blob('updatedcfsc.xls')
    with open('cfscnew.xls', 'rb') as csv:
        blob.upload_from_file(csv)

    return dfsql
Example #22
def main():
    connGCS = GCStorage(storage.Client())
    connBQ = GCBQ(bigquery.Client())
    bucketId = 'bigqbucket110211'
    datasetId = 'universal110211'
    tableId = 'UniversalBank'
    mybucket = connGCS.getGCBucket(connGCS.client, bucketId)
    #print(mybucket)
    if mybucket is None:
        mybucket = connGCS.createGCBucket(connGCS.client, bucketId)
    fileCopied = connGCS.copylocaltoGCPBucket(file_to_load_final, mybucket)
    if fileCopied:
        print('File copied to GCP bucket {}'.format(mybucket))
    #print(mybucket.list_blobs())
    dataset_id = connBQ.getBQDsetId(datasetId, connBQ.client)
    #print(dataset_id)
    if dataset_id is None:
        dataset_id = connBQ.createDatasetBQ("{}.{}".format(connBQ.client.project, datasetId), connBQ.client)
    #print(dataset_id)
    table_id = connBQ.getBQTableId("{}.{}.{}".format(connBQ.client.project, dataset_id, tableId), connBQ.client)
    #print(table_id)
    if table_id is None:
        table_id = connBQ.createTableBQ(tableId, datasetId, connBQ.client)
    #conn.getFileURI(mybucket)
    #print(mybucket.get_blob("{}.csv".format(tableId)).public_url)
    fileLoadedToBQ = connBQ.loaddataIntoBQ(connBQ.client, file_to_load_final, mybucket, tableId, datasetId, file_to_load_final)
Example #23
def blob_metadata(bucket_name, blob_name):
    """Prints out a blob's metadata."""
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.get_blob(blob_name)

    print('Blob: {}'.format(blob.name))
    print('Bucket: {}'.format(blob.bucket.name))
    print('Storage class: {}'.format(blob.storage_class))
    print('ID: {}'.format(blob.id))
    print('Size: {} bytes'.format(blob.size))
    print('Updated: {}'.format(blob.updated))
    print('Generation: {}'.format(blob.generation))
    print('Metageneration: {}'.format(blob.metageneration))
    print('Etag: {}'.format(blob.etag))
    print('Owner: {}'.format(blob.owner))
    print('Component count: {}'.format(blob.component_count))
    print('Crc32c: {}'.format(blob.crc32c))
    print('md5_hash: {}'.format(blob.md5_hash))
    print('Cache-control: {}'.format(blob.cache_control))
    print('Content-type: {}'.format(blob.content_type))
    print('Content-disposition: {}'.format(blob.content_disposition))
    print('Content-encoding: {}'.format(blob.content_encoding))
    print('Content-language: {}'.format(blob.content_language))
    print('Metadata: {}'.format(blob.metadata))
Example #24
def get_sdrf_info(project_id, bucket_name, disease_codes, header,
                  set_index_col, search_patterns):

    client = storage.Client(project_id)
    bucket = client.get_bucket(bucket_name)

    # connect to google cloud storage
    gcs = GcsConnector(project_id, bucket_name)

    sdrf_info = pd.DataFrame()
    for disease_code in disease_codes:
        for blob in bucket.list_blobs(prefix=disease_code):
            sdrf_filename = blob.name
            if not all(x in sdrf_filename for x in search_patterns):
                continue
            print(sdrf_filename)

            filebuffer = gcs.download_blob_to_file(sdrf_filename)
            # convert to a dataframe
            sdrf_df = convert_file_to_dataframe(filebuffer, skiprows=0)

            sdrf_df = cleanup_dataframe(sdrf_df)

            sdrf_df['Study'] = disease_code

            try:
                sdrf_df = sdrf_df.set_index(set_index_col)
            except KeyError:
                sdrf_df = sdrf_df.set_index("Derived_Array_Data_File")

            sdrf_info = pd.concat([sdrf_info, sdrf_df])

    print("Done loading SDRF files.")
    return sdrf_info
Example #25
def prepare_gs_default_test_bucket():
    # Check credentials are present: this procedure should not be
    # called otherwise.
    if no_real_gs_credentials():
        assert False

    bucket_name = bucket_name_mangle('waletdefwuy', delimiter='')

    conn = storage.Client()

    def _clean():
        bucket = conn.get_bucket(bucket_name)
        for blob in bucket.list_blobs():
            try:
                bucket.delete_blob(blob.name)
            except exceptions.NotFound:
                pass

    try:
        conn.create_bucket(bucket_name)
    except exceptions.Conflict:
        # Conflict: bucket already present.  Re-use it, but
        # clean it out first.
        pass

    _clean()

    return bucket_name
Example #26
def print_bucket_acl(bucket_name):
    """Prints out a bucket's access control list."""
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)

    for entry in bucket.acl:
        print('{}: {}'.format(entry['role'], entry['entity']))
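A related sketch for modifying a bucket ACL rather than just printing it; the entity and role here are illustrative:
def grant_bucket_read(bucket_name, user_email):
    # Illustrative: give one user READER access via the bucket's ACL.
    bucket = storage.Client().bucket(bucket_name)
    bucket.acl.user(user_email).grant_read()
    bucket.acl.save()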
Example #27
 def __init__(self, project_name, bucket_name, credential_path):
     self.client = storage.Client(
         credentials=GCStorage.get_credentials(credential_path),
         project=project_name)
     self.bucket = self.client.get_bucket(bucket_name)
     self.bucket_name = bucket_name
     GCStorage.MONO = self
Example #28
def test_blob(cloud_config):
    """Provides a pre-existing blob in the test bucket."""
    bucket = storage.Client().bucket(cloud_config.storage_bucket)
    blob = bucket.blob('encryption_test_sigil')
    content = 'Hello, is it me you\'re looking for?'
    blob.upload_from_string(content,
                            encryption_key=TEST_ENCRYPTION_KEY_DECODED)
    return blob.name, content
Example #29
 def __init__(self, credentials, storage_bucket, requests):
     self.storage_bucket = "https://firebasestorage.googleapis.com/v0/b/" + storage_bucket
     self.credentials = credentials
     self.requests = requests
     self.path = ""
     if credentials:
         client = storage.Client(credentials=credentials, project=storage_bucket)
         self.bucket = client.get_bucket(storage_bucket)
Example #30
def to_storage(bucket, bucket_folder, file_name, path_to_file):

    client = storage.Client(project="gavinete-sv")
    bucket = client.get_bucket(f"{bucket}")
    blob = bucket.blob(f"{bucket_folder}/{file_name}")
    blob.upload_from_filename(f"{path_to_file}")

    print("Done!")