Example #1
def run(job, **kwargs):
    resource = kwargs.get('resource')
    create_custom_fields_as_needed()

    storage_account = '{{ storage_account }}'
    file = "{{ file }}"
    azure_storage_file_share_name = '{{ azure_storage_file_share_name }}'
    file_name = Path(file).name
    if file.startswith(settings.MEDIA_URL):
        set_progress("Converting relative URL to filesystem path")
        file = file.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)

    account_key = Resource.objects.filter(name__icontains='{{ storage_account }}')[0].azure_account_key
    fallback_account_key = Resource.objects.filter(name__icontains="{{ storage_account }}")[0].azure_account_key_fallback

    set_progress("Connecting To Azure...")
    file_service = FileService(account_name=storage_account, account_key=account_key)

    set_progress('Creating a file share...')
    file_service.create_share(share_name=azure_storage_file_share_name, quota=1)

    set_progress('Creating a file...')
    if file_service.exists(share_name=azure_storage_file_share_name, file_name=file_name, directory_name=''):
        file_service.create_file_from_path(share_name=azure_storage_file_share_name, file_name=file_name, directory_name='', local_file_path=file)
        return "WARNING", "File with this name already exists", "The file will be updated."
    else:
        file_service.create_file_from_path(share_name=azure_storage_file_share_name, file_name=file_name, directory_name='', local_file_path=file)
        resource.name = azure_storage_file_share_name + '-' + file_name
        resource.azure_storage_account_name = storage_account
        resource.azure_account_key = account_key
        resource.azure_account_key_fallback = fallback_account_key
        resource.azure_storage_file_share_name = azure_storage_file_share_name
        resource.azure_storage_file_name = file_name
        resource.save()
    return "Success", "The File has succesfully been uploaded", ""
Example #2
def _configure_auto_storage(cli_ctx, location):
    """Configures auto storage account for the cluster

    :param str location: location for the auto-storage account.
    :return (str, str): a tuple with auto storage account name and key.
    """
    from azure.mgmt.resource.resources.models import ResourceGroup
    from azure.storage.file import FileService
    from azure.storage.blob import BlockBlobService
    resource_group = _get_auto_storage_resource_group()
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    if resource_client.resource_groups.check_existence(resource_group):
        logger.warning('BatchAI will use existing %s resource group for auto-storage account',
                       resource_group)
    else:
        logger.warning('Creating %s resource group for auto-storage account', resource_group)
        resource_client.resource_groups.create_or_update(
            resource_group, ResourceGroup(location=location))
    storage_client = _get_storage_management_client(cli_ctx)
    account = None
    for a in storage_client.storage_accounts.list_by_resource_group(resource_group):
        if a.primary_location == location.lower().replace(' ', ''):
            account = a.name
            logger.warning('Using existing %s storage account as an auto-storage account', account)
            break
    if account is None:
        account = _create_auto_storage_account(storage_client, resource_group, location)
        logger.warning('Created auto storage account %s', account)
    key = _get_storage_account_key(cli_ctx, account, None)
    file_service = FileService(account, key)
    file_service.create_share(AUTO_STORAGE_SHARE_NAME, fail_on_exist=False)
    blob_service = BlockBlobService(account, key)
    blob_service.create_container(AUTO_STORAGE_CONTAINER_NAME, fail_on_exist=False)
    return account, key
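For context, a minimal sketch (not part of the original snippet) of how the returned (account, key) tuple might be consumed; AUTO_STORAGE_SHARE_NAME is the constant used above, while the location string and file name below are placeholders:

from azure.storage.file import FileService

account, key = _configure_auto_storage(cli_ctx, 'eastus')  # cli_ctx is supplied by the CLI framework
fs = FileService(account, key)
# Drop a placeholder text file into the auto-storage share created above.
fs.create_file_from_text(AUTO_STORAGE_SHARE_NAME, None, 'hello.txt', 'hello from auto-storage')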
Example #3
    def create_file_share(self, storage_account_name: str, share_name: str,
                          size: int, key: str) -> FileService:
        self.logger.info("Creating file share")
        file_service = FileService(account_name=storage_account_name,
                                   account_key=key)
        file_service.create_share(share_name, quota=size)

        return file_service
def prepare_azure_file_share_service(config, dataset_directory='dataset_directory'):
    # Create a file share
    service = FileService(config.storage_account_name, config.storage_account_key)
    service.create_share(config.workspace_file_share, fail_on_exist=False)

    # Create a directory in the file share
    service.create_directory(config.workspace_file_share, dataset_directory, fail_on_exist=False)

    return service
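A minimal usage sketch for prepare_azure_file_share_service, assuming a simple config object that exposes storage_account_name, storage_account_key and workspace_file_share (all values below are placeholders):

from types import SimpleNamespace

config = SimpleNamespace(
    storage_account_name='mystorageaccount',   # placeholder
    storage_account_key='<account-key>',       # placeholder
    workspace_file_share='workspace-share',    # placeholder
)
service = prepare_azure_file_share_service(config, dataset_directory='dataset_directory')
# The returned FileService can then upload data into the new directory, e.g.:
# service.create_file_from_path(config.workspace_file_share, 'dataset_directory',
#                               'data.csv', '/tmp/data.csv')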
Example #5
def create_azure_fileshare(share_prefix, account_name, account_key):
    """
    Generate a unique share name to avoid overlaps in shared infra
    :param share_prefix:
    :param account_name:
    :param account_key:
    :return:
    """

    # FIXME - Need to remove hardcoded directory link below

    d_dir = './WebInDeploy/bootstrap'
    share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4()))
    print('using share_name of: {}'.format(share_name))

    # archive_file_path = _create_archive_directory(files, share_prefix)

    try:
        # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this
        s = requests.Session()
        s.verify = False

        file_service = FileService(account_name=account_name,
                                   account_key=account_key,
                                   request_session=s)

        # print(file_service)
        if not file_service.exists(share_name):
            file_service.create_share(share_name)

        for d in ['config', 'content', 'software', 'license']:
            print('creating directory of type: {}'.format(d))
            if not file_service.exists(share_name, directory_name=d):
                file_service.create_directory(share_name, d)

            # FIXME - We only handle bootstrap files.  May need to handle other dirs

            if d == 'config':
                for filename in os.listdir(d_dir):
                    print('creating file: {0}'.format(filename))
                    file_service.create_file_from_path(
                        share_name, d, filename, os.path.join(d_dir, filename))

    except AttributeError as ae:
        # this can be returned on bad auth information
        print(ae)
        return "Authentication or other error creating bootstrap file_share in Azure"

    except AzureException as ahe:
        print(ahe)
        return str(ahe)
    except ValueError as ve:
        print(ve)
        return str(ve)

    print('all done')
    return share_name
def _create_file_share(storage_account, storage_account_key):
    """Creates Azure Files in the storage account to be mounted into a cluster

    :param str storage_account: name of the storage account.
    :param str storage_account_key: storage account key.
    """
    if storage_account == FAKE_STORAGE.name:
        return
    service = FileService(storage_account, storage_account_key)
    service.create_share(AZURE_FILES_NAME)
Example #7
    def _create_file_share(storage_account, storage_account_key):
        """Creates Azure Files in the storage account to be mounted into a cluster

        :param str storage_account: name of the storage account.
        :param str storage_account_key: storage account key.
        """
        if storage_account == Helpers.FAKE_STORAGE.name:
            return
        service = FileService(storage_account, storage_account_key)
        service.create_share(Helpers.AZURE_FILES_NAME)
Example #8
def file():
    static_dir_path = "D:\home\site\wwwroot\static"
    static_file_dir_path = static_dir_path + '\\' + 'files'
    account_name = 'hanastragetest'
    account_key = 'acount_key'
    root_share_name = 'root'
    share_name = 'images'
    directory_url = 'https://hanastragetest.file.core.windows.net/' + root_share_name + '/' + share_name

    # create local save directory
    if not os.path.exists(static_file_dir_path):
        os.mkdir(static_file_dir_path)

    file_service = FileService(account_name=account_name,
                               account_key=account_key)
    # create share
    file_service.create_share(root_share_name)

    # create directory
    file_service.create_directory(root_share_name, share_name)

    files = os.listdir(static_dir_path)
    for file in files:
        # delete
        if file_service.exists(root_share_name, share_name, file):
            file_service.delete_file(root_share_name, share_name, file)

        # file upload
        file_service.create_file_from_path(
            root_share_name,
            share_name,  # create the file under the 'images' directory of the share
            file,
            static_dir_path + '\\' + file,
            content_settings=ContentSettings(content_type='image/png'))

    generator = file_service.list_directories_and_files(
        root_share_name, share_name)

    html = ""
    for file in generator:
        # file download
        file_save_path = static_file_dir_path + '\\' + file.name
        file_service.get_file_to_path(root_share_name, share_name, file.name,
                                      file_save_path)
        html = "{}<img src='{}'>".format(html, file_save_path)

    result = {
        "result": True,
        "data": {
            "file_or_dir_name":
            [file_or_dir.name for file_or_dir in generator]
        }
    }
    return make_response(json.dumps(result, ensure_ascii=False) + html)
Example #9
def create_and_attach_file_storage(cfg, ws):
    if len(cfg.DataReference.localDirectoryFilesList) > 0:
        for ref in cfg.DataReference.localDirectoryFilesList:
            log.info("Attempting to create file share '%s' on storage account '%s'.", ref.remoteFileShare, ref.storageAccountName)
            file_service = FileService(ref.storageAccountName, ref.storageAccountKey)
            created = file_service.create_share(ref.remoteFileShare, fail_on_exist=False)
            if created:
                log.info("File Share '%s' on storage account '%s' created.", ref.remoteFileShare, ref.storageAccountName)
            else:
                log.info("File Share '%s' on storage account '%s' already existed.", ref.remoteFileShare, ref.storageAccountName)
            # Get most recent list of datastores linked to current workspace
            datastores = ws.datastores()
            # Validate if share_ds is created
            ds = None if ref.dataref_id not in datastores else Datastore(workspace = ws, name = ref.dataref_id)
            # Register the DS to the workspace
            if ds:
                if ds.account_name == ref.storageAccountName and ds.container_name == ref.remoteFileShare:
                    recreate = False
                else:
                    recreate = True
                    # also remove the existing reference
                    ds.unregister()
            else:
                recreate = True
            if recreate:
                log.info('Registering file share "{}" to AML datastore for AML workspace "{}" under datastore id "{}".'.format(ref.remoteFileShare, ws.name, ref.dataref_id))
                ds = Datastore.register_azure_file_share(workspace = ws,
                                                    datastore_name = ref.dataref_id, 
                                                    file_share_name = ref.remoteFileShare, 
                                                    account_name = ref.storageAccountName, 
                                                    account_key= ref.storageAccountKey,
                                                    overwrite=True,
                                                    )
            else:
                log.info('File share "{}" under AML workspace "{}" already registered under datastore id "{}".'.format(ref.remoteFileShare, ws.name, ref.dataref_id))
Example #10
def shares():
    # Create Container and Share
    global storage_account_key, blob_service, blob_share, file_service, file_share
    sak = storage_client.storage_accounts.list_keys(resourcegroupname,
                                                    storageaccountname)
    storage_account_key = sak.keys[0].value
    cloudstorage_client = CloudStorageAccount(storageaccountname,
                                              storage_account_key)
    blob_service = cloudstorage_client.create_block_blob_service()
    blob_share = blob_service.create_container(
        sharename, public_access=PublicAccess.Container)
    file_service = FileService(account_name=storageaccountname,
                               account_key=storage_account_key)
    file_share = file_service.create_share(sharename)
    # Copy Setup Files to Container and Share
    blob_service.create_blob_from_path(
        sharename,
        filename,
        filename,
    )
    file_service.create_file_from_path(
        sharename,
        '',
        filename,
        filename,
    )
Example #11
class FileShare(object):
    """
        Information from Azure files service
    """
    def __init__(self, account):
        self.account_name = account.storage_name()
        self.account_key = account.storage_key()
        self.files = FileService(
            self.account_name, self.account_key
        )

    def list(self):
        """
            list file shares
        """
        result = []
        try:
            for share in self.files.list_shares():
                result.append(format(share.name))
        except Exception as e:
            raise AzureFileShareListError(
                '%s: %s' % (type(e).__name__, format(e))
            )
        return result

    def create(self, share_name):
        """
            create a file share
        """
        try:
            self.files.create_share(share_name)
        except Exception as e:
            raise AzureFileShareCreateError(
                '%s: %s' % (type(e).__name__, format(e))
            )

    def delete(self, share_name):
        """
            delete a file share
        """
        try:
            self.files.delete_share(share_name)
        except Exception as e:
            raise AzureFileShareDeleteError(
                '%s: %s' % (type(e).__name__, format(e))
            )
    def upload_file(self, path):
        """Upload a file into the default share on the storage account.

        If the share doesn't exist, create it first.
        """
        file_service = FileService(
            account_name=self.account.name,
            account_key=self.key,
        )
        file_service.create_share(self.default_share)
        file_service.create_file_from_path(
            self.default_share,
            None,
            os.path.basename(path),
            path,
        )
        return '/'.join([self.default_share, os.path.basename(path)])
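A hedged usage sketch for the FileShare wrapper above, assuming an account object that exposes storage_name() and storage_key(); the DummyAccount below is purely illustrative:

class DummyAccount(object):
    """Stand-in for the real account object; returns placeholder credentials."""
    def storage_name(self):
        return 'mystorageaccount'   # placeholder
    def storage_key(self):
        return '<account-key>'      # placeholder

shares = FileShare(DummyAccount())
shares.create('backups')            # create a share
print(shares.list())                # list existing shares
shares.delete('backups')            # clean up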
def create_azure_fileshare(files, share_prefix, account_name, account_key):
    # generate a unique share name to avoid overlaps in shared infra
    share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4()))
    print('using share_name of: {}'.format(share_name))

    archive_file_path = _create_archive_directory(files, share_prefix)

    try:
        # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this
        s = requests.Session()
        s.verify = False

        file_service = FileService(account_name=account_name,
                                   account_key=account_key,
                                   request_session=s)

        # print(file_service)
        if not file_service.exists(share_name):
            file_service.create_share(share_name)

        for d in ['config', 'content', 'software', 'license']:
            print('creating directory of type: {}'.format(d))
            if not file_service.exists(share_name, directory_name=d):
                file_service.create_directory(share_name, d)

            d_dir = os.path.join(archive_file_path, d)
            for filename in os.listdir(d_dir):
                print('creating file: {0}'.format(filename))
                file_service.create_file_from_path(
                    share_name, d, filename, os.path.join(d_dir, filename))

    except AttributeError as ae:
        # this can be returned on bad auth information
        print(ae)
        return "Authentication or other error creating bootstrap file_share in Azure"

    except AzureException as ahe:
        print(ahe)
        return str(ahe)
    except ValueError as ve:
        print(ve)
        return str(ve)

    print('all done')
    return 'Azure file-share {} created successfully'.format(share_name)
Example #14
def saveModel(customer, modelName, model, storage_account_name,
              storage_account_key):
    fileService = FileService(account_name=storage_account_name,
                              account_key=storage_account_key)
    if not fileService.exists('trainedmodels', customer):
        fileService.create_share('trainedmodels')
        fileService.create_directory('trainedmodels', customer)

    if not fileService.exists('trainedmodels', customer + '/' + modelName):
        fileService.create_directory('trainedmodels',
                                     customer + '/' + modelName)

    modelPickle = pickle.dumps(model)
    timestr = time.strftime('%Y%m%d-%H%M%S')
    fileName = modelName + '_' + timestr + '.pkl'
    fileService.create_file_from_bytes('trainedmodels',
                                       customer + '/' + modelName, fileName,
                                       modelPickle)
    print(fileName + ' saved.')
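A short usage sketch for saveModel; any picklable object works as the model, and the customer and model names below are placeholders:

# A plain dict stands in for a trained model here; pickle handles it fine.
model = {'weights': [0.1, 0.2], 'bias': 0.05}
saveModel(customer='contoso', modelName='price-regressor', model=model,
          storage_account_name='mystorageaccount',
          storage_account_key='<account-key>')
# Expected layout: trainedmodels/contoso/price-regressor/price-regressor_<timestamp>.pkl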
import os
import urllib.request
import zipfile
from azure.storage.blob import BlockBlobService
from azure.storage.blob import PublicAccess
from azure.storage.file import FileService

ACCOUNT_NAME = os.environ['StorageAccountName']
ACCOUNT_KEY = os.environ['StorageAccountKey']
CONTAINER_NAME = os.environ['TelemetryContainerName']

az_blob_service = BlockBlobService(account_name=ACCOUNT_NAME,
                                   account_key=ACCOUNT_KEY)

az_blob_service.create_container(CONTAINER_NAME, fail_on_exist=False)

file_service = FileService(account_name=ACCOUNT_NAME, account_key=ACCOUNT_KEY)
file_service.create_share(share_name='model', quota=1)

source = os.environ['ModelZipUrl']
dest = 'model.zip'

urllib.request.urlretrieve(source, dest)

with zipfile.ZipFile(dest, "r") as zip_ref:
    zip_ref.extractall("model")

for root, dirs, files in os.walk('model', topdown=True):
    directory = os.path.relpath(root, 'model')
    if directory != '.':
        file_service.create_directory('model', directory)
    for f in files:
        file_service.create_file_from_path('model', directory, f,
Example #16
    smtp = smtplib.SMTP(host="smtp.gmail.com", port=587)
    smtp.starttls()
    smtp.login(username, password)
    smtp.sendmail(send_from, send_to, msg.as_string())
    smtp.close()
    print('Email sent')


# Gmail login details
username = '******'
password = '******'
default_address = []

# Login Details for Azure Storage
file_service = FileService(account_name='killianoneachtain', account_key='KEY')
file_service.create_share('security')
file_service.create_directory('security', 'securityPhotos')

cwd = os.getcwd()  # Get the current working directory (cwd)
path = cwd + "/securityPhotos"
# change file permissions
access_rights = 0o755

# create a photo directory if none exists
try:
    os.mkdir(path, access_rights)
except OSError:
    print("Creation of the directory %s failed. \nFolder already exists!" %
          path)
else:
    print("Successfully created the directory %s " % path)
Example #17
def main(path, debug, remote_directory, typhoonname):
    initialize.setup_cartopy()
    start_time = datetime.now()
    print(
        '---------------------AUTOMATION SCRIPT STARTED---------------------------------'
    )
    print(str(start_time))
    #%% check for active typhoons
    print(
        '---------------------check for active typhoons---------------------------------'
    )
    print(str(start_time))
    remote_dir = remote_directory
    if debug:
        typhoonname = 'SURIGAE'
        remote_dir = '20210421120000'
        logger.info(f"DEBUGGING piepline for typhoon{typhoonname}")
        Activetyphoon = [typhoonname]
    else:
        # If passed typhoon name is None or empty string
        if not typhoonname:
            Activetyphoon = Check_for_active_typhoon.check_active_typhoon()
            if not Activetyphoon:
                logger.info("No active typhoon in PAR stop pipeline")
                sys.exit()
            logger.info(f"Running on active Typhoon(s) {Activetyphoon}")
        else:
            Activetyphoon = [typhoonname]
            remote_dir = remote_directory
            logger.info(f"Running on custom Typhoon {Activetyphoon}")

    Alternative_data_point = (start_time -
                              timedelta(hours=24)).strftime("%Y%m%d")

    date_dir = start_time.strftime("%Y%m%d%H")
    Input_folder = os.path.join(path, f'forecast/Input/{date_dir}/Input/')
    Output_folder = os.path.join(path, f'forecast/Output/{date_dir}/Output/')

    if not os.path.exists(Input_folder):
        os.makedirs(Input_folder)
    if not os.path.exists(Output_folder):
        os.makedirs(Output_folder)
    #download NOAA rainfall
    try:
        #Rainfall_data_window.download_rainfall_nomads(Input_folder,path,Alternative_data_point)
        Rainfall_data.download_rainfall_nomads(Input_folder, path,
                                               Alternative_data_point)
        rainfall_error = False
    except Exception:
        traceback.print_exc()
        #logger.warning(f'Rainfall download failed, performing download in R script')
        logger.info(
            f'Rainfall download failed, performing download in R script')
        rainfall_error = True
    ###### download UCL data

    try:
        ucl_data.create_ucl_metadata(path, os.environ['UCL_USERNAME'],
                                     os.environ['UCL_PASSWORD'])
        ucl_data.process_ucl_data(path, Input_folder,
                                  os.environ['UCL_USERNAME'],
                                  os.environ['UCL_PASSWORD'])
    except Exception:
        logger.info(f'UCL download failed')
    #%%
    ##Create grid points to calculate Winfield
    cent = Centroids()
    cent.set_raster_from_pnt_bounds((118, 6, 127, 19), res=0.05)
    # this option is added to make the script scalable globally (TODO)
    #cent.set_raster_from_pnt_bounds((LonMin,LatMin,LonMax,LatMax), res=0.05)
    cent.check()
    cent.plot()
    ####
    admin = gpd.read_file(
        os.path.join(path, "./data-raw/phl_admin3_simpl2.geojson"))
    df = pd.DataFrame(data=cent.coord)
    df["centroid_id"] = "id" + (df.index).astype(str)
    centroid_idx = df["centroid_id"].values
    ncents = cent.size
    df = df.rename(columns={0: "lat", 1: "lon"})
    df = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon, df.lat))
    #df.to_crs({'init': 'epsg:4326'})
    df.crs = {'init': 'epsg:4326'}
    df_admin = sjoin(df, admin, how="left").dropna()

    # Sometimes the ECMWF ftp server complains about too many requests
    # This code allows several retries with some sleep time in between
    n_tries = 0
    while True:
        try:
            logger.info("Downloading ECMWF typhoon tracks")
            bufr_files = TCForecast.fetch_bufr_ftp(remote_dir=remote_dir)
            fcast = TCForecast()
            fcast.fetch_ecmwf(files=bufr_files)
        except ftplib.all_errors as e:
            n_tries += 1
            if n_tries >= ECMWF_MAX_TRIES:
                logger.error(
                    f' Data downloading from ECMWF failed: {e}, '
                    f'reached limit of {ECMWF_MAX_TRIES} tries, exiting')
                sys.exit()
            logger.error(
                f' Data downloading from ECMWF failed: {e}, retrying after {ECMWF_SLEEP} s'
            )
            time.sleep(ECMWF_SLEEP)
            continue
        break

    #%% filter data downloaded in the above step for active typhoons  in PAR
    # filter tracks with name of current typhoons and drop tracks with only one timestep
    fcast.data = [
        track_data_clean.track_data_clean(tr) for tr in fcast.data
        if (tr.time.size > 1 and tr.name in Activetyphoon)
    ]

    # fcast.data = [tr for tr in fcast.data if tr.name in Activetyphoon]
    # fcast.data = [tr for tr in fcast.data if tr.time.size>1]
    for typhoons in Activetyphoon:
        #typhoons=Activetyphoon[0]
        logger.info(f'Processing data {typhoons}')
        fname = open(
            os.path.join(path, 'forecast/Input/',
                         "typhoon_info_for_model.csv"), 'w')
        fname.write('source,filename,event,time' + '\n')
        if not rainfall_error:
            line_ = 'Rainfall,' + '%srainfall' % Input_folder + ',' + typhoons + ',' + date_dir  #StormName #
            fname.write(line_ + '\n')

        line_ = 'Output_folder,' + '%s' % Output_folder + ',' + typhoons + ',' + date_dir  #StormName #
        #line_='Rainfall,'+'%sRainfall/' % Input_folder +','+ typhoons + ',' + date_dir #StormName #
        fname.write(line_ + '\n')

        #typhoons='SURIGAE'  # to run it manually for any typhoon
        # select windspeed for HRS model

        fcast.data = [tr for tr in fcast.data if tr.name == typhoons]
        tr_HRS = [tr for tr in fcast.data if (tr.is_ensemble == 'False')]

        if tr_HRS != []:
            HRS_SPEED = (tr_HRS[0].max_sustained_wind.values / 0.84).tolist(
            )  ############# 0.84 is conversion factor for ECMWF 10MIN TO 1MIN AVERAGE
            dfff = tr_HRS[0].to_dataframe()
            dfff[['VMAX', 'LAT',
                  'LON']] = dfff[['max_sustained_wind', 'lat', 'lon']]
            dfff['YYYYMMDDHH'] = dfff.index.values
            dfff['YYYYMMDDHH'] = dfff['YYYYMMDDHH'].apply(
                lambda x: x.strftime("%Y%m%d%H%M"))
            dfff['STORMNAME'] = typhoons
            dfff[['YYYYMMDDHH', 'VMAX', 'LAT', 'LON',
                  'STORMNAME']].to_csv(os.path.join(Input_folder,
                                                    'ecmwf_hrs_track.csv'),
                                       index=False)
            line_ = 'ecmwf,' + '%secmwf_hrs_track.csv' % Input_folder + ',' + typhoons + ',' + date_dir  #StormName #
            #line_='Rainfall,'+'%sRainfall/' % Input_folder +','+ typhoons + ',' + date_dir #StormName #
            fname.write(line_ + '\n')
            # Adjust track time step
            data_forced = [
                tr.where(tr.time <= max(tr_HRS[0].time.values), drop=True)
                for tr in fcast.data
            ]
            # data_forced = [track_data_clean.track_data_force_HRS(tr,HRS_SPEED) for tr in data_forced] # forced with HRS windspeed

            #data_forced= [track_data_clean.track_data_clean(tr) for tr in fcast.data] # taking speed of ENS
            # interpolate to 3h steps from the original 6h
            #fcast.equal_timestep(3)
        else:
            len_ar = np.min([len(var.lat.values) for var in fcast.data])
            lat_ = np.ma.mean([var.lat.values[:len_ar] for var in fcast.data],
                              axis=0)
            lon_ = np.ma.mean([var.lon.values[:len_ar] for var in fcast.data],
                              axis=0)
            YYYYMMDDHH = pd.date_range(fcast.data[0].time.values[0],
                                       periods=len_ar,
                                       freq="H")
            vmax_ = np.ma.mean(
                [var.max_sustained_wind.values[:len_ar] for var in fcast.data],
                axis=0)
            d = {
                'YYYYMMDDHH': YYYYMMDDHH,
                "VMAX": vmax_,
                "LAT": lat_,
                "LON": lon_
            }
            dfff = pd.DataFrame(d)
            dfff['STORMNAME'] = typhoons
            dfff['YYYYMMDDHH'] = dfff['YYYYMMDDHH'].apply(
                lambda x: x.strftime("%Y%m%d%H%M"))
            dfff[['YYYYMMDDHH', 'VMAX', 'LAT', 'LON',
                  'STORMNAME']].to_csv(os.path.join(Input_folder,
                                                    'ecmwf_hrs_track.csv'),
                                       index=False)
            line_ = 'ecmwf,' + '%secmwf_hrs_track.csv' % Input_folder + ',' + typhoons + ',' + date_dir  #StormName #
            #line_='Rainfall,'+'%sRainfall/' % Input_folder +','+ typhoons + ',' + date_dir #StormName #
            fname.write(line_ + '\n')
            data_forced = fcast.data

        # calculate windfields for each ensemble
        threshold = 0  #(threshold to filter dataframe /reduce data )
        df = pd.DataFrame(data=cent.coord)
        df["centroid_id"] = "id" + (df.index).astype(str)
        centroid_idx = df["centroid_id"].values
        ncents = cent.size
        df = df.rename(columns={0: "lat", 1: "lon"})

        # calculate wind field for each ensemble member
        list_intensity = []
        distan_track = []
        for tr in data_forced:
            logger.info(
                f"Running on ensemble # {tr.ensemble_number} for typhoon {tr.name}"
            )
            track = TCTracks()
            typhoon = TropCyclone()
            track.data = [tr]
            #track.equal_timestep(3)
            tr = track.data[0]
            typhoon.set_from_tracks(track, cent, store_windfields=True)
            # Make intensity plot using the high resolution member
            if tr.is_ensemble == 'False':
                logger.info("High res member: creating intensity plot")
                plot_intensity.plot_inensity(typhoon=typhoon,
                                             event=tr.sid,
                                             output_dir=Output_folder,
                                             date_dir=date_dir,
                                             typhoon_name=tr.name)
            windfield = typhoon.windfields
            nsteps = windfield[0].shape[0]
            centroid_id = np.tile(centroid_idx, nsteps)
            intensity_3d = windfield[0].toarray().reshape(nsteps, ncents, 2)
            intensity = np.linalg.norm(intensity_3d, axis=-1).ravel()
            timesteps = np.repeat(track.data[0].time.values, ncents)
            #timesteps = np.repeat(tr.time.values, ncents)
            timesteps = timesteps.reshape((nsteps, ncents)).ravel()
            inten_tr = pd.DataFrame({
                'centroid_id': centroid_id,
                'value': intensity,
                'timestamp': timesteps,
            })
            inten_tr = inten_tr[inten_tr.value > threshold]
            inten_tr['storm_id'] = tr.sid
            inten_tr['ens_id'] = tr.sid + '_' + str(tr.ensemble_number)
            inten_tr['name'] = tr.name
            inten_tr = (pd.merge(inten_tr,
                                 df_admin,
                                 how='outer',
                                 on='centroid_id').dropna().groupby(
                                     ['adm3_pcode', 'ens_id'],
                                     as_index=False).agg(
                                         {"value": ['count', 'max']}))
            inten_tr.columns = [
                x for x in ['adm3_pcode', 'storm_id', 'value_count', 'v_max']
            ]
            list_intensity.append(inten_tr)
            distan_track1 = []
            for index, row in df.iterrows():
                dist = np.min(
                    np.sqrt(
                        np.square(tr.lat.values - row['lat']) +
                        np.square(tr.lon.values - row['lon'])))
                distan_track1.append(dist * 111)
            dist_tr = pd.DataFrame({
                'centroid_id': centroid_idx,
                'value': distan_track1
            })
            dist_tr['storm_id'] = tr.sid
            dist_tr['name'] = tr.name
            dist_tr['ens_id'] = tr.sid + '_' + str(tr.ensemble_number)
            dist_tr = (pd.merge(dist_tr,
                                df_admin,
                                how='outer',
                                on='centroid_id').dropna().groupby(
                                    ['adm3_pcode', 'name', 'ens_id'],
                                    as_index=False).agg({'value': 'min'}))
            dist_tr.columns = [
                x for x in ['adm3_pcode', 'name', 'storm_id', 'dis_track_min']
            ]  # join_left_df_.columns.ravel()]
            distan_track.append(dist_tr)
        df_intensity_ = pd.concat(list_intensity)
        distan_track1 = pd.concat(distan_track)

        typhhon_df = pd.merge(df_intensity_,
                              distan_track1,
                              how='left',
                              on=['adm3_pcode', 'storm_id'])

        typhhon_df.to_csv(os.path.join(Input_folder, 'windfield.csv'),
                          index=False)

        line_ = 'windfield,' + '%swindfield.csv' % Input_folder + ',' + typhoons + ',' + date_dir  #StormName #
        #line_='Rainfall,'+'%sRainfall/' % Input_folder +','+ typhoons + ',' + date_dir #StormName #
        fname.write(line_ + '\n')
        fname.close()

        #############################################################
        #### Run IBF model
        #############################################################
        os.chdir(path)

        if platform == "linux" or platform == "linux2":  #check if running on linux or windows os
            # linux
            try:
                p = subprocess.check_call(
                    ["Rscript", "run_model_V2.R",
                     str(rainfall_error)])
            except subprocess.CalledProcessError as e:
                logger.error('failed to execute R script')
                raise ValueError(str(e))
        elif platform == "win32":  #if OS is windows edit the path for Rscript
            try:
                p = subprocess.check_call([
                    "C:/Program Files/R/R-4.1.0/bin/Rscript", "run_model_V2.R",
                    str(rainfall_error)
                ])
            except subprocess.CalledProcessError as e:
                logger.error('failed to execute R script')
                raise ValueError(str(e))

        #############################################################
        # send email in case of landfall-typhoon
        #############################################################

        image_filenames = list(Path(Output_folder).glob('*.png'))
        data_filenames = list(Path(Output_folder).glob('*.csv'))

        if image_filenames or data_filenames:
            message_html = """\
            <html>
            <body>
            <h1>IBF model run result </h1>
            <p>Please find attached a map and data with updated model run</p>
            <img src="cid:Impact_Data">
            </body>
            </html>
            """
            Sendemail.sendemail(
                smtp_server=os.environ["SMTP_SERVER"],
                smtp_port=int(os.environ["SMTP_PORT"]),
                email_username=os.environ["EMAIL_LOGIN"],
                email_password=os.environ["EMAIL_PASSWORD"],
                email_subject='Updated impact map for a new Typhoon in PAR',
                from_address=os.environ["EMAIL_FROM"],
                to_address_list=os.environ["EMAIL_TO_LIST"].split(','),
                cc_address_list=os.environ["EMAIL_CC_LIST"].split(','),
                message_html=message_html,
                filename_list=image_filenames + data_filenames)
        else:
            raise FileNotFoundError(
                f'No .png or .csv found in {Output_folder}')
            ##################### upload model output to 510 datalake ##############

        file_service = FileService(
            account_name=os.environ["AZURE_STORAGE_ACCOUNT"],
            protocol='https',
            connection_string=os.environ["AZURE_CONNECTING_STRING"])
        file_service.create_share('forecast')
        OutPutFolder = date_dir
        file_service.create_directory('forecast', OutPutFolder)

        for img_file in image_filenames:
            file_service.create_file_from_path(
                'forecast',
                OutPutFolder,
                os.fspath(img_file.parts[-1]),
                img_file,
                content_settings=ContentSettings(content_type='image/png'))

        for data_file in data_filenames:
            file_service.create_file_from_path(
                'forecast',
                OutPutFolder,
                os.fspath(data_file.parts[-1]),
                data_file,
                content_settings=ContentSettings(content_type='text/csv'))

        ##################### upload model input (rainfall + wind intensity) to 510 datalake ##############
        # To DO

    print(
        '---------------------AUTOMATION SCRIPT FINISHED---------------------------------'
    )
    print(str(datetime.now()))
class AzureFileWriter(FilebaseBaseWriter):
    """
    Writes items to Azure file shares. It is a file-based writer, so it has the
    filebase option available.

        - account_name (str)
            Public access name of the Azure account.

        - account_key (str)
            Public access key to the Azure account.

        - share (str)
            File share name.

        - filebase (str)
            Base path to store the items in the share.

    """
    supported_options = {
        'account_name': {
            'type': six.string_types,
            'env_fallback': 'EXPORTERS_AZUREWRITER_NAME'
        },
        'account_key': {
            'type': six.string_types,
            'env_fallback': 'EXPORTERS_AZUREWRITER_KEY'
        },
        'share': {
            'type': six.string_types
        }
    }

    def __init__(self, options, meta, *args, **kw):
        from azure.storage.file import FileService
        super(AzureFileWriter, self).__init__(options, meta, *args, **kw)
        account_name = self.read_option('account_name')
        account_key = self.read_option('account_key')
        self.azure_service = FileService(account_name, account_key)
        self.share = self.read_option('share')
        self.azure_service.create_share(self.share)
        self.logger.info('AzureWriter has been initiated. '
                         'Writing to share {}'.format(self.share))
        self.set_metadata('files_counter', Counter())
        self.set_metadata('files_written', [])

    def write(self, dump_path, group_key=None, file_name=None):
        if group_key is None:
            group_key = []
        self._write_file(dump_path, group_key, file_name)

    def _update_metadata(self, dump_path, filebase_path, file_name):
        buffer_info = self.write_buffer.metadata[dump_path]
        file_info = {
            'file_name': file_name,
            'filebase_path': filebase_path,
            'size': buffer_info['size'],
            'number_of_records': buffer_info['number_of_records']
        }
        files_written = self.get_metadata('files_written')
        files_written.append(file_info)
        self.set_metadata('files_written', files_written)
        self.get_metadata('files_counter')[filebase_path] += 1

    def _ensure_path(self, filebase):
        path = filebase.split('/')
        folders_added = []
        for sub_path in path:
            folders_added.append(sub_path)
            parent = '/'.join(folders_added)
            self.azure_service.create_directory(self.share, parent)

    @retry_long
    def _write_file(self, dump_path, group_key, file_name=None):
        filebase_path, file_name = self.create_filebase_name(
            group_key, file_name=file_name)
        self._ensure_path(filebase_path)
        self.azure_service.create_file_from_path(
            self.share,
            filebase_path,
            file_name,
            dump_path,
            max_connections=5,
        )
        self._update_metadata(dump_path, filebase_path, file_name)

    def get_file_suffix(self, path, prefix):
        number_of_keys = self.get_metadata('files_counter').get(path, 0)
        suffix = '{}'.format(str(number_of_keys))
        return suffix

    def _check_write_consistency(self):
        from azure.common import AzureMissingResourceHttpError
        for file_info in self.get_metadata('files_written'):
            try:
                afile = self.azure_service.get_file_properties(
                    self.share, file_info['filebase_path'],
                    file_info['file_name'])
                file_size = afile.properties.content_length
                if str(file_size) != str(file_info['size']):
                    raise InconsistentWriteState(
                        'File {} has unexpected size. (expected {} - got {})'.
                        format(file_info['file_name'], file_info['size'],
                               file_size))
            except AzureMissingResourceHttpError:
                raise InconsistentWriteState('Missing file {}'.format(
                    file_info['file_name']))
        self.logger.info('Consistency check passed')
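A hedged configuration sketch for the writer above. The exact wiring into an exporters pipeline depends on the surrounding framework, so only the option keys described in the docstring are shown; all values are placeholders:

writer_options = {
    'account_name': 'mystorageaccount',   # or set EXPORTERS_AZUREWRITER_NAME
    'account_key': '<account-key>',       # or set EXPORTERS_AZUREWRITER_KEY
    'share': 'exports',
    'filebase': 'daily/dump',             # base path inside the share
}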
Example #19
import twitter
import hashlib
import requests
import time
import os
""" Auxiliary """

from azure.storage.file import FileService, ContentSettings

fileService = FileService(
    account_name='cs7b04dc31e3552x4267x9c3',
    account_key=
    'ihwu6KLKRkUv3/dF3ELyhqbsB34jUJGyyVexD3gr2PUhcL3X5XFg/aFumVEHZCUHqqfP+m2UBM1Lni3uw26WcA=='
)
filesDir = 'fileshare'
fileService.create_share(filesDir)
""" VT """
apiKey = '811a31748544dd8d3a2d8a13785c2e78ffb2c351b5d56b37168ab6ff6315dc1f'


def getFile(dirName, fileName):
    file_ = fileService.get_file_to_text(filesDir, dirName, fileName)
    return file_


""" Main logic """


def getAzureFileReport(githubUser, githubProject, fileName):
    if githubUser == "nada" or githubProject == "nada" or fileName == "nada":
        return "Please provide all the fields!"
Example #20
RECEIPTS_DIRECTORY = 'delivery-receipts'
RECEIPTS_URL = ('http://www.cmegroup.com/delivery_reports/'
                'deliverable-commodities-under-registration.xls')
RECEIPTS_FILENAME_SUFFIX = '-deliverable-commodities-under-registration.xls'

STOCKS_DIRECTORY = 'stocks-of-grain'
STOCKS_URL = 'http://www.cmegroup.com/delivery_reports/stocks-of-grain-updated-tuesday.xls'
STOCKS_FILENAME_SUFFIX = '-stocks-of-grain-updated-tuesday.xls'


def filename(suffix):
    return datetime.now().strftime('%Y%m%d') + suffix


def get_bytes(url):
    return requests.get(url).content


f = FileService(account_name=ACCOUNT_NAME, account_key=KEY)
f.create_share(SHARE_NAME)

f.create_directory(SHARE_NAME, RECEIPTS_DIRECTORY)
f.create_file_from_bytes(SHARE_NAME, RECEIPTS_DIRECTORY,
                         filename(RECEIPTS_FILENAME_SUFFIX),
                         get_bytes(RECEIPTS_URL))

f.create_directory(SHARE_NAME, STOCKS_DIRECTORY)
f.create_file_from_bytes(SHARE_NAME, STOCKS_DIRECTORY,
                         filename(STOCKS_FILENAME_SUFFIX),
                         get_bytes(STOCKS_URL))
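For completeness, the uploaded reports can be read back with the same FileService instance; a minimal sketch reusing the share and directory created above:

# Download today's receipts report back to a local file with the same name (sketch).
f.get_file_to_path(SHARE_NAME, RECEIPTS_DIRECTORY,
                   filename(RECEIPTS_FILENAME_SUFFIX),
                   filename(RECEIPTS_FILENAME_SUFFIX))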
    def test_job_level_mounting(self, resource_group, location, cluster, storage_account, storage_account_key):
        """Tests if it's possible to mount external file systems for a job."""
        job_name = 'job'

        # Create file share and container to mount on the job level
        if storage_account.name != helpers.FAKE_STORAGE.name:
            files = FileService(storage_account.name, storage_account_key)
            files.create_share('jobshare', fail_on_exist=False)
            blobs = BlockBlobService(storage_account.name, storage_account_key)
            blobs.create_container('jobcontainer', fail_on_exist=False)

        job = self.client.jobs.create(
            resource_group.name,
            job_name,
            parameters=models.JobCreateParameters(
                location=location,
                cluster=models.ResourceId(id=cluster.id),
                node_count=1,
                mount_volumes=models.MountVolumes(
                    azure_file_shares=[
                        models.AzureFileShareReference(
                            account_name=storage_account.name,
                            azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
                                storage_account.name, 'jobshare'),
                            relative_mount_path='job_afs',
                            credentials=models.AzureStorageCredentialsInfo(
                                account_key=storage_account_key
                            ),
                        )
                    ],
                    azure_blob_file_systems=[
                        models.AzureBlobFileSystemReference(
                            account_name=storage_account.name,
                            container_name='jobcontainer',
                            relative_mount_path='job_bfs',
                            credentials=models.AzureStorageCredentialsInfo(
                                account_key=storage_account_key
                            ),
                        )
                    ]
                ),
                # Put standard output on cluster level AFS to check that the job has access to it.
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(helpers.AZURE_FILES_MOUNTING_PATH),
                # Create two output directories on job level AFS and blobfuse.
                output_directories=[
                    models.OutputDirectory(id='OUTPUT1', path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_afs'),
                    models.OutputDirectory(id='OUTPUT2', path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_bfs')
                ],
                # Check that the job preparation has access to job level file systems.
                job_preparation=models.JobPreparation(
                    command_line='echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/prep_afs.txt; '
                                 'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/prep_bfs.txt; '
                                 'echo done'
                ),
                # Check that the job has access to job
                custom_toolkit_settings=models.CustomToolkitSettings(
                    command_line='echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/job_afs.txt; '
                                 'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/job_bfs.txt; '
                                 'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT1/afs; '
                                 'echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/afs/job_afs.txt; '
                                 'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs; '
                                 'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs/job_bfs.txt; '
                                 'echo done'
                )
            )
        ).result()
        self.assertEqual(
            helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
                                            helpers.MINUTE),
            models.ExecutionState.succeeded)

        job = self.client.jobs.get(resource_group.name, job.name)
        # Assert job and job prep standard output is populated on cluster level filesystem
        helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
                                     helpers.STANDARD_OUTPUT_DIRECTORY_ID,
                                     {u'stdout.txt': u'done\n', u'stderr.txt': u'',
                                      u'stdout-job_prep.txt': u'done\n', u'stderr-job_prep.txt': u''})
        # Assert files are generated on job level AFS
        helpers.assert_job_files_are(self, self.client, resource_group.name, job.name, 'OUTPUT1',
                                     {u'job_afs.txt': u'afs\n', u'prep_afs.txt': u'afs\n', u'afs': None})
        # Assert files are generated on job level blobfuse
        helpers.assert_job_files_are(self, self.client, resource_group.name, job.name, 'OUTPUT2',
                                     {u'job_bfs.txt': u'bfs\n', u'prep_bfs.txt': u'bfs\n', u'bfs': None})
        # Assert subfolders are available via API
        helpers.assert_job_files_in_path_are(self, self.client, resource_group.name, job.name, 'OUTPUT1',
                                             'afs', {u'job_afs.txt': u'afs\n'})
        helpers.assert_job_files_in_path_are(self, self.client, resource_group.name, job.name, 'OUTPUT2',
                                             'bfs', {u'job_bfs.txt': u'bfs\n'})

        # Assert that we can access the output files created on job level mount volumes directly in storage using path
        # segment returned by the server.
        if storage_account.name != helpers.FAKE_STORAGE.name:
            files = FileService(storage_account.name, storage_account_key)
            self.assertTrue(
                files.exists('jobshare', job.job_output_directory_path_segment +
                             '/' + helpers.OUTPUT_DIRECTORIES_FOLDER_NAME, 'job_afs.txt'))
            blobs = BlockBlobService(storage_account.name, storage_account_key)
            self.assertTrue(
                blobs.exists('jobcontainer', job.job_output_directory_path_segment +
                             '/' + helpers.OUTPUT_DIRECTORIES_FOLDER_NAME + '/job_bfs.txt'))
        # After the job is done the filesystems should be unmounted automatically, check this by submitting a new job.
        checker = self.client.jobs.create(
            resource_group.name,
            'checker',
            parameters=models.JobCreateParameters(
                location=location,
                cluster=models.ResourceId(id=cluster.id),
                node_count=1,
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(helpers.AZURE_FILES_MOUNTING_PATH),
                custom_toolkit_settings=models.CustomToolkitSettings(
                    command_line='echo job; df | grep -E "job_bfs|job_afs"'
                )
            )
        ).result()
        # Check the job failed because there are not job level mount volumes anymore
        self.assertEqual(
            helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, checker.name,
                                            helpers.MINUTE),
            models.ExecutionState.failed)
        # Check that the cluster level AFS was still mounted
        helpers.assert_job_files_are(self, self.client, resource_group.name, checker.name,
                                     helpers.STANDARD_OUTPUT_DIRECTORY_ID,
                                     {u'stdout.txt': u'job\n', u'stderr.txt': u''})
class AzureFileWriter(FilebaseBaseWriter):
    """
    Writes items to Azure file shares. It is a file-based writer, so it has the
    filebase option available.

        - account_name (str)
            Public access name of the Azure account.

        - account_key (str)
            Public access key to the Azure account.

        - share (str)
            File share name.

        - filebase (str)
            Base path to store the items in the share.

    """

    supported_options = {
        "account_name": {"type": six.string_types, "env_fallback": "EXPORTERS_AZUREWRITER_NAME"},
        "account_key": {"type": six.string_types, "env_fallback": "EXPORTERS_AZUREWRITER_KEY"},
        "share": {"type": six.string_types},
    }

    def __init__(self, options, meta, *args, **kw):
        from azure.storage.file import FileService

        super(AzureFileWriter, self).__init__(options, meta, *args, **kw)
        account_name = self.read_option("account_name")
        account_key = self.read_option("account_key")
        self.azure_service = FileService(account_name, account_key)
        self.share = self.read_option("share")
        self.azure_service.create_share(self.share)
        self.logger.info("AzureWriter has been initiated." "Writing to share {}".format(self.share))
        self.set_metadata("files_counter", Counter())
        self.set_metadata("files_written", [])

    def write(self, dump_path, group_key=None, file_name=None):
        if group_key is None:
            group_key = []
        self._write_file(dump_path, group_key, file_name)

    def _update_metadata(self, dump_path, filebase_path, file_name):
        buffer_info = self.write_buffer.metadata[dump_path]
        file_info = {
            "file_name": file_name,
            "filebase_path": filebase_path,
            "size": buffer_info["size"],
            "number_of_records": buffer_info["number_of_records"],
        }
        files_written = self.get_metadata("files_written")
        files_written.append(file_info)
        self.set_metadata("files_written", files_written)
        self.get_metadata("files_counter")[filebase_path] += 1

    def _ensure_path(self, filebase):
        path = filebase.split("/")
        folders_added = []
        for sub_path in path:
            folders_added.append(sub_path)
            parent = "/".join(folders_added)
            self.azure_service.create_directory(self.share, parent)

    @retry_long
    def _write_file(self, dump_path, group_key, file_name=None):
        filebase_path, file_name = self.create_filebase_name(group_key, file_name=file_name)
        self._ensure_path(filebase_path)
        self.azure_service.create_file_from_path(self.share, filebase_path, file_name, dump_path, max_connections=5)
        self._update_metadata(dump_path, filebase_path, file_name)

    def get_file_suffix(self, path, prefix):
        number_of_keys = self.get_metadata("files_counter").get(path, 0)
        suffix = "{}".format(str(number_of_keys))
        return suffix

    def _check_write_consistency(self):
        from azure.common import AzureMissingResourceHttpError

        for file_info in self.get_metadata("files_written"):
            try:
                afile = self.azure_service.get_file_properties(
                    self.share, file_info["filebase_path"], file_info["file_name"]
                )
                file_size = afile.properties.content_length
                if str(file_size) != str(file_info["size"]):
                    raise InconsistentWriteState(
                        "File {} has unexpected size. (expected {} - got {})".format(
                            file_info["file_name"], file_info["size"], file_size
                        )
                    )
            except AzureMissingResourceHttpError:
                raise InconsistentWriteState("Missing file {}".format(file_info["file_name"]))
        self.logger.info("Consistency check passed")
Example #23
containername = os.environ['AZURE_CONTAINER_NAME']
subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
resource_group_params = {'location' : location}
sku = 'standard_ragrs'
kind = 'BlobStorage'
storage_account_params = {'sku': sku, 'kind': kind, 'location': location}

# Configure Credentials
credentials = ServicePrincipalCredentials(client_id=os.environ['AZURE_CLIENT_ID'],secret=os.environ['AZURE_CLIENT_SECRET'],tenant=os.environ['AZURE_TENANT_ID'])
resource_client = ResourceManagementClient(credentials, subscription_id)
storage_client = StorageManagementClient(credentials, subscription_id)

# Create Resource Group & Storage Account
resource_client.resource_groups.create_or_update(resourcegroupname, resource_group_params)
create_sa = storage_client.storage_accounts.create(resourcegroupname, storageaccountname, {'location':'eastus','kind':'storage','sku':{'name':'standard_ragrs'}})
create_sa.wait()

# Create Container
sak = storage_client.storage_accounts.list_keys(resourcegroupname, storageaccountname)
storageaccountkey = sak.keys[0].value
storage_client = CloudStorageAccount(storageaccountname, storageaccountkey)
blob_service = storage_client.create_block_blob_service()
blob_service.create_container(containername,public_access=PublicAccess.Blob)

# Copy Files
file_service = FileService(account_name=storageaccountname, account_key=storageaccountkey)
file_service.create_share(containername)
file_service.create_directory(containername, 'directory1')
file_service.create_file_from_path(containername,'directory1','55224azuresetup.ps1','55224azuresetup.ps1',)
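A quick hedged check that the upload above succeeded, reusing the same file_service instance:

# Sketch: list the contents of directory1 to confirm the setup script landed there.
for item in file_service.list_directories_and_files(containername, 'directory1'):
    print(item.name)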

Example #24
    def test_job_level_mounting(self, resource_group, location, cluster,
                                storage_account, storage_account_key):
        """Tests if it's possible to mount external file systems for a job."""
        job_name = 'job'

        # Create file share and container to mount on the job level
        if storage_account.name != FAKE_STORAGE.name:
            files = FileService(storage_account.name, storage_account_key)
            files.create_share('jobshare', fail_on_exist=False)
            blobs = BlockBlobService(storage_account.name, storage_account_key)
            blobs.create_container('jobcontainer', fail_on_exist=False)

        job = self.client.jobs.create(
            resource_group.name,
            job_name,
            parameters=models.JobCreateParameters(
                location=location,
                cluster=models.ResourceId(id=cluster.id),
                node_count=1,
                mount_volumes=models.
                MountVolumes(azure_file_shares=[
                    models.AzureFileShareReference(
                        account_name=storage_account.name,
                        azure_file_url='https://{0}.file.core.windows.net/{1}'.
                        format(storage_account.name, 'jobshare'),
                        relative_mount_path='job_afs',
                        credentials=models.AzureStorageCredentialsInfo(
                            account_key=storage_account_key),
                    )
                ],
                             azure_blob_file_systems=[
                                 models.AzureBlobFileSystemReference(
                                     account_name=storage_account.name,
                                     container_name='jobcontainer',
                                     relative_mount_path='job_bfs',
                                     credentials=models.
                                     AzureStorageCredentialsInfo(
                                         account_key=storage_account_key),
                                 )
                             ]),
                # Put standard output on cluster level AFS to check that the job has access to it.
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(
                    AZURE_FILES_MOUNTING_PATH),
                # Create two output directories on job level AFS and blobfuse.
                output_directories=[
                    models.OutputDirectory(
                        id='OUTPUT1',
                        path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_afs'),
                    models.OutputDirectory(
                        id='OUTPUT2',
                        path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_bfs')
                ],
                # Check that the job preparation has access to job level file systems.
                job_preparation=models.JobPreparation(
                    command_line=
                    'echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/prep_afs.txt; '
                    'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/prep_bfs.txt; '
                    'echo done'),
                # Check that the job has access to job level file systems.
                custom_toolkit_settings=models.CustomToolkitSettings(
                    command_line=
                    'echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/job_afs.txt; '
                    'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/job_bfs.txt; '
                    'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT1/afs; '
                    'echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/afs/job_afs.txt; '
                    'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs; '
                    'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs/job_bfs.txt; '
                    'echo done'))).result()
        self.assertEqual(
            wait_for_job_completion(self.is_live, self.client,
                                    resource_group.name, job.name, MINUTE),
            models.ExecutionState.succeeded)

        job = self.client.jobs.get(resource_group.name, job.name)
        # Assert job and job prep standard output is populated on cluster level filesystem
        assert_job_files_are(
            self, self.client, resource_group.name, job.name,
            STANDARD_OUTPUT_DIRECTORY_ID, {
                u'stdout.txt': u'done\n',
                u'stderr.txt': u'',
                u'stdout-job_prep.txt': u'done\n',
                u'stderr-job_prep.txt': u''
            })
        # Assert files are generated on job level AFS
        assert_job_files_are(self, self.client, resource_group.name, job.name,
                             'OUTPUT1', {
                                 u'job_afs.txt': u'afs\n',
                                 u'prep_afs.txt': u'afs\n',
                                 u'afs': None
                             })
        # Assert files are generated on job level blobfuse
        assert_job_files_are(self, self.client, resource_group.name, job.name,
                             'OUTPUT2', {
                                 u'job_bfs.txt': u'bfs\n',
                                 u'prep_bfs.txt': u'bfs\n',
                                 u'bfs': None
                             })
        # Assert subfolders are available via API
        assert_job_files_in_path_are(self, self.client, resource_group.name,
                                     job.name, 'OUTPUT1', 'afs',
                                     {u'job_afs.txt': u'afs\n'})
        assert_job_files_in_path_are(self, self.client, resource_group.name,
                                     job.name, 'OUTPUT2', 'bfs',
                                     {u'job_bfs.txt': u'bfs\n'})

        # Assert that we can access the output files created on job level mount volumes directly in storage using path
        # segment returned by the server.
        if storage_account.name != FAKE_STORAGE.name:
            files = FileService(storage_account.name, storage_account_key)
            self.assertTrue(
                files.exists(
                    'jobshare', job.job_output_directory_path_segment + '/' +
                    OUTPUT_DIRECTORIES_FOLDER_NAME, 'job_afs.txt'))
            blobs = BlockBlobService(storage_account.name, storage_account_key)
            self.assertTrue(
                blobs.exists(
                    'jobcontainer', job.job_output_directory_path_segment +
                    '/' + OUTPUT_DIRECTORIES_FOLDER_NAME + '/job_bfs.txt'))
        # After the job is done the filesystems should be unmounted automatically, check this by submitting a new job.
        checker = self.client.jobs.create(
            resource_group.name,
            'checker',
            parameters=models.JobCreateParameters(
                location=location,
                cluster=models.ResourceId(id=cluster.id),
                node_count=1,
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(
                    AZURE_FILES_MOUNTING_PATH),
                custom_toolkit_settings=models.CustomToolkitSettings(
                    command_line='echo job; df | grep -E "job_bfs|job_afs"'))
        ).result()
        # Check that the job failed because there are no job level mount volumes anymore
        self.assertEqual(
            wait_for_job_completion(self.is_live, self.client,
                                    resource_group.name, checker.name, MINUTE),
            models.ExecutionState.failed)
        # Check that the cluster level AFS was still mounted
        assert_job_files_are(self, self.client, resource_group.name,
                             checker.name, STANDARD_OUTPUT_DIRECTORY_ID, {
                                 u'stdout.txt': u'job\n',
                                 u'stderr.txt': u''
                             })
Beispiel #25
0
# Fragment of a provisioning notebook: setup_machine_py and NOTEBOOK_CONFIG are
# assumed to be defined earlier in the original script.
import os
import re

from azure.storage.file import FileService

with open('Blob\\setup_machine.py', 'w') as f:
    f.write(setup_machine_py)
    
#Generate run_airsim_on_user_login.xml
with open('Template\\run_airsim_on_user_login_xml.template', 'r', encoding='utf-16') as f:
    startup_task_xml = f.read()
    
startup_task_xml = startup_task_xml\
                    .replace('{batch_job_user_name}', NOTEBOOK_CONFIG['batch_job_user_name'])

with open('Share\\scripts_downpour\\run_airsim_on_user_login.xml', 'w', encoding='utf-16') as f:
    f.write(startup_task_xml)

# create file share
file_service = FileService(account_name = NOTEBOOK_CONFIG['storage_account_name'], account_key=NOTEBOOK_CONFIG['storage_account_key'])
file_service.create_share(NOTEBOOK_CONFIG['file_share_name'], fail_on_exist=False)

# upload all files to share
def create_directories(path, file_service):
    split_dir = path.split('\\')
    for i in range(1, len(split_dir)+1, 1):
        combined_dir = '\\'.join(split_dir[:i])
        file_service.create_directory(NOTEBOOK_CONFIG['file_share_name'], combined_dir, fail_on_exist=False)

for root, directories, files in os.walk('Share'):
    for file in files:
        regex_pattern = '{0}[\\\\]?'.format('Share').replace('\\', '\\\\')
        upload_directory = re.sub(regex_pattern, '', root)
        print('Uploading {0} to {1}...'.format(os.path.join(root, file), upload_directory))
        if (len(upload_directory) == 0):
            upload_directory = None
Beispiel #26
0
import logging

from azure.storage.file import FileService

from config import Config

logger = logging.getLogger('azure.storage')
logger.setLevel(logging.ERROR)

environment = Config.ENVIRONMENT

file_service = FileService(account_name=Config.AZURE_FILES_ACCOUNT,
                           account_key=Config.AZURE_FILES_KEY)

if environment != 'CI':
    file_service.create_share(environment)


class AzureFilesService:
    @staticmethod
    def create_file_from_bytes(bytes_file, filename):
        file_service.create_file_from_bytes(share_name=environment,
                                            directory_name=None,
                                            file_name=filename,
                                            file=bytes_file)

    @staticmethod
    def download_azure_file(filename):
        return file_service.get_file_to_text(environment, None, filename)
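A brief usage sketch for the AzureFilesService wrapper above (not part of the original source; the file name and payload are illustrative placeholders):

# Assumes the module above has been imported and the share for the current
# environment already exists (i.e. environment != 'CI').
AzureFilesService.create_file_from_bytes(b'hello world', 'report.txt')
downloaded = AzureFilesService.download_azure_file('report.txt')
print(downloaded.content)  # get_file_to_text returns a File object whose .content holds the text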
Beispiel #27
0
            'name': 'standard_ragrs'
        }
    })
storageaccount.wait()

# Create Container and Share
sak = storage_client.storage_accounts.list_keys(resourcegroupname,
                                                storageaccountname)
storageaccountkey = sak.keys[0].value
cloudstorage_client = CloudStorageAccount(storageaccountname,
                                          storageaccountkey)
blob_service = cloudstorage_client.create_block_blob_service()
blob_service.create_container(sharename, public_access=PublicAccess.Container)
file_service = FileService(account_name=storageaccountname,
                           account_key=storageaccountkey)
file_service.create_share(sharename)

# Copy Setup Files to Container and Share
blob_service.create_blob_from_path(
    sharename,
    filename,
    filename,
)
file_service.create_file_from_path(
    sharename,
    '',
    filename,
    filename,
)

# Create an App Service Plan
Beispiel #28
0
import os
import urllib.request
import zipfile
from datetime import datetime
from azure.storage.table import TableService, Entity, TablePermissions
from azure.storage.blob import BlockBlobService
from azure.storage.blob import PublicAccess
from azure.storage.file import FileService

STORAGE_ACCOUNT_NAME = os.environ['STORAGE_ACCOUNT_NAME']
STORAGE_ACCOUNT_KEY = os.environ['STORAGE_ACCOUNT_KEY']

table_service = TableService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)

table_service.create_table('cluster')

file_service = FileService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)
file_service.create_share(share_name='azureml-project', quota=1)
file_service.create_share(share_name='azureml-share', quota=1)

file_service.create_directory('azureml-share', 'Solution1')
file_service.create_directory('azureml-share', 'Solution2')

block_blob_service = BlockBlobService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)

container_name = 'telemetry'
block_blob_service.create_container(container_name)

source = os.environ['AML_ASSETS_URL']
dest = 'azureml_project.zip'

urllib.request.urlretrieve(source, dest)
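The example above stops after downloading azureml_project.zip. A hedged sketch of one plausible continuation, extracting the archive and uploading its files into the 'azureml-project' share created earlier (the local extraction folder name is an assumption, not taken from the original):

# Sketch only: unpack the downloaded archive and push its files to the share root.
with zipfile.ZipFile(dest) as archive:
    archive.extractall('azureml_project')

for root, _, names in os.walk('azureml_project'):
    for name in names:
        # Files are uploaded flat into the share root; nested remote directories
        # would need create_directory calls first.
        file_service.create_file_from_path(
            share_name='azureml-project',
            directory_name='',
            file_name=name,
            local_file_path=os.path.join(root, name))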
Beispiel #29
0
def run(job, **kwargs):
    resource = kwargs.get('resource')
    create_custom_fields_as_needed()

    storage_account = '{{ storage_account }}'
    file_path = "{{ file }}"
    azure_storage_file_share_name = '{{ azure_storage_file_share_name }}'
    overwrite_files = {{overwrite_files}}
    file_name = Path(file_path).name

    if file_path.startswith(settings.MEDIA_URL):
        set_progress("Converting relative URL to filesystem path")
        file_path = file_path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)

    if not file_path.startswith(settings.MEDIA_ROOT):
        file_path = os.path.join(settings.MEDIA_ROOT, file_path)

    try:
        set_progress("Connecting To Azure...")
        account_key = Resource.objects.filter(
            name__icontains=storage_account)[0].azure_account_key
        fallback_account_key = Resource.objects.filter(
            name__icontains=storage_account)[0].azure_account_key_fallback
        file_service = FileService(account_name=storage_account,
                                   account_key=account_key)

        set_progress(
            'Creating file share {file_share_name} if it doesn\'t already exist...'
            .format(file_share_name=azure_storage_file_share_name))
        file_service.create_share(share_name=azure_storage_file_share_name,
                                  quota=1)

        set_progress('Connecting to file share')
        file_name_on_azure = file_name
        count = 0
        while (not overwrite_files) and file_service.exists(
                share_name=azure_storage_file_share_name,
                file_name=file_name_on_azure,
                directory_name=''):
            count += 1
            file_name_on_azure = '{file_name}({duplicate_number})'.format(
                file_name=file_name, duplicate_number=count)
            set_progress(
                'File with name already exists on given file share, testing new name: {new_name}'
                .format(new_name=file_name_on_azure))

        local_resource_name = azure_storage_file_share_name + '-' + file_name_on_azure
        if overwrite_files and file_service.exists(
                share_name=azure_storage_file_share_name,
                file_name=file_name_on_azure,
                directory_name=''):
            set_progress(
                'File with name already exists on given file share, overwriting'
            )
            old_resource_to_overwrite = Resource.objects.filter(
                name=local_resource_name, lifecycle='ACTIVE').first()

            if old_resource_to_overwrite:
                old_resource_to_overwrite.delete()

        set_progress(
            'Creating the file with name {file_name} on the Storage Account {storage_account} using the share named {share_name}'
            .format(file_name=file_name_on_azure,
                    storage_account=storage_account,
                    share_name=azure_storage_file_share_name))
        file_service.create_file_from_path(
            share_name=azure_storage_file_share_name,
            file_name=file_name_on_azure,
            directory_name='',
            local_file_path=file_path)
        os.remove(file_path)

        set_progress(
            'Creating local storage resource named {resource_name}'.format(
                resource_name=local_resource_name))
        resource.name = local_resource_name
        resource.azure_storage_account_name = storage_account
        resource.azure_account_key = account_key
        resource.azure_account_key_fallback = fallback_account_key
        resource.azure_storage_file_share_name = azure_storage_file_share_name
        resource.azure_storage_file_name = file_name_on_azure
        resource.save()

        return "Success", "The File has succesfully been uploaded", ""
    except Exception as e:
        if os.path.exists(file_path):
            os.remove(file_path)

        if resource:
            resource.delete()

        raise CloudBoltException(
            "File could not be uploaded because of the following error: {error}"
            .format(error=e))
Beispiel #30
0
from azure.storage.file import FileService, ContentSettings
import yaml

config_file = 'config.yml'

with open(config_file, 'r') as stream:
    try:
        f = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)

file_service = FileService(account_name=f['account_name'],
                           account_key=f['account_key'])

share_name = 'myshare'
file_service.create_share(share_name)

file_service.create_file_from_path(
    share_name,
    None,  # Create the file in the share's root directory, so directory_name is None
    'myfile',
    'test.txt',
    content_settings=ContentSettings(content_type='text/plain'))
Beispiel #31
0
except:
    print("Access authorization problem with the Microsoft AZURE account")
    logging.error("Access authorization problem with the Microsoft AZURE account")
#    syslog.syslog(syslog.LOG_ERR, "Access authorization problem with the Microsoft AZURE account")
    exit(2)  # exit with an error!

# Creation of the directory backup6 on Microsoft AZURE in our example #
# Check whether the backup directory backup6 exists on Microsoft AZURE or not #

# FileService.exists() returns a boolean (it does not raise), so test its return value.
if file_service.exists(AZURE_REP_BKP):
    print("The AZURE backup directory exists!")
    logging.debug("The AZURE backup directory exists!")
#    syslog.syslog(syslog.LOG_DEBUG, "The AZURE backup directory exists!")
else:
    file_service.create_share(AZURE_REP_BKP)
    print("Creating the AZURE backup directory")
    logging.warning("Creating the AZURE backup directory")
#    syslog.syslog(syslog.LOG_WARNING, "Creating the AZURE backup directory")

############################## Time ################################

BACKUP_DATE = date.today().strftime("%d-%m-%Y")  # today's date in Day-Month-Year format
BACKUP_DATE_OLD = (date.today() - datetime.timedelta(days=int(NBjourDEretention))).strftime("%d-%m-%Y")  # today's date minus the retention period, in Day-Month-Year format

############################# Functions ##############################

# Function that retrieves the name of the database image from the docker-compose.yml file #

def get_database_name():
  with open(repertoire_de_sauvegarde + "/docker-compose.yml", 'r') as file:  # open the YML file for reading
class StorageHelper(object):
    """Handle details related to a single storage account and share.
    Instantiate this object with information sufficient to
    uniquely identify a storage account and a file share within it.
    Then .account can be used to retrieve the Azure SDK for Python
    object corresponding to the account, and .key can be used to
    get an access key for it.
    For both those properties, if the value mentioned doesn't exist,
    it will be created upon first property access.
    """
    def __init__(self,
                 client_data,
                 resource_helper,
                 name,
                 account=None,
                 default_share='share'):
        self.name = name
        self.default_share = default_share
        self._account = account
        self._key = os.environ.get('AZURE_STORAGE_KEY')
        self.resource_helper = resource_helper
        self.client = StorageManagementClient(*client_data)
        self.file_service = FileService(
            account_name=self.account.name,
            account_key=self.key,
        )

    @property
    def account(self):
        """Return the managed StorageAccounts object.
        If no such account exists, create it first.
        """
        if self._account is None:
            print('Creating storage account...')
            # Creating a storage account that already exists raises an error, so check name availability first.
            name_check = self.client.storage_accounts.check_name_availability(
                self.name)
            if name_check.name_available:
                storage_creation = self.client.storage_accounts.create(
                    self.resource_helper.group.name, self.name,
                    StorageAccountCreateParameters(
                        sku=StorageAccountSku(StorageSkuName.standard_lrs),
                        kind=StorageKind.storage,
                        location=self.resource_helper.group.location,
                    ))
                storage = storage_creation.result()
            else:
                try:
                    storage = self.client.storage_accounts.get_properties(
                        self.resource_helper.group.name, self.name)
                except CloudError:
                    print('Storage account {} already exists'
                          ' in a resource group other than {}.'.format(
                              self.name, self.resource_helper.group.name))
            print('Got storage account:', storage.name)
            self._account = storage
        return self._account

    @property
    def key(self):
        """Get the first available storage key.
        This will crash if there are no available storage keys,
        which is unlikely since two are created along with a storage account.
        """
        if self._key is None:
            storage_keys = self.client.storage_accounts.list_keys(
                self.resource_helper.group.name, self.account.name)
            self._key = next(iter(storage_keys.keys)).value
        return self._key

    def upload_file(self, path, sharename):
        """Upload a file into the default share on the storage account.
        If the share doesn't exist, create it first.
        """
        share = self.default_share if sharename is None else sharename
        # create_share silently returns False if the share already exists.
        self.file_service.create_share(share)
        self.file_service.create_file_from_path(
            share,
            None,
            os.path.basename(path),
            path,
        )
        return '/'.join([share, os.path.basename(path)])

    def download_file(self, sharename, filename):
        self.file_service.get_file_to_path(sharename, None, filename, filename)

    def delete_file(self, sharename, filename):
        self.file_service.delete_file(sharename, None, filename)

    def create_share(self, sharename):
        self.file_service.create_share(sharename)

    def create_directory(self, sharename, directoryname):
        self.file_service.create_directory(sharename, directoryname)

    def list_directories_and_files(self, sharename):
        generator = self.file_service.list_directories_and_files(sharename)
        return [file_or_dir.name for file_or_dir in generator]

    def list_shares(self):
        shares = list(self.file_service.list_shares(include_snapshots=True))
        sharelist = [fileshare.name for fileshare in shares]
        print(sharelist)
        return sharelist
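A minimal usage sketch for the StorageHelper class above (not from the original source): the client_data tuple and the resource_helper object, which must expose .group.name and .group.location, are assumed to be constructed elsewhere in the surrounding sample.

# Hypothetical wiring, for illustration only.
client_data = (credentials, subscription_id)
helper = StorageHelper(client_data, resource_helper, 'mystorageacct123')

# Constructing the helper already touches .account and .key, creating the
# storage account and fetching an access key if they don't exist yet.
helper.create_share('demo-share')
remote_path = helper.upload_file('local_report.txt', 'demo-share')
print('Uploaded to', remote_path)
print(helper.list_directories_and_files('demo-share'))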
from azure.storage.file import FileService, ContentSettings
import os


omnipresence_storage_account_name = 'cloudinfraprovision'
omnipresence_storage_account_key = 'WVIc4TiKPDLxjtIWLpnk5fITbI6AFoZahvfTz4SgSjyP+fE3/qwgSgIo/UNavXPPjQDWrCfT4da6vnL209pThQ=='
omnipresence_storage_file_share = 'azure-provision'  # Azure file share names allow only lowercase letters, numbers and hyphens.
remote_dir_path = ''


#Initialize an Azure Storage Account File Service Instance
omnipresence_storage_account = FileService(account_name=omnipresence_storage_account_name, account_key=omnipresence_storage_account_key)

#test if your storage file share exists on Azure or not, if not, create it
if not omnipresence_storage_account.exists(omnipresence_storage_file_share):
    omnipresence_storage_account.create_share(omnipresence_storage_file_share, quota=10)

# Walk through the current directory and upload local files to the Azure file share,
# skipping hidden files and directories. Files in subdirectories would need the matching
# remote directories to exist first (see the sketch after this example).
for base_dir, dirs, file_names in os.walk(".", topdown=True):
    file_names = [f for f in file_names if not f[0] == '.']  # filter out files whose name begins with a dot
    dirs[:] = [d for d in dirs if not d[0] == '.']  # filter out directories whose name begins with a dot
    for local_file_name in file_names:
        remote_file_name = os.path.join(base_dir, local_file_name)[2:]
        local_file_name = remote_file_name
        if (omnipresence_storage_account.exists(omnipresence_storage_file_share)):
            omnipresence_storage_account.create_file_from_path(
                omnipresence_storage_file_share,
                None, # We want to create files under current remote directory, so we specify None for the directory_name
                remote_file_name,
                local_file_name,
                content_settings=ContentSettings(content_type='application/octet-stream'))  # generic binary MIME type
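The loop above uploads everything into the share root. A hedged sketch of a variant that preserves subdirectory structure by creating the remote directory chain first; it reuses omnipresence_storage_account and omnipresence_storage_file_share from the example above, and the helper function below is not part of the original:

def ensure_remote_directories(file_service, share_name, relative_dir):
    # Create each directory level in turn; fail_on_exist=False makes the call
    # a no-op for directories that already exist.
    parts = [p for p in relative_dir.replace('\\', '/').split('/') if p]
    current = ''
    for part in parts:
        current = part if not current else current + '/' + part
        file_service.create_directory(share_name, current, fail_on_exist=False)

for base_dir, dirs, file_names in os.walk('.', topdown=True):
    file_names = [f for f in file_names if not f.startswith('.')]
    dirs[:] = [d for d in dirs if not d.startswith('.')]
    relative_dir = os.path.relpath(base_dir, '.')
    if relative_dir != '.':
        ensure_remote_directories(omnipresence_storage_account,
                                  omnipresence_storage_file_share,
                                  relative_dir)
    for name in file_names:
        remote_dir = None if relative_dir == '.' else relative_dir.replace('\\', '/')
        omnipresence_storage_account.create_file_from_path(
            omnipresence_storage_file_share,
            remote_dir,  # None puts the file in the share root
            name,
            os.path.join(base_dir, name))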