Example #1
def generate_and_upload_drum_rnn_midi_sequence(args):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    output_dir = os.path.join(current_dir, "output")
    model_dir = os.path.join(current_dir, "assets/MLmodels")
    model_path = os.path.join(model_dir, "drum_kit_rnn.mag")
    if not os.path.exists(model_path):
        url = "/".join((S3_URL, "models", "drum_kit_rnn.mag"))
        r = requests.get(url, allow_redirects=True)
        with open(model_path, 'wb') as f:
            f.write(r.content)
    #TODO switch to subprocess
    os.system('drums_rnn_generate '
              ' --config="drum_kit" '
              ' --bundle_file="{}"'
              ' --output_dir="{}"'
              ' --num_outputs=1 '
              ' --num_steps={}'
              ' --primer_drums="{}"'.format(model_path, output_dir, args.num_steps, args.primer_drums))
    list_of_files = [os.path.join(output_dir, f) for f in os.listdir(output_dir)]
    latest_file = max(list_of_files, key=os.path.getctime)
    midi_sequence = extract_midi_data_from_midi_file(latest_file)['notes']
    S3.initialize()
    s3_path = S3.upload_file(latest_file, object_name='midi/{}/{}'.format(args.userId, os.path.split(latest_file)[-1]))
    os.remove(latest_file)
    print(s3_path)
    print(midi_sequence)
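The download step above can be hardened by streaming the response to disk and failing fast on HTTP errors. A minimal sketch using only requests; download_if_missing is a hypothetical helper, with url and model_path standing in for the values computed above:

import os
import requests

def download_if_missing(url, model_path):
    # Skip the download when the bundle is already cached locally.
    if os.path.exists(model_path):
        return model_path
    target_dir = os.path.dirname(model_path)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)
    # Stream the response so large model bundles are not held in memory.
    with requests.get(url, stream=True, allow_redirects=True, timeout=60) as r:
        r.raise_for_status()
        with open(model_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)
    return model_path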
Example #2
 def __init__(self, credentials, path):
     # Init Variables
     self._path = path
     # Init Classes
     self._s3 = S3(credentials)
     self._s3u = S3Upload(self._s3, path)
     self._s3mpu = S3MultipartUpload(self._s3, path)
Example #3
 def _init_dataset(self, config):
     self.S3 = None
     if self.config.config_Datasets.S3_connection:
         self.S3 = S3(self.config.config_Datasets.S3_bucket_name)
     if self.config.agent_name == "DQN":
         self.SimCache = SimCache(self.config.config_Datasets.ddqn.sim,
                                  self.S3)
Example #4
 def DoSomething(self, ctx):
     if (ctx.Condition == 5):
         from S3 import S3
         ctx.SetState(S3())
         ctx.DoSomething()
     else:
         print('S5')
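Examples #4 and #8 are handlers from a State-pattern machine: each state inspects the context's condition, optionally swaps in a new state, and re-dispatches the same request. A self-contained sketch of that pattern; the Context class and the condition value are illustrative, not taken from the original source:

class S3(object):
    def DoSomething(self, ctx):
        print('handled by S3')

class S5(object):
    def DoSomething(self, ctx):
        if ctx.Condition == 5:
            # Transition to S3 and let it handle the same request.
            ctx.SetState(S3())
            ctx.DoSomething()
        else:
            print('S5')

class Context(object):
    def __init__(self, condition):
        self.Condition = condition
        self._state = None

    def SetState(self, state):
        self._state = state

    def DoSomething(self):
        self._state.DoSomething(self)

ctx = Context(5)
ctx.SetState(S5())
ctx.DoSomething()  # S5 transitions to S3, which prints 'handled by S3'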
Example #5
    def __init__(self,
                 bucket_name,
                 access_key,
                 secret_key,
                 s3_host,
                 multipart_id,
                 part_num,
                 file_name,
                 offset,
                 byte_count,
                 retries=5,
                 secure=True):
        self.bucket_name = bucket_name
        self.access_key = access_key
        self.secret_key = secret_key
        self.s3_host = s3_host
        self.multipart_id = multipart_id
        self.part_num = part_num
        self.file_name = file_name
        self.offset = offset
        self.byte_count = byte_count
        self.retries = retries
        self.secure = secure

        try:
            self.s3_conn = S3(self.access_key, self.secret_key, self.s3_host,
                              self.secure, self.retries)
            self.bucket = self.s3_conn.get_bucket(self.bucket_name)
        except Exception as e:
            logging.error(
                "Could not get AWS S3 connection to bucket %s! Error: %s" %
                (self.bucket_name, e))
            raise
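Each instance of this class uploads one (offset, byte_count) slice of the source file. A sketch of how such part ranges are typically computed before the workers are spawned; the 8 MB part size is an assumption, not a value from the original:

import os

def split_into_parts(file_name, part_size=8 * 1024 * 1024):
    # Yield one (part_num, offset, byte_count) tuple per upload worker,
    # together covering the whole file.
    total = os.path.getsize(file_name)
    offset, part_num = 0, 1
    while offset < total:
        byte_count = min(part_size, total - offset)
        yield part_num, offset, byte_count
        offset += byte_count
        part_num += 1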
Example #6
    def _get_filelist_remote(remote_uri, recursive=True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }

        info(u"Retrieving list of remote files for %s ..." % remote_uri)

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(),
                                  prefix=remote_uri.object(),
                                  recursive=recursive)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/') + 1]
            remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = SortedDict(ignore_case=False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != '/':
                ## We asked for one file and we got that file :-)
                key = os.path.basename(object['Key'])
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = SortedDict(ignore_case=False)  ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][rem_base_len:]  ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
            rem_list[key] = {
                'size': int(object['Size']),
                'timestamp': dateS3toUnix(
                    object['LastModified']
                ),  ## Sadly it's upload time, not our lastmod time :-(
                'md5': object['ETag'][1:-1],
                'object_key': object['Key'],
                'object_uri_str': object_uri_str,
                'base_uri': remote_uri,
            }
            if break_now:
                break
        return rem_list
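The "prefix magic" described in the comment is easiest to see in isolation. A self-contained sketch of the rem_base trimming on plain strings, using the keys from the comment's example:

def relative_keys(prefix, keys):
    # Reproduce the rem_base trimming: cut the prefix back to its last '/',
    # then report each matching key relative to that base.
    rem_base = prefix
    if rem_base != '' and rem_base[-1] != '/':
        rem_base = rem_base[:rem_base.rfind('/') + 1]
    return [key[len(rem_base):] for key in keys if key.startswith(prefix)]

keys = ['abc/def/file1.jpg', 'abc/def/xyz/blah.txt']
print(relative_keys('abc/def', keys))    # ['def/file1.jpg', 'def/xyz/blah.txt']
print(relative_keys('abc/def/', keys))   # ['file1.jpg', 'xyz/blah.txt']
print(relative_keys('abc/def/x', keys))  # ['xyz/blah.txt']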
Example #7
    def __str__(self):
        tree = ET.Element("InvalidationBatch")
        s3 = S3(Config())

        for path in self.paths:
            if len(path) < 1 or path[0] != "/":
                path = "/" + path
            appendXmlTextNode("Path", s3.urlencode_string(path), tree)
        appendXmlTextNode("CallerReference", self.reference, tree)
        return ET.tostring(tree)
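The same document can be built with the standard library alone. A sketch that approximates appendXmlTextNode with ET.SubElement and omits the urlencode step; the paths and caller reference are made up:

import xml.etree.ElementTree as ET

def invalidation_batch_xml(paths, reference):
    tree = ET.Element("InvalidationBatch")
    for path in paths:
        # CloudFront invalidation paths must start with '/'.
        if len(path) < 1 or path[0] != "/":
            path = "/" + path
        ET.SubElement(tree, "Path").text = path
    ET.SubElement(tree, "CallerReference").text = reference
    return ET.tostring(tree)

print(invalidation_batch_xml(["img/logo.png", "/css/site.css"], "batch-001"))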
Example #8
 def DoSomething(self, ctx):
     if (ctx.Condition == 7):
         from S3 import S3
         ctx.SetState(S3())
         ctx.DoSomething()
     elif (ctx.Condition == 5):
         from S5 import S5
         ctx.SetState(S5())
         ctx.DoSomething()
     else:
         print('S2')
         pass
Example #9
 def _get_remote_attribs(uri, remote_item):
     response = S3(cfg).object_info(uri)
     remote_item.update({
         'size': int(response['headers']['content-length']),
         'md5': response['headers']['etag'].strip('"\''),
         'timestamp': dateRFC822toUnix(response['headers']['date'])
     })
     try:
         md5 = response['s3cmd-attrs']['md5']
         remote_item.update({'md5': md5})
         debug(u"retrieved md5=%s from headers" % md5)
     except KeyError:
         pass
Example #10
def input_VE(poste, courbe_de_charge, penetration, tailles, puissances,
             SOC_min, SOC_max, taux_base, taux_pos, scenario):

    # Dates and times
    heures = horodate_list(courbe_de_charge)

    # EV fleet
    flux = trajets_quotidiens(poste)
    nombre_VE_sort, nombre_VE_ent = nombre_VE(
        penetration, flux)  # number of EVs (based on penetration rate)
    SOC_sort, SOC_ent = seuil_recharge(
        SOC_min, SOC_max, nombre_VE_sort,
        nombre_VE_ent)  # initial battery state of charge (%)
    dist_parc_VE_sort, dist_parc_VE_ent = dist_domicile_travail(
        flux, nombre_VE_sort,
        nombre_VE_ent)  # distances traveled by each EV
    repart_puissances_sort, repart_puissances_ent = puissance_charge(
        puissances, nombre_VE_sort,
        nombre_VE_ent)  # each vehicle is assigned a charging power
    repart_taille_sort, repart_taille_ent = taille_batterie(
        tailles, nombre_VE_sort, nombre_VE_ent
    )  # battery-size distribution for each vehicle

    # Charging start times and restrictions
    if scenario == 1:
        T_debut_sort, T_debut_ent, plage_sort, plage_ent = S1(
            heures, nombre_VE_sort, nombre_VE_ent)
    elif scenario == 2:
        T_debut_sort, T_debut_ent, plage_sort, plage_ent = S2(
            heures, nombre_VE_sort, nombre_VE_ent, taux_base)
    elif scenario == 3:
        T_debut_sort, T_debut_ent, plage_sort, plage_ent = S3(
            heures, nombre_VE_sort, nombre_VE_ent, taux_pos)

    return {
        'SOC_sort': SOC_sort,
        'SOC_ent': SOC_ent,
        'dist_parc_VE_sort': dist_parc_VE_sort,
        'dist_parc_VE_ent': dist_parc_VE_ent,
        'repart_taille_sort': repart_taille_sort,
        'repart_taille_ent': repart_taille_ent,
        'repart_puissances_sort': repart_puissances_sort,
        'repart_puissances_ent': repart_puissances_ent,
        'T_debut_sort': T_debut_sort,
        'T_debut_ent': T_debut_ent,
        'plage_sort': plage_sort,
        'plage_ent': plage_ent
    }
Example #11
def collect_from_services(services_required, region):
    results = []
    if 'ec2' in services_required:
        ec2 = EC2(region)
        resources = ec2.collect()
        if len(resources):
            results.append(resources)

    if 's3' in services_required:
        s3 = S3(region)
        resources = s3.collect()

        if len(resources):
            results.append(resources)

    if results:
        return pd.concat(results)
    else:
        return None
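pd.concat raises a ValueError on an empty list, which is why the results are collected first and None is returned when no service produced anything. A self-contained sketch of that guard with dummy frames:

import pandas as pd

def concat_or_none(frames):
    # Drop missing or empty frames before concatenating.
    frames = [f for f in frames if f is not None and len(f)]
    return pd.concat(frames, ignore_index=True) if frames else None

ec2_resources = pd.DataFrame({'id': ['i-1'], 'service': ['ec2']})
s3_resources = pd.DataFrame({'id': ['bkt'], 'service': ['s3']})
print(concat_or_none([ec2_resources, s3_resources, None]))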
Example #12
 def upload_pic(self):
     s3 = S3()
     try:
         # lock.acquire()
         print "Start Upload", self.merge_output
         S3list = s3.upload_image_list(str(self.image_dic))
         print "Done Upload", self.merge_output
         print S3list
         # lock.release()
         lock.acquire()
         self.s3_list_file.write(str(S3list) + '\n')
         self.s3_list_file.flush()
         lock.release()
     except Exception as e:
         lock.acquire()
         print "Upload Error. %s " % e
         # log the failed image list (the original referenced an undefined 'url')
         self.error_file.write(str(self.image_dic) + '\n')
         self.error_file.flush()
         lock.release()
         return False
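The manual acquire()/release() pairs above leak the lock if a write raises between them. A sketch of the same discipline using threading.Lock as a context manager, which releases unconditionally:

import threading

lock = threading.Lock()

def append_line(shared_file, line):
    # 'with lock' releases the lock even when write() raises,
    # unlike a bare acquire()/release() pair.
    with lock:
        shared_file.write(line + '\n')
        shared_file.flush()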
Example #13
def fetch_remote_list(args, require_attribs = False, recursive = None):
    def _get_filelist_remote(remote_uri, recursive = True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }

        info(u"Retrieving list of remote files for %s ..." % remote_uri)

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/')+1]
            remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case = False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = os.path.basename(object['Key'])
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = FileDict(ignore_case = False)   ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
            rem_list[key] = {
                'size' : int(object['Size']),
                'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
                'md5' : object['ETag'][1:-1],
                'object_key' : object['Key'],
                'object_uri_str' : object_uri_str,
                'base_uri' : remote_uri,
                'dev' : None,
                'inode' : None,
            }
            md5 = object['ETag'][1:-1]
            rem_list.record_md5(key, md5)
            if break_now:
                break
        return rem_list

    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case = False)

    if type(args) not in (list, tuple):
        args = [args]

    if recursive is None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    if recursive:
        for uri in remote_uris:
            objectlist = _get_filelist_remote(uri)
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = str(uri)
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            if uri_str.find('*') > -1 or uri_str.find('?') > -1:
                first_wildcard = uri_str.find('*')
                first_questionmark = uri_str.find('?')
                if first_questionmark > -1 and first_questionmark < first_wildcard:
                    first_wildcard = first_questionmark
                prefix = uri_str[:first_wildcard]
                rest = uri_str[first_wildcard+1:]
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = rest.find('/') > -1
                objectlist = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = os.path.basename(uri.object())
                if not key:
                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': unicode(uri),
                    'object_key': uri.object()
                }
                if require_attribs:
                    response = S3(cfg).object_info(uri)
                    remote_item.update({
                    'size': int(response['headers']['content-length']),
                    'md5': response['headers']['etag'].strip('"\''),
                    'timestamp' : dateRFC822toUnix(response['headers']['date'])
                    })
                    # get md5 from header if it's present.  We would have set that during upload
                    if 'x-amz-meta-s3cmd-attrs' in response['headers']:
                        attrs = parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
                        if 'md5' in attrs:
                            remote_item.update({'md5': attrs['md5']})

                remote_list[key] = remote_item
    return remote_list
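The wildcard branch splits the URI at the first '*' or '?', lists objects by the literal prefix, then filters with fnmatch (glob.fnmatch.fnmatch is the same function as fnmatch.fnmatch). The filtering step on its own, with illustrative keys:

import fnmatch

def filter_uris(pattern, object_uri_strs):
    # Keep only the listed object URIs that match the original wildcard URI.
    return [u for u in object_uri_strs if fnmatch.fnmatch(u, pattern)]

uris = ['s3://bckt/logs/2024-01.gz', 's3://bckt/logs/2024-02.gz',
        's3://bckt/data/raw.csv']
print(filter_uris('s3://bckt/logs/*.gz', uris))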
Example #14
    def _get_filelist_remote(remote_uri, recursive=True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }

        info(u"Retrieving list of remote files for %s ..." % remote_uri)
        empty_fname_re = re.compile(r'\A\s*\Z')

        total_size = 0

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(),
                                  prefix=remote_uri.object(),
                                  recursive=recursive,
                                  uri_params=uri_params)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/') + 1]
            remote_uri = S3Uri(u"s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case=False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = unicodise(os.path.basename(deunicodise(object['Key'])))
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = FileDict(
                    ignore_case=False
                )  ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][rem_base_len:]  ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
            if empty_fname_re.match(key):
                # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems.
                warning(u"Empty object name on S3 found, ignoring.")
                continue
            rem_list[key] = {
                'size': int(object['Size']),
                'timestamp': dateS3toUnix(
                    object['LastModified']
                ),  ## Sadly it's upload time, not our lastmod time :-(
                'md5': object['ETag'].strip('"\''),
                'object_key': object['Key'],
                'object_uri_str': object_uri_str,
                'base_uri': remote_uri,
                'dev': None,
                'inode': None,
            }
            if '-' in rem_list[key]['md5']:  # always get it for multipart uploads
                _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
            md5 = rem_list[key]['md5']
            rem_list.record_md5(key, md5)
            total_size += int(object['Size'])
            if break_now:
                break
        return rem_list, total_size
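An ETag containing '-' marks a multipart upload, whose ETag is a hash of part hashes rather than the object's MD5; that is why the code falls back to _get_remote_attribs for those objects. The detection on its own:

def is_multipart_etag(etag):
    # Multipart ETags have the form '<md5-of-part-md5s>-<part_count>',
    # so they cannot be used directly as the object's MD5.
    return '-' in etag.strip('"\'')

print(is_multipart_etag('"9bb58f26192e4ba00f01e2e7b136bbd8"'))     # False
print(is_multipart_etag('"d41d8cd98f00b204e9800998ecf8427e-12"'))  # True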
Example #15
config_Simulator = {
    "host": host,
    "body_style": "donkey",
    "body_rgb": (128, 128, 128),
    "car_name": "42AI Potato Qarnot",
    "font_size": 100,
    "racer_name": "DDQN",
    "country": "FR",
    "bio": "Learning to drive w DDQN RL",
    "guid": str(uuid.uuid4()),
    "max_cte": 10,
}

if __name__ == "__main__":
    env = gym.make(env_name, conf=config_Simulator)

    S3 = S3(config.config_NeuralPlayer.config_Datasets.S3_bucket_name)
    agent = DQNAgent(config=config_Agent, S3=S3)
    agent.config.epsilon = 0.1
    preprocessor = PreprocessingVannilla(
        config.config_NeuralPlayer.config_Preprocessing)

    env.reset()
    i = 0
    state, reward, done, infos = env.step([0, 0.1])
    while (i < 1000):
        processed_state = preprocessor.process(state)
        action = agent.get_action(processed_state)
        state, reward, done, infos = env.step(action)
        print(action, done, infos)
        i += 1
Example #16
    def __init__(self, credentials, path):
        # Init AWS Variables
        self._path = path

        # Init S3 Class
        self._s3 = S3(credentials)
Example #17
	def _init_dataset(self, config):
		self.S3 = None
		if self.config.config_Datasets.S3_connection:
			self.S3 = S3(self.config.config_Datasets.S3_bucket_name)
		Logger.info("Initializing simcache")
		self.SimCache = SimCache(self.config.config_Datasets.sim, self.S3)
Example #18
	def is_dns_compatible(self):
		return S3.check_bucket_name_dns_conformity(self._bucket)
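check_bucket_name_dns_conformity is s3cmd's own validator; the DNS-style rules it enforces can be approximated with a regex. A sketch under that assumption, not s3cmd's exact implementation:

import re

def check_bucket_name_dns_conformity(bucket):
    # DNS-compatible bucket names: 3-63 characters, lowercase letters,
    # digits, dots and hyphens, starting and ending with a letter or digit.
    if not 3 <= len(bucket) <= 63:
        return False
    return re.match(r'^[a-z0-9][a-z0-9.-]*[a-z0-9]$', bucket) is not None

print(check_bucket_name_dns_conformity('my-bucket'))  # True
print(check_bucket_name_dns_conformity('My_Bucket'))  # False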
Example #19
def pushToS3(fileName):
    print "Pushing to S3 . . . "
    s3Wrapper = S3(fileName)
    s3Wrapper.uploadData()