Пример #1
0
    def _instances(self):
        """Yield instance records that match app/intf directories on disk.

        Scans ``self._dataflow_path`` for ``<app>/<intf>`` directory pairs
        and yields the datastore record whose app/intf names match each pair.

        Yields:
            dict: e.g. {'app': 'app1', 'intf': 'intf1', 'instance_id': ...}

        Raises:
            ValueError: if an <app>/<intf> directory pair has no matching
                datastore record.
        """
        app_dirs = glob.iglob(os.path.join(self._dataflow_path, '*'))
        # os.basename does not exist; the correct call is os.path.basename.
        app_dirs = (os.path.basename(x) for x in app_dirs if os.path.isdir(x))

        all_instances = []
        if self._project == 'bpc':
            for record in self._datastore.conn['bpc']['instance_id'].find({}):
                all_instances.append({
                    'instance_id': record['instance_id'],
                    'app': record['app_name'],
                    'intf': record['intf_name'],
                })
        for app in app_dirs:
            intf_dirs = glob.iglob(os.path.join(self._dataflow_path, app, '*'))
            intf_dirs = (os.path.basename(x) for x in intf_dirs
                         if os.path.isdir(x))

            for intf in intf_dirs:
                matched = False
                for instance in all_instances:
                    if instance['app'] == app and instance['intf'] == intf:
                        matched = True
                        yield instance
                # The original raised unconditionally after the inner loop,
                # so even a successful match aborted the generator; only
                # raise when nothing matched this app/intf pair.
                if not matched:
                    raise ValueError(
                        'Not find matched instance_id:%s, %s' % (app, intf))
Пример #2
0
 def open(self, path, mode='r'):
     """Context-manager body yielding a stream for an Azure file-share path.

     Read modes download the file content into an in-memory stream; write
     modes yield an empty stream whose content is uploaded on exit.

     Args:
         path: Azure path, split into (container, subpath) by
             ``self._splitAzurePath``.
         mode: one of 'r', 'rb', 'w', 'wb'.

     Raises:
         NotImplementedError: for any other mode.
     """
     container, subpath = self._splitAzurePath(path)
     if mode == 'r':
         # os.dirname/os.basename do not exist; use os.path.* instead
         # (matching the correct calls already used in the finally block).
         _file = self._service.get_file_to_text(container,
                                                os.path.dirname(subpath),
                                                os.path.basename(subpath))
         stream = NamedStringIO(_file.content, name=_file.name)
     elif mode == 'rb':
         _file = self._service.get_file_to_bytes(container,
                                                 os.path.dirname(subpath),
                                                 os.path.basename(subpath))
         stream = NamedBytesIO(_file.content, name=_file.name)
     elif mode == 'w':
         stream = NamedStringIO(name=subpath)
     elif mode == 'wb':
         stream = NamedBytesIO(name=subpath)
     else:
         raise NotImplementedError(mode + ' is not supported')
     try:
         yield stream
     finally:
         # On exit of a write-mode context, push the buffered content up.
         if mode == 'w':
             file_write = self._service.create_file_from_text
         elif mode == 'wb':
             file_write = self._service.create_file_from_bytes
         else:
             return
         self.makedirs(os.path.dirname(path))
         file_write(container, os.path.dirname(subpath),
                    os.path.basename(subpath), stream.getvalue())
Пример #3
0
    def _instances(self):
        """Yield instance records that match app/intf directories on disk.

        Scans ``self._dataflow_path`` for ``<app>/<intf>`` directory pairs
        and yields the datastore record whose app/intf names match each pair.

        Yields:
            dict: e.g. {'app': 'app1', 'intf': 'intf1', 'instance_id': ...}

        Raises:
            ValueError: if an <app>/<intf> directory pair has no matching
                datastore record.
        """
        app_dirs = glob.iglob(os.path.join(self._dataflow_path, '*'))
        # os.basename does not exist; the correct call is os.path.basename.
        app_dirs = (os.path.basename(x) for x in app_dirs if os.path.isdir(x))

        all_instances = []
        if self._project == 'bpc':
            for record in self._datastore.conn['bpc']['instance_id'].find({}):
                all_instances.append({
                    'instance_id': record['instance_id'],
                    'app': record['app_name'],
                    'intf': record['intf_name'],
                })
        for app in app_dirs:
            intf_dirs = glob.iglob(os.path.join(self._dataflow_path, app, '*'))
            intf_dirs = (os.path.basename(x) for x in intf_dirs
                         if os.path.isdir(x))

            for intf in intf_dirs:
                matched = False
                for instance in all_instances:
                    if instance['app'] == app and instance['intf'] == intf:
                        matched = True
                        yield instance
                # Only raise when nothing matched; the original raised
                # unconditionally, aborting after the first intf.
                if not matched:
                    raise ValueError('Not find matched instance_id:%s, %s' %
                                     (app, intf))
Пример #4
0
def mix_audio(input_nonvoc, input_voc, output_dir):
    """Overlay a vocal track onto a non-vocal track and export as WAV.

    Args:
        input_nonvoc: path to the backing (non-vocal) audio file.
        input_voc: path to the vocal audio file.
        output_dir: directory where the mixed WAV is written under a
            random UUID filename.
    """
    # os.basename does not exist; the correct call is os.path.basename.
    sys.stderr.write('mix_audio: {} + {}\r\n'.format(
        os.path.basename(input_nonvoc), os.path.basename(input_voc)))
    nonvoc = AudioSegment.from_file(input_nonvoc)
    voc = AudioSegment.from_file(input_voc)

    # Overlay the vocal on top of the backing track from position 0.
    output = nonvoc.overlay(voc, position=0)
    output_filename = str(uuid.uuid4())
    # save the result
    out_file = os.path.join(output_dir, '{}.wav'.format(output_filename))
    output.export(out_file, format='wav')
    print('"{}" "{}"'.format(input_voc, out_file))
def get_icon(name, size):
    """Return a gtk.gdk.Pixbuf for *name*, scaled to *size* x *size*.

    *name* may be an absolute file path or an icon-theme name; several
    fallbacks are tried before settling on the stock preferences icon.
    """
    found = False
    if name.startswith('/'):
        try:
            # name is an absolute pathname to a file
            pixbuf = gtk.gdk.pixbuf_new_from_file(name)
            pixbuf = pixbuf.scale_simple(size, size, gtk.gdk.INTERP_BILINEAR)
            found = True
        except:
            # Fall back to treating the file's stem as a theme icon name.
            # str has no .splitext(); the correct spelling is
            # os.path.splitext(os.path.basename(name))[0].
            name = os.path.splitext(os.path.basename(name))[0]

    if not found:
        theme = gtk.icon_theme_get_default()
        try:
            pixbuf = theme.load_icon(name, size, gtk.ICON_LOOKUP_USE_BUILTIN)
        except:
            try:
                filename = os.path.join('/usr/share/pixmaps', name)
                try:
                    pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
                    found = True
                except:
                    # Not present in theme or pixmaps; use a generic icon.
                    pixbuf = theme.load_icon('applications-system', size,
                                             gtk.ICON_LOOKUP_USE_BUILTIN)
            except:
                pixbuf = theme.load_icon(gtk.STOCK_PREFERENCES, 16, 0)
    # Rescale to the requested size when outside the 48x48 sweet spot.
    if pixbuf.get_width() < 48 or pixbuf.get_height() < 48:
        pixbuf = pixbuf.scale_simple(size, size, gtk.gdk.INTERP_BILINEAR)
    if pixbuf.get_width() > 48 or pixbuf.get_height() > 48:
        pixbuf = pixbuf.scale_simple(size, size, gtk.gdk.INTERP_BILINEAR)
    return pixbuf
def get_icon(name, size):
    """Return a gtk.gdk.Pixbuf for *name*, scaled to *size* x *size*.

    *name* may be an absolute file path or an icon-theme name; several
    fallbacks are tried before settling on the stock preferences icon.
    """
    found = False
    if name.startswith('/'):
        try:
            # name is an absolute pathname to a file
            pixbuf = gtk.gdk.pixbuf_new_from_file(name)
            pixbuf = pixbuf.scale_simple(size, size, gtk.gdk.INTERP_BILINEAR)
            found = True
        except:
            # Fall back to treating the file's stem as a theme icon name.
            # str has no .splitext(); the correct spelling is
            # os.path.splitext(os.path.basename(name))[0].
            name = os.path.splitext(os.path.basename(name))[0]

    if not found:
        theme = gtk.icon_theme_get_default()
        try:
            pixbuf = theme.load_icon(name, size, gtk.ICON_LOOKUP_USE_BUILTIN)
        except:
            try:
                filename = os.path.join('/usr/share/pixmaps', name)
                try:
                    pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
                    found = True
                except:
                    # Not present in theme or pixmaps; use a generic icon.
                    pixbuf = theme.load_icon('applications-system', size,
                                             gtk.ICON_LOOKUP_USE_BUILTIN)
            except:
                pixbuf = theme.load_icon(gtk.STOCK_PREFERENCES, 16, 0)
    # Rescale to the requested size when outside the 48x48 sweet spot.
    if pixbuf.get_width() < 48 or pixbuf.get_height() < 48:
        pixbuf = pixbuf.scale_simple(size, size, gtk.gdk.INTERP_BILINEAR)
    if pixbuf.get_width() > 48 or pixbuf.get_height() > 48:
        pixbuf = pixbuf.scale_simple(size, size, gtk.gdk.INTERP_BILINEAR)
    return pixbuf
Пример #7
0
def get_info(index):
    """Look up a president record by index from presidents.txt.

    The data file is expected to sit next to this script; each line is a
    colon-separated record whose first field is the integer index.

    Returns:
        dict with name/date/place/party fields, or {} when not found.
    """
    pres_data = {}
    # os.basename does not exist, and the basename of __file__ would name
    # the script itself; the directory containing the script is wanted.
    file_name = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'presidents.txt')

    with open(file_name) as pres_in:
        for line in pres_in:
            flds = line[:-1].split(":")
            if int(flds[0]) == index:
                pres_data["lastname"] = flds[1]
                pres_data["firstname"] = flds[2]

                pres_data["birthdate"] = mkdate(flds[3])
                pres_data["deathdate"] = mkdate(flds[4])

                pres_data["birthplace"] = flds[5]
                pres_data["birthstate"] = flds[6]

                pres_data["termstart"] = mkdate(flds[7])
                pres_data["termend"] = mkdate(flds[8])

                pres_data["party"] = flds[9]

                break

    return pres_data
Пример #8
0
 def write_pid(self, program):
     """Record this process's pid in <pid-path>/<program>.pid.

     Appends when the pid file already exists so earlier pids are kept.
     """
     pid = str(os.getpid())
     # os.basename does not exist; the correct call is os.path.basename.
     filename = os.path.join(self.const.Const().get('pid-path', make=True),
                             os.path.basename(program) + '.pid')
     mode = 'a' if os.path.exists(filename) else 'w'
     self.str2file(filename, pid, mode=mode)
Пример #9
0
	def trans_package(self):
		"""Convert packages from DVD filesystem format to datastream format.

		Iterates the module-level ``pkg_list`` of package paths and shells
		out to ``pkgtrans`` for each one.
		"""
		for i in pkg_list:
			# os.dirname/os.basename do not exist; use os.path.* instead.
			pkg_src = os.path.dirname(i)
			pkg_name = os.path.basename(i)
			# NOTE(review): confirm the argument order intended for
			# `pkgtrans -s <device> <destination> <pkg>` here.
			os.system('pkgtrans -s %s %s %s' % (i, pkg_src, pkg_name))
Пример #10
0
def main():
    """Drive the image-viewer CGI form based on the request dictionary."""
    # Default Settings
    title = 'Image Viewer'
    image = ''
    action = basename(argv[0])
    back = ''
    forward = ''
    hidden = ''
    if dictionary is None:
        # Nothing To Do
        pass
    # Fixed typo: dict has no had_key; the Python 2 method is has_key.
    elif dictionary.has_key('directory'):
        # Something To Do
        if not dictionary.has_key('hidden'):
            # Just Began Viewing
            if isdir(dictionary['directory']):
                # Check For Pickle
                image_pickle = join(dictionary['directory'], 'image.pickle')
                if exists(image_pickle):
                    # Load Directory Listing
                    dir_data = load(file(image_pickle))
                else:
                    # Create Directory Listing
                    dir_data = update_image_pickle(dictionary['directory'])
                # TODO(review): unfinished in the original -- decide which
                # entry of dir_data becomes the displayed image.
                image = WHAT_TO_DECIDE_HERE
            else:
                # Post Error Message
                dictionary['directory'] = 'No Such Directory'
        else:
            # Find Next Image
            # NOTE(review): this branch body was missing in the original,
            # which made the whole function a SyntaxError; left as a no-op.
            pass
    show_form(title, image, action, back, forward, hidden)
Пример #11
0
def downloadFile(data):
    """Extract a 3CX report CSV URL from *data* and download it locally.

    Returns:
        Local relative path of the downloaded file.
    """
    # Local import: the original called urllib3.request.urlretrieve, which
    # does not exist; the stdlib urllib.request provides urlretrieve.
    import urllib.request

    test = re.search(
        r"https\:\/\/.{1,20}\.my3cx\.it\:5001\/management\/Reports\/.{1,30}_[0-9]{4}_[a-zA-Z0-9]{20}.csv",
        str(data)).group()
    # os.basename does not exist; the correct call is os.path.basename.
    nameFile = os.path.basename(test)
    urllib.request.urlretrieve(test, nameFile)
    return os.path.join('./', nameFile)
Пример #12
0
 def trans_package(self):
     """Convert packages from DVD filesystem format to datastream format.

     Iterates the module-level ``pkg_list`` of package paths and shells
     out to ``pkgtrans`` for each one.
     """
     for i in pkg_list:
         # os.dirname/os.basename do not exist; use os.path.* instead.
         pkg_src = os.path.dirname(i)
         pkg_name = os.path.basename(i)
         # NOTE(review): confirm the argument order intended for
         # `pkgtrans -s <device> <destination> <pkg>` here.
         os.system('pkgtrans -s %s %s %s' % (i, pkg_src, pkg_name))
Пример #13
0
    def post(self):
        """Start an embedding job as a subprocess.

        Dumps the request JSON to a per-job config file, launches
        ``src/embedding.py`` with output redirected to a per-job log file,
        and registers the process in the global ``worker`` table.

        Returns:
            dict with the worker pid and job name.
        """
        global worker
        if "name" in request.json:
            name = request.json["name"]
        else:
            # os.basename does not exist; use os.path.basename.
            name = os.path.basename(request.json['audio_id'])
        config_path = CONFIG_PATH + name + ".json"
        with open(config_path, 'w') as outfile:
            json.dump(request.json, outfile)

        # Optional parameters with defaults.
        feature = request.json[
            "feature"] if "feature" in request.json else "mel"
        method = request.json["method"] if "method" in request.json else "umap"
        cmd = [
            "python", "./src/embedding.py", "--config", config_path, "--name",
            name, "--feature", feature, "--method", method
        ]
        log_path = LOG_PATH + name + ".txt"
        with open(log_path, 'w') as f:
            print(" ".join(cmd))
            p = subprocess.Popen(cmd, stdout=f, stderr=subprocess.STDOUT)
        pid = int(p.pid)
        worker[pid] = {"process": p, "name": name}
        res = {"worker_id": pid, "name": name}
        return res
Пример #14
0
def clone_repo(path):
    """Clone the git repository at *path* into a fresh temp directory.

    Returns:
        Path of the temporary clone; the caller owns its cleanup.

    Raises:
        CalledProcessError: if ``git clone`` fails (the partial checkout
            is cleaned up first).
    """
    # os.basename does not exist; the correct call is os.path.basename.
    tmpdir = tempfile.mkdtemp(prefix='git_clone_' + os.path.basename(path))
    try:
        subprocess.check_output(['git', 'clone', path, tmpdir])
        return tmpdir
    # Python-3-compatible except syntax (original used `except X, e`).
    except CalledProcessError:
        clean_checkout(tmpdir)
        raise
Пример #15
0
 def write_pid(self, program):
     """Record this process's pid in <pid-path>/<program>.pid.

     Appends when the pid file already exists so earlier pids are kept.
     """
     pid = str(os.getpid())
     # os.basename does not exist; the correct call is os.path.basename.
     filename = os.path.join(self.const.Const().get('pid-path', make=True),
                             os.path.basename(program) + '.pid')
     mode = 'a' if os.path.exists(filename) else 'w'
     self.str2file(filename, pid, mode=mode)
Пример #16
0
def BigExternal(lab_dir, logger):
    '''
    Ensure large files named in the config/bigexternal.txt are present in the lab directory
    '''
    big_list = os.path.join(lab_dir, 'config', 'bigexternal.txt')
    if not os.path.isfile(big_list):
        # Nothing to fetch for this lab.
        return
    else:
        logger.debug('BigExternal file found: %s' % big_list)
    full = os.path.abspath(lab_dir)
    homedir = os.environ['HOME']
    # Per-lab download cache under the user's local share directory.
    cache_path = os.path.join(homedir, '.local', 'share', 'labtainer',
                              'big_cache')
    lab_cache = os.path.join(cache_path, os.path.basename(lab_dir))
    if os.path.isfile(big_list):
        with open(big_list) as fh:
            for line in fh:
                line = line.strip()
                if len(line) > 0 and not line.startswith('#'):
                    # Each non-comment line: <source-url> <relative-dest>
                    from_file, to_file = line.split()
                    to_path = os.path.join(lab_dir, to_file)
                    if not os.path.isfile(to_path):
                        cache_to_path = os.path.join(lab_cache, to_file)
                        try:
                            os.makedirs(os.path.dirname(cache_to_path))
                        except:
                            pass
                        if not os.path.isfile(cache_to_path):
                            logger.debug('missing %s, get it from %s success' %
                                         (to_path, from_file))
                            cmd = 'curl -L -R --create-dirs -o %s %s' % (
                                to_path, from_file)
                            logger.debug('cmd: %s' % cmd)
                            ok = os.system(cmd)
                            logger.debug('result: %d' % ok)
                            # Populate the cache for future runs.
                            shutil.copyfile(to_path, cache_to_path)
                        else:
                            try:
                                os.makedirs(os.path.dirname(to_path))
                            except:
                                pass
                            logger.debug('got %s from cache' % to_path)
                            shutil.copyfile(cache_to_path, to_path)

                    else:
                        # File present; sanity-check that it is actually big.
                        size = os.stat(to_path).st_size
                        if size < 50000:
                            # os.basename does not exist; use os.path.basename.
                            if os.path.basename(to_path) == 'home.tar':
                                print('Remove the file at %s, and run again.' %
                                      to_path)
                            else:
                                print('File at %s is supposed to be large.' %
                                      to_path)
                                print(
                                    'Try removing the file and running again.  Or get the correct %s from %s'
                                    % (to_path, from_file))
                                exit(1)
Пример #17
0
def load_game(_):
    """ Offer a selection of files that exist that are loadable """
    load_dir = SAVE_DIR
    # os.path.walk was the old callback-based API (not an iterable of
    # files) and os.basename does not exist; list the directory instead.
    loadable_files = [os.path.basename(f) for f in os.listdir(load_dir)]
    index = menu(loadable_files)
    with shelve.open(loadable_files[index], 'r') as f:
        encounter_loop(f['state'])
Пример #18
0
 def add_ose_layer(self):
     """Copy bundled modules into the JBoss home and symlink overlay jars."""
     shutil.copytree(self._get_resource_path(__name__, "data/modules"),
                     "%s/" % self.jboss_home)
     for module in ['org/jgroups',
                    'org/jboss/as/clustering/common',
                    'org/jboss/as/clustering/jgroups',
                    'org/jboss/as/ee']:
         for jar in glob.glob(
                 "%s/modules/system/layers/base/.overlays/*/%s/main/*.jar"
                 % (self.jboss_home, module)):
             # os.basename does not exist; use os.path.basename.
             # NOTE(review): os.symlink raises if `jar` already exists --
             # confirm whether the jar is meant to be replaced first.
             os.symlink(os.path.basename(jar), jar)
Пример #19
0
 def remove_pid(self, program):
     """Remove this process's pid from <pid-path>/<program>.pid.

     Rewrites the pid file without our pid, or deletes it when empty.
     """
     # os.getpid is a function -- it must be called; otherwise `pid` holds
     # the repr of the builtin and never matches a stored pid.
     pid = str(os.getpid())
     # os.basename does not exist; the correct call is os.path.basename.
     filename = os.path.join(self.const.Const().get("pid-path"),
                             os.path.basename(program) + '.pid')
     pids = self.file2str(filename).strip().split('\n') or []
     # Filter instead of popping while iterating (which skips entries).
     pids = [p for p in pids if p.strip() != pid]
     if pids:
         self.str2file(filename, '\n'.join(pids))
     else:
         os.remove(filename)
Пример #20
0
def persistences():
    """
    Lists the available persistences

    Returns:
        list of persistence base names (text before ``.pickle``).
    """
    # NOTE(review): the glob pattern concatenates without a separator --
    # assumes getPersistenceFolder() returns a trailing-slash path; verify.
    persistenceFolder = getPersistenceFolder()

    persistencesList = []
    for p in glob.glob(persistenceFolder + "*.pickle"):
        # os.basename does not exist; the correct call is os.path.basename.
        persistencesList.append(os.path.basename(p).split('.pickle')[0])

    return persistencesList
Пример #21
0
def persistences():
    """
    Lists the available persistences

    Returns:
        list of persistence base names (text before ``.pickle``).
    """
    # NOTE(review): the glob pattern concatenates without a separator --
    # assumes getPersistenceFolder() returns a trailing-slash path; verify.
    persistenceFolder = getPersistenceFolder()

    persistencesList = []
    for p in glob.glob(persistenceFolder + "*.pickle"):
        # os.basename does not exist; the correct call is os.path.basename.
        persistencesList.append(os.path.basename(p).split('.pickle')[0])

    return persistencesList
Пример #22
0
    def filepath(name):
        """Resolve *name* to an existing file path.

        Returns *name* itself when it exists; otherwise searches the
        configuration directories for its basename. Returns None when the
        file cannot be found anywhere.
        """
        if os.path.exists(name):
            return name

        # os.basename does not exist; the correct call is os.path.basename.
        name = os.path.basename(name) if os.sep in name else name
        for confdir in _conf_directories():
            path = os.path.join(confdir, name)
            if os.path.exists(path):
                return path

        return None
Пример #23
0
 def remove_pid(self, program):
     """Remove this process's pid from <pid-path>/<program>.pid.

     Rewrites the pid file without our pid, or deletes it when empty.
     """
     # os.getpid is a function -- it must be called; otherwise `pid` holds
     # the repr of the builtin and never matches a stored pid.
     pid = str(os.getpid())
     # os.basename does not exist; the correct call is os.path.basename.
     filename = os.path.join(self.const.Const().get("pid-path"),
                             os.path.basename(program) + '.pid')
     pids = self.file2str(filename).strip().split('\n') or []
     # Filter instead of popping while iterating (which skips entries).
     pids = [p for p in pids if p.strip() != pid]
     if pids:
         self.str2file(filename, '\n'.join(pids))
     else:
         os.remove(filename)
Пример #24
0
def load_pdf_metadata(pdf_path: str, current_obj: Mapping[T,
                                                          T]) -> Mapping[T, T]:
    """
    Load the pdf metadata

    Parses *pdf_path*, storing the metadata dict, its dimension limits and
    the pdf name on *current_obj*, and records a 'metadata' event.
    """
    # os.basename does not exist; the correct call is os.path.basename.
    pdf_name = os.path.basename(pdf_path)
    df, limit = parse_pdf(pdf_path)
    df = df.to_dict()
    limit = list(limit)
    current_obj['metadata'] = df
    current_obj['metadata_dimension'] = limit
    current_obj['pdf_name'] = pdf_name
    current_obj['event_stream'].append('metadata')
    return current_obj
Пример #25
0
def reimpose_neighbour_masks(source_galaxies, source_masks, target, compressed=True):
    """Direct this function towards a set of neighbour free MEDS files"""
    if compressed:
        suffix = ".fz"
    else:
        suffix = ""

    files = glob.glob("%s/DES*.fits%s"%(source, suffix))

    print "Will remove neighbour masking in %d MEDS files."

    for i, f in files:
        tile = os.basename(f)[:12]
        print i, tile
        m = s.meds_wrapper(f)
Пример #26
0
def writetrainlist(path_root):
    """Write '<train image> <label image>' pairs to <path_root>.txt.

    Each entry under <path_root>/oriimage is the label image for the
    identically named class directory directly under <path_root>.
    """
    oriimage_root = path_root + '/oriimage'
    classify_temp = os.listdir(oriimage_root)
    strlist = ''
    for c in classify_temp:
        # The original image serves as the class label.
        labelimage = os.path.join(oriimage_root, c)
        # os.basename does not exist; use os.path.basename (a no-op here,
        # since os.listdir already yields bare names).
        croot = os.path.join(path_root, os.path.basename(c))
        for d in os.listdir(croot):
            trainimage = os.path.join(croot, d)

            strlist += trainimage + ' ' + labelimage + '\n'

    # Context manager guarantees the listing file is closed on error.
    with open(path_root + '.txt', 'w') as txtlist:
        txtlist.write(strlist)
Пример #27
0
def get_product_id(specification, version):
    '''
    Get the product id from the specification and version.
    @param specification: specification name to create product id from
    @param version: version to used to create the product
    @return: product id
    '''
    match = TYPE_TAIL_RE.match(specification)
    if not match or not match.group(1) in ["job-spec", "hysds-io"]:
        raise Exception("Invalid specification path")
    ptype = "job" if match.group(1) == "job-spec" else "hysds-io"
    name = match.group(2)
    if name == "":
        # Fall back to the repository name when the spec carries none.
        # os.basename does not exist; the correct call is os.path.basename.
        name = get_repo(os.path.basename(specification))
    return "{0}-{1}:{2}".format(ptype, name, version)
Пример #28
0
    def post(self):
        """Start a source-separation job as a subprocess.

        Builds a timeline from the localization events in the request,
        writes the localization/timeline JSON files and launches
        ``micarrayx-separate`` with output redirected to a per-job log.

        Returns:
            dict with the worker pid and job name.
        """
        global worker
        src_path = BASE_PATH + request.json['audio_id']
        tf_path = TF_PATH + request.json['transfer_function']

        if "name" in request.json:
            name = request.json["name"]
        else:
            # os.basename does not exist; use os.path.basename.
            name = os.path.basename(request.json['audio_id'])
        log_path = LOG_PATH + name + ".txt"

        # Optional tuning parameter with a default.
        interval = request.json['interval'] if 'interval' in request.json else 400
        localization = request.json['localization']
        event_list = localization["event_list"]
        tl = convert_events2tl(event_list, interval)
        loc_filename = RESULT_PATH + name + ".loc.json"
        tl_filename = RESULT_PATH + name + ".tl.json"
        with open(loc_filename, 'w') as fp:
            json.dump(localization, fp)
        with open(tl_filename, 'w') as fp:
            json.dump({"interval": interval, "tl": tl}, fp)

        os.makedirs(RESULT_PATH + name + "_sep/", exist_ok=True)
        cmd = [
            "micarrayx-separate",
            tf_path,
            src_path,
            "--timeline", tl_filename,
            "--out", RESULT_PATH + name + "_sep/sep",
            "--out_sep_spectrogram_fig",
            "--out_sep_spectrogram_csv",
            ]
        with open(log_path, 'w') as f:
            print(" ".join(cmd))
            p = subprocess.Popen(cmd, stdout=f, stderr=subprocess.STDOUT)
        pid = int(p.pid)
        worker[pid] = {"process": p, "name": name}
        res = {"worker_id": pid, "name": name}
        return res
def _convert_to_trRecord(fileName, buffer4Image, label, text, height, width):
    """Builds proto

    Packs one encoded JPEG image and its annotations into a
    tf.train.Example.
    """
    color = 'RGB'
    channel = 3
    # NOTE(review): 'JPEG' is bound here but the file *name* is stored
    # under image/format below; behavior kept, flagging the likely mix-up.
    image_formate = 'JPEG'

    image = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/color': _bytes_feature(tf.compat.as_bytes(color)),
        'image/channel': _int64_feature(channel),
        'image/class/label': _int64_feature(label),
        'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
        # os.basename does not exist; the correct call is os.path.basename.
        'image/format': _bytes_feature(
            tf.compat.as_bytes(os.path.basename(fileName))),
        'image/encoded': _bytes_feature(tf.compat.as_bytes(buffer4Image))}))
    return image
Пример #30
0
def regroup_tarfiles(cc):
    """Re-group all chunks again per [Case folder] compressed file. First all
    chunks are copied to the node scratch disc, then start working on them.
    This only works on a node with PBS stuff.

    Make sure to maintain the same location as defined by the tags!

    [res_dir] and [Case folder] could be multiple directories deep, bu the
    final archive will only contain the files (no directory structure), and
    the name of the archive is that of the last directory:
        /[res_dir]/[Case folder]/[Case folder].tar.xz
        /res/dir/case/folder/dlcname/dlcname.tar.xz

    Parameters
    ----------

    path_pattern : str
        /path/to/files/*.tar.xz

    """

    USER = os.getenv('USER')
    PBS_JOBID = os.getenv('PBS_JOBID')
    scratch = os.path.join('/scratch', USER, PBS_JOBID)
    src = os.getenv('PBS_O_WORKDIR')

    # NOTE(review): hard-coded pattern overrides the documented parameter;
    # confirm whether `cc` should supply it.
    path_pattern = '/home/dave/SimResults/NREL5MW/D0022/prepost-data/*.xz'

    for ffname in tqdm(glob.glob(path_pattern)):
        appendix = os.path.basename(ffname).split('_')[0]
        with tarfile.open(ffname, mode='r:xz') as tar:
            # create new tar files if necessary for each [Case folder]
            for tarinfo in tar.getmembers():
                t2_name = os.path.basename(os.path.dirname(tarinfo.name))
                t2_dir = os.path.join(os.path.dirname(path_pattern), t2_name)
                if not os.path.isdir(t2_dir):
                    os.makedirs(t2_dir)
                t2_path = os.path.join(t2_dir, t2_name + '_%s.tar' % appendix)
                fileobj = tar.extractfile(tarinfo)
                # change the location of the file in the new archive:
                # the location of the archive is according to the folder
                # structure as defined in the tags, remove any subfolders
                # os.basename does not exist; use os.path.basename (as the
                # other calls in this function already do).
                tarinfo.name = os.path.basename(tarinfo.name)
                with tarfile.open(t2_path, mode='a') as t2:
                    t2.addfile(tarinfo, fileobj)
    def find(self, query):
        """Find nodes based on the query.

        args: metric query.

        Returns a list of Nodes.
        """
        cleaned_query = query.replace('\\', '')
        query_parts = cleaned_query.split('.')

        for root_dir in self.storage_dirs:
            for abs_file_path, metric_name in self._find(
                    root_dir, query_parts):

                # Filter out . and ..
                # os.basename does not exist; use os.path.basename.
                if os.path.basename(abs_file_path).startswith('.'):
                    continue
                # NOTE(review): the body ends here without yielding or
                # collecting anything -- it appears truncated in this copy.
Пример #32
0
def m():
    """Backfill fake git commits for files using generated timestamps."""
    excluese_times = get_json('git_commit/timecmt.json')
    times = generate_timeline(excluese_times)

    excluese_file = get_json('git_commit/ex_file.json')
    # enumerate() replaces the manually maintained idx counter.
    for idx, fpath in enumerate(
            get_files('/Users/chenxin/tools/algorithms', excluese_file)):

        # os.basename does not exist; the correct call is os.path.basename.
        excluese_file.append(os.path.basename(fpath))
        time = times[idx]
        fackaddfile(
            fpath,
            time + ' {}:{}:{}'.format(get_radom(8, 20), get_radom(0, 60),
                                      get_radom(0, 60)))
        excluese_times.append(time)

    dumps(excluese_times, 'base.json')
    dumps(excluese_file, 'ex_files.json')
Пример #33
0
def reimpose_neighbour_masks(source_galaxies,
                             source_masks,
                             target,
                             compressed=True):
    """Direct this function towards a set of neighbour free MEDS files"""
    if compressed:
        suffix = ".fz"
    else:
        suffix = ""

    files = glob.glob("%s/DES*.fits%s" % (source, suffix))

    print "Will remove neighbour masking in %d MEDS files."

    for i, f in files:
        tile = os.basename(f)[:12]
        print i, tile
        m = s.meds_wrapper(f)
Пример #34
0
def ndPyLibExportCam(isImagePlane, isFbx, isAbc, frameHandle, CameraScale):
    """Export the current scene's camera into the shot publish folder."""
    if cmds.file(q=True, modified=True) == 1:
        ndPyLibPlatform('please save scene file...')
    else:
        filepath = cmds.file(q=True, sceneName=True)
        # os.basename does not exist; the correct call is os.path.basename.
        filename = os.path.basename(filepath)

        # NOTE(review): cmds.match mirrors MEL's match(); confirm it exists
        # in this environment.
        project = cmds.match('P:/Project/[a-zA-Z0-9]+/', filepath)
        roll = cmds.match('/roll[a-zA-Z0-9]+/', filepath)
        roll = cmds.match('roll[a-zA-Z0-9]+', roll)
        sequence = cmds.match('/s[0-9]+[a-zA-Z]*/', filepath)
        sequence = cmds.match('s[0-9]+[a-zA-Z]*/', filepath)
        shot = cmds.match('/c[0-9]+[a-zA-Z_]*/', filepath)
        shot = cmds.match('c[0-9]+[a-zA-Z_]*/', shot)

        shotpath = os.path.join(project, 'shots', roll, sequence, shot)
        publishpath = os.path.join(shotpath, 'publish', 'Cam', filename)

        oFilename = os.path.join(sequence, shot, '_cam')

        ndPyLibPlatform('project : ' + project)
        ndPyLibPlatform('roll : ' + roll)
        ndPyLibPlatform('sequence : ' + sequence)
        ndPyLibPlatform('shot : ' + shot)
        ndPyLibPlatform('filename : ' + filename)

        # os.exists / os.mkdir(md=True) are invalid; use os.path.exists and
        # os.makedirs for recursive directory creation.
        if not os.path.exists(shotpath):
            ndPyLibPlatform('no exist folder...')
        else:
            if not os.path.exists(publishpath):
                os.makedirs(publishpath)

            outputfiles = ndPyLibExportCam_exportCamera(
                publishpath, oFilename, isImagePlane, isFbx, isAbc,
                frameHandle, CameraScale)

            for o in outputfiles:
                ndPyLibPlatform('output file : ' + o)

            for o in outputfiles:
                # os.path.basename takes a single argument (the original
                # passed a spurious second argument).
                destFile = os.path.join(
                    os.path.dirname(os.path.dirname(o)),
                    os.path.basename(o))
                cmds.sysFile(o, copy=destFile)
Пример #35
0
    def _read(self, path):
        """Load numeric data from *path* into a numpy array.

        ``.csv`` files are parsed with numpy (header row skipped); any
        other file is read line by line as whitespace-separated floats.

        Raises:
            Exception: if the file does not exist.
        """
        if os.path.exists(path) is False:
            logging.error('File {0} is not found'.format(path))
            raise Exception("File not found")

        # os.basename does not exist; the correct call is os.path.basename.
        basename = os.path.basename(path)
        if basename.find('.csv') != -1:
            logging.info("Try to load .csv file is loaded")
            # Context manager closes the handle np.loadtxt reads from.
            with open(path, "rb") as f:
                return np.loadtxt(f, delimiter=",", skiprows=1)
        else:
            # load every line as a row of floats
            logging.info("Try to load data from file")
            result = []
            with open(path, 'r') as f:
                for line in f:
                    result.append([float(item) for item in line.split()])
            return np.array(result)
Пример #36
0
def main():
    """Reply to new word-square tweets with a matching image and summary."""
    them = 'each_wordsquare'
    me = 'square_meaning'

    dir_path = os.path.dirname(os.path.realpath(__file__))
    download_path = os.path.join(dir_path, "downloaded_images")
    try:
        os.makedirs(download_path)
    except OSError as exc:
        # Directory already exists.
        pass

    my_tweets = get_tweets(me)
    their_tweets = get_tweets(them)

    their_tweets_by_id = {x.id: x for x in their_tweets}
    previous_tweets = set(x.in_reply_to_status_id for x in my_tweets
                          if x.in_reply_to_status_id is not None)

    # Tweets of theirs we have not yet replied to.
    new_tweets = set(their_tweets_by_id.keys()) - previous_tweets

    for tweet in sorted(new_tweets):
        text = their_tweets_by_id[tweet].text
        image_meta = webdriver.load_image_info(text, 5)

        for image in image_meta:
            if image.get(SUMMARY):
                try:
                    image_name = os.path.join(
                        download_path, os.path.basename(image.get(IMAGE)))
                    image_name = image_name.split('?')[0]
                    webdriver.download_image(image.get(IMAGE), image_name)

                    send_tweet(image_name, image.get(SUMMARY), them, tweet)
                except:
                    continue
                else:
                    break
        else:
            # No summarised image worked; fall back to any titled page.
            for image in image_meta:
                if image.get(PAGE_TITLE):
                    # os.basename does not exist; use os.path.basename
                    # (matching the call in the loop above).
                    image_name = os.path.join(
                        download_path, os.path.basename(image.get(IMAGE)))
                    webdriver.download_image(image.get(IMAGE), image_name)
Пример #37
0
def load_page_data(img_dir: str, current_obj: Mapping[T, T]) -> Mapping[T, T]:
    """
    Iterate through the img directory, and retrieve the page level data
    """
    page_data = []
    # The original iterated an undefined `img_tmp`; the img_dir parameter
    # is the directory to scan.
    for f in glob.glob(f'{img_dir}/*'):
        page_obj = {}
        # os.basename does not exist; the correct call is os.path.basename.
        # NOTE(review): assumes file names are bare page numbers (no
        # extension) -- int() would fail otherwise; confirm upstream naming.
        page_num = int(os.path.basename(f))
        img = Image.open(f)
        width, height = img.size
        with open(f, 'rb') as bimage:
            bstring = bimage.read()
            page_obj['bytes'] = bstring
        page_obj['page_width'] = width
        page_obj['page_height'] = height
        page_obj['page_num'] = page_num
        page_data.append(page_obj)
    current_obj['page_data'] = page_data
    current_obj['event_stream'] = ['imagedata']
    return current_obj
Пример #38
0
    def regularStylePath(self):
        """Return the fallback static UFO path for this family.

        Non-italic families try Regular, then Light, then Thin; italic
        families use the Italic UFO. Also prints a notice that the
        variable build failed and a static ttf will be generated.
        """
        if "Italic" not in os.path.basename(self.familyPath):
            basicStylePath_ = os.path.join(
                self.familyPath,
                os.path.basename(self.familyPath).strip() + "-Regular.ufo")
            if not os.path.exists(basicStylePath_):
                basicStylePath_ = os.path.join(
                    self.familyPath,
                    os.path.basename(self.familyPath).strip() + "-Light.ufo")
            if not os.path.exists(basicStylePath_):
                basicStylePath_ = os.path.join(
                    self.familyPath,
                    os.path.basename(self.familyPath).strip() + "-Thin.ufo")
        else:
            # os.basename does not exist; use os.path.basename for
            # consistency with the branches above.
            basicStylePath_ = os.path.join(
                self.familyPath,
                os.path.basename(self.familyPath).strip() + "-Italic.ufo")
        print("    >>> " + self.n + "Variable has failed.\n        "\
              "        A static ttf version of the 'Regular' weight"\
              "\n        will be generated instead.")

        return basicStylePath_
Пример #39
0
def get_utkface_np_data(img_dir, img_size):
    """Load UTKFace images and their labels as numpy arrays.

    Filename annotation format: [age]_[gender]_[race]_[date&time].jpg

    :param img_dir: directory containing the *.jpg UTKFace images
    :param img_size: target square size each image is resized to
    :return: tuple (np_imgs, labels) of image array and (age, gender, race) array
    :raises ValueError: when a filename does not match the annotation format
    """
    np_imgs = []
    labels = []

    imgs_list = glob(os.path.join(img_dir, "*.jpg"))
    for img in imgs_list:
        pil_img = Image.open(img)
        resized_pil_img = pil_img.resize((img_size, img_size))
        np_img = np.asarray(resized_pil_img)
        # BUG FIX: split only the basename (a directory name containing '_'
        # would break the check) and use os.path.basename instead of the
        # nonexistent os.basename.
        name_split = os.path.basename(img).split("_")
        if not len(name_split) == 4:
            raise ValueError("annotation format is not correct for {}"
                             .format(os.path.basename(img)))
        age, gender, race, _ = name_split
        np_imgs.append(np_img)
        labels.append((age, gender, race))

    np_imgs = np.array(np_imgs)
    labels = np.array(labels)

    return np_imgs, labels
Пример #40
0
def find_latest_dl_date(save_dir):
    """Return the most recent download date among the PDFs in *save_dir*.

    Filenames are expected to carry a YYYYMMDD date as their third
    underscore-separated field (e.g. ``report_x_20210314_final.pdf``).

    :param save_dir: directory scanned (non-recursively) for *.pdf files
    :return: datetime (at midnight) of the latest date found
    :raises ValueError: if no matching PDF exists or a date field is not numeric
    """
    latest_date = 0

    for filepath in glob.glob(save_dir + '/*.pdf'):
        # BUG FIX: os.basename does not exist; use os.path.basename.
        filename = os.path.basename(filepath)
        filedate = int(filename.split('_')[2])
        if filedate > latest_date:
            latest_date = filedate

    # Re-split the YYYYMMDD integer into its date components.
    latest_date = str(latest_date)
    year = int(latest_date[:4])
    month = int(latest_date[4:6])
    day = int(latest_date[6:])

    return datetime.datetime(year, month, day, 0, 0, 0)
Пример #41
0
def BigExternal(lab_dir):
    '''
    Ensure large files named in the config/bigexternal.txt are present in the lab directory

    Each non-comment line of the file is "<source_url> <relative_dest>".
    Missing destinations are fetched with curl; suspiciously small existing
    files are reported (and the program exits unless the file is home.tar).
    '''
    big_list = os.path.join(lab_dir, 'config', 'bigexternal.txt')
    if not os.path.isfile(big_list):
        # No bigexternal.txt => nothing to fetch.
        return
    with open(big_list) as fh:
        for line in fh:
            line = line.strip()
            if len(line) == 0 or line.startswith('#'):
                continue
            from_file, to_file = line.split()
            to_path = os.path.join(lab_dir, to_file)
            if not os.path.isfile(to_path):
                # NOTE(review): shell command is built from file content --
                # assumes bigexternal.txt is trusted.
                cmd = 'curl -L -R --create-dirs -o %s %s' % (to_path,
                                                             from_file)
                print('cmd: %s' % cmd)
                ok = os.system(cmd)
                print('missing %s, get it from %s success %d' %
                      (to_path, from_file, ok))
            else:
                size = os.stat(to_path).st_size
                if size < 50000:
                    # BUG FIX: was os.basename (no such attribute on os).
                    if os.path.basename(to_path) == 'home.tar':
                        print('Remove the file at %s, and run again.' %
                              to_path)
                    else:
                        print('File at %s is supposed to be large.' %
                              to_path)
                        print(
                            'Try removing the file and running again.  Or get the correct %s from %s'
                            % (to_path, from_file))
                        exit(1)
Пример #42
0
 def save(self):
     """Save ID3 data (writing both v1 and v2 tags) back to the file."""
     try:
         self.id3.save(v1=2)
     except Exception as e:
         # BUG FIX: os.basename -> os.path.basename; narrow the bare except
         # and chain the original error instead of discarding it.
         raise Exception ("Except error: ID3 from file %s was NOT saved." %(os.path.basename(self.f))) from e
Пример #43
0
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory."""
    # Use a context manager so the handle is closed deterministically
    # (the original leaked it until garbage collection).
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()


# NOTE(review): this setup() call is truncated in this snippet -- the closing
# parenthesis (and, per the trailing comments, an install_requires list) is
# missing, so it cannot run as-is.
setup(
    name='sagemaker-chainer-container',
    version='1.0',
    description=
    'Open source library template for creating containers to run on Amazon SageMaker.',
    packages=find_packages(where='src', exclude='test'),
    package_dir={
        'sagemaker_chainer_container': 'src/sagemaker_chainer_container'
    },
    py_modules=[
        # NOTE(review): os.splitext / os.basename do not exist -- these should
        # be os.path.splitext / os.path.basename.
        os.splitext(os.basename(path))[0] for path in glob('src/*.py')
    ],
    long_description=read('README.rst'),
    author='Amazon Web Services',
    license='Apache License 2.0',
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
    ],
    # Temporarily freeze sagemaker-containers version to 2.2.5 until we have a proper fix
    # freeze numpy version because of the python2 bug
Пример #44
0
from glob import glob
import os

from setuptools import setup, find_packages


def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory."""
    # Context manager closes the handle deterministically
    # (the original leaked it until garbage collection).
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()


# NOTE(review): this setup() call is truncated in this snippet -- the closing
# parenthesis is missing, so it cannot run as-is.
setup(name="sagemaker",
      version="1.9.2",
      description="Open source library for training and deploying models on Amazon SageMaker.",
      packages=find_packages('src'),
      package_dir={'': 'src'},
      # NOTE(review): os.splitext / os.basename do not exist -- these should
      # be os.path.splitext / os.path.basename.
      py_modules=[os.splitext(os.basename(path))[0] for path in glob('src/*.py')],
      long_description=read('README.rst'),
      author="Amazon Web Services",
      url='https://github.com/aws/sagemaker-python-sdk/',
      license="Apache License 2.0",
      keywords="ML Amazon AWS AI Tensorflow MXNet",
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Intended Audience :: Developers",
          "Natural Language :: English",
          "License :: OSI Approved :: Apache Software License",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.5",
      ],
Пример #45
0
            sys.stdout.flush()
    # NOTE(review): fragment -- the enclosing function's header is not in this
    # snippet. The bare `except:` silently returns the untransposed list when
    # `array(out)` fails (e.g. ragged rows); prefer `except Exception`.
    try:
        return array(out).transpose()
    except:
        return out


if __name__ == '__main__':
    import sys,os
    # Command line: <fname> [erng=<float> ...]
    fname=sys.argv[1]
    exargs={}
    for a in sys.argv[2:]:
        if a[:5]=='erng=': exargs['erng']=float(a[5:])
    if fname[-4:]=='.npy':
        # BUG FIX: os.dirname/os.basename do not exist; use os.path.*.
        fdir=os.path.dirname(fname)
        fname=os.path.basename(fname)
        rep=meas_all(fname,fdir)
    else:
        # NOTE(review): `dt` is assumed to be populated by init_single -- confirm.
        init_single(fname)
        rep=anal(range(len(dt)))
    for a in rep:
        # print(a) behaves identically under Python 2 and 3 for one argument
        # (the original `print a` is Python-2-only syntax).
        print(a)


# Module-level call counter used by bump(); a `global` statement is only
# meaningful inside a function, so the redundant module-level
# `global counter` line was removed.
counter=0
def bump(w2=310,w1=1500,vl=None,slim=[2.2,2.6],rep=1,min_dis=0.015,loud=0,fit_wid=2):
    # NOTE(review): this snippet is corrupted -- after the first two lines the
    # body switches to unrelated application-file-writing code (a regex over
    # an undefined `toWrite`, Python 2 `print` statements, and a bare
    # `if #male` whose condition has been lost), so it cannot parse or run.
    # Preserved as-is for reference.
    # NOTE(review): `slim=[2.2,2.6]` is a mutable default argument; if the
    # real implementation mutates it, the list is shared across calls.
    global counter
    if vl==None: 
        oxide=polyval(spectra.cau_sio2,px)
        epsil=profit.dielect(px,spectra.lor_si_low[0],spectra.lor_si_low[1])
        s = re.search(r"((90[0-9]{7})(\s*\n*\s*)(([A-Z][a-z]*[^\S\r\n]*)*\n*[^\S\r\n]*)(([A-Z][a-z]*[^\S\r\n]*)*)*)", toWrite)

        print s

        untrimmed = ''.join([i for i in s.group() if not i.isdigit()])
        trimmed1 = untrimmed.strip()
        trimmed2 = trimmed1.replace("\n", ", ")
        candidateName = trimmed2.replace(", ,",", ")

        print "Candidate Name: " + str(candidateName)

        if #male
            filename = directory + "/Male/" + candidateName + "/" + candidateName + " - Application.txt"
        else #female
            filename = directory + "/Female/" + candidateName + "/" + candidateName + " - Application.txt"


        # Create the file, writing the GTID and then the application content
        # We have the write the GTID because it was rmemoved on the split.
        newFile = file(filename, "w")
        newFile.write(toWrite)
        newFile.close()


# Resolve the StaffSelectionOutput directory relative to this module's
# on-disk location, then process the sample applications.
currentpath = os.path.realpath(__file__)
# BUG FIX: os.basename does not exist; use os.path.basename.
thisFile = os.path.basename(__file__)
directorypath = re.split(r"(.*)\\SSSCode\\Modules\\"+thisFile, currentpath)
# NOTE(review): assumes a Windows-style path layout; directorypath[1] is the
# capture group preceding \SSSCode\Modules -- confirm against deployment.
directory = directorypath[1] + "\SSSCodeTestingMaterial\StaffSelectionOutput"

sortApps(sampletxt, directory)
Пример #47
0
        # NOTE(review): fragment -- the enclosing method's header is not in
        # this snippet, so it is preserved byte-for-byte.
        for o in (0,1):                         # yes, it's not quite right to use skyflat_o*r*c
            skycont_c = (bkg_oyc[o].T[isskycont_oyc[o].T]/ \
                skyflat_orc[o].T[isskycont_oyc[o].T]).reshape((cols,-1)).mean(axis=-1)
            skycont_yc = skycont_c*skyflat_orc[o]
            profile_oyc[o] -= skycont_yc
            rblk = 1; cblk = int(cols/16)
            profile_oyc[o] = blksmooth2d(profile_oyc[o],(okprof_oyc[o] & ~isline_oyc[o]),   \
                        rblk,cblk,0.25,mode='mean')
            for c in range(cols):
                psf_orc[o,:,c] = shift(profile_oyc[o,:,c],-drow_oc[o,c],cval=0,order=1)
                isbkgcont_orc[o,:,c] = shift(isbkgcont_oyc[o,:,c].astype(int),-drow_oc[o,c],cval=0,order=1) > 0.1
                isnewbadbin_orc[o,:,c] = shift(isnewbadbin_oyc[o,:,c].astype(int),-drow_oc[o,c],cval=1,order=1) > 0.1
            # NOTE(review): `cols/2` as an index is Python-2 integer division;
            # under Python 3 it yields a float and raises -- should be cols//2.
            targetrow_od[o,0] = trow_o[o] - np.argmax(isbkgcont_orc[o,trow_o[o]::-1,cols/2] > 0)
            targetrow_od[o,1] = trow_o[o] + np.argmax(isbkgcont_orc[o,trow_o[o]:,cols/2] > 0)
        maprow_od = np.vstack((edgerow_od[:,0],targetrow_od[:,0],targetrow_od[:,1],edgerow_od[:,1])).T
        maprow_od += np.array([-2,-2,2,2])

#        pyfits.PrimaryHDU(psf_orc.astype('float32')).writeto('psf_orc.fits',clobber=True) 
#        pyfits.PrimaryHDU(skyflat_orc.astype('float32')).writeto('skyflat_orc.fits',clobber=True)         
        return psf_orc,skyflat_orc,isnewbadbin_orc,isbkgcont_orc,maprow_od,drow_oc
                        
#---------------------------------------------------------------------------------------
if __name__=='__main__':
    # For each input FITS file, map its signal and write psf/skyflat products.
    infilelist=sys.argv[1:]
    for infile in infilelist:   # renamed from `file`, which shadows a builtin
        hdulist = pyfits.open(infile)
        # BUG FIX: os.basename does not exist; use os.path.basename.
        name = os.path.basename(infile).split('.')[0]
        # NOTE(review): specpolsignalmap appears to return 6 values (see the
        # return above) but only 5 are unpacked here -- isbkgcont_orc may be
        # missing; confirm the function's actual signature.
        psf_orc,skyflat_orc,isnewbadbin_orc,maprow_od,drow_oc = specpolsignalmap(hdulist)
        pyfits.PrimaryHDU(psf_orc.astype('float32')).writeto(name+'_psf.fits',clobber=True)
        pyfits.PrimaryHDU(skyflat_orc.astype('float32')).writeto(name+'_skyflat.fits',clobber=True)
Пример #48
0
def runGame(userIDs, muValues, sigmaValues):
    """Run one two-player game between the given users' bots, update their
    TrueSkill ratings and scores in the database, and record the game/replay.

    :param userIDs: sequence of exactly two user ids
    :param muValues: current TrueSkill mu values, parallel to userIDs
    :param sigmaValues: current TrueSkill sigma values, parallel to userIDs
    """
    # Set up a fresh working directory for the sandboxed game.
    workingPath = "workingPath"
    if os.path.exists(workingPath):
        shutil.rmtree(workingPath)
    os.makedirs(workingPath)
    os.chmod(workingPath, 0o777)

    shutil.copyfile("TR_environment_main.py", os.path.join(workingPath, "TR_environment_main.py"))
    shutil.copyfile("TR_environment_networking.py", os.path.join(workingPath, "TR_environment_networking.py"))

    sandbox = Sandbox(workingPath)

    # Unpack and setup bot files
    botPaths = [os.path.join(workingPath, str(userID)) for userID in userIDs]
    for botPath in botPaths:
        os.mkdir(botPath)
    for a in range(len(userIDs)):
        unpack("../outputs/TR/"+ str(userIDs[a]) + ".zip", botPaths[a])
    for botPath in botPaths:
        print(botPath)
        os.chmod(botPath, 0o777)
        os.chmod(os.path.join(botPath, "run.sh"), 0o777)

    # Build the shell command that will run the game. Executable called environment houses the game environment
    runGameShellCommand = "python3 /var/www/nycsl/problems/workers/"+workingPath+"/TR_environment_main.py "
    for botPath in botPaths:
        runGameShellCommand += "\"cd "+os.path.abspath(botPath)+"; "+os.path.join(os.path.abspath(botPath), "run.sh")+"\" "
    print(runGameShellCommand)

    # Run game, collecting its output lines.
    sandbox.start(runGameShellCommand)
    lines = []
    while True:
        line = sandbox.read_line(200)
        print(line)
        if line == None:
            break
        lines.append(line)

    # Get player ranks and scores by parsing shellOutput
    if "won!" in lines[-2]:
        winnerIndex = int(lines[-2][len("Player ") : -len("won!")]) - 1
        # BUG FIX: original referenced the undefined `winnerNumber`; with two
        # players the loser index is simply the other index.
        loserIndex = 1 - winnerIndex
    else:
        # No explicit winner reported: pick one at random.
        winnerIndex = random.randrange(0, 2)
        loserIndex = 1 - winnerIndex

    winnerID = userIDs[winnerIndex]
    loserID = userIDs[loserIndex]

    # Update trueskill mu and sigma values
    winnerRating = trueskill.Rating(mu=float(muValues[winnerIndex]), sigma=float(sigmaValues[winnerIndex]))
    loserRating = trueskill.Rating(mu=float(muValues[loserIndex]), sigma=float(sigmaValues[loserIndex]))
    winnerRating, loserRating = trueskill.rate_1vs1(winnerRating, loserRating)

    # BUG FIX: original referenced the undefined `newRatings`; score each
    # player by the conservative TrueSkill estimate mu - 3*sigma of their own
    # updated rating.  NOTE(review): SQL is built with %-formatting of numeric
    # values -- prefer parameterized queries.
    cursor.execute("UPDATE Submission SET mu = %f, sigma = %f, score = %d WHERE userID = %d and problemID = %d" % (winnerRating.mu, winnerRating.sigma, int(winnerRating.mu - (3*winnerRating.sigma)), winnerID, TRON_PROBLEM_ID))
    cursor.execute("UPDATE Submission SET mu = %f, sigma = %f, score = %d WHERE userID = %d and problemID = %d" % (loserRating.mu, loserRating.sigma, int(loserRating.mu - (3*loserRating.sigma)), loserID, TRON_PROBLEM_ID))
    cnx.commit()

    # Get replay file by parsing shellOutput
    replayFilename = lines[-1][len("Output file is stored at ") : len(lines[-1])]
    shutil.move(os.path.join(workingPath, replayFilename), "../storage")

    # Store results of game. BUG FIX: os.basename -> os.path.basename, and use
    # the same basename in the SELECT so it matches what was inserted.
    replayBasename = os.path.basename(replayFilename)
    cursor.execute("INSERT INTO Game (replayFilename) VALUES (\'"+replayBasename+"\')")
    cnx.commit()

    cursor.execute("SELECT gameID FROM Game WHERE replayFilename = \'"+replayBasename+"\'")
    gameID = cursor.fetchone()['gameID']

    # BUG FIX: four columns were listed but only three %d placeholders given.
    cursor.execute("INSERT INTO GameToUser (gameID, userID, rank, index) VALUES (%d, %d, %d, %d)" % (gameID, winnerID, 0, 0 if userIDs[0] == winnerID else 1))
    cursor.execute("INSERT INTO GameToUser (gameID, userID, rank, index) VALUES (%d, %d, %d, %d)" % (gameID, loserID, 1, 0 if userIDs[0] == loserID else 1))
    cnx.commit()

    # Delete working path
    shutil.rmtree(workingPath)
Пример #49
0
    # Derived file locations for the SumaClust centroids output.
    # NOTE(review): basepath uses sumaclust_output_basename, not the
    # .centroids basename -- confirm this is intentional.
    sumaclust_centroids_basename = sumaclust_output_basename + '.centroids'
    sumaclust_centroids_basepath = os.path.join(args.db_dir, sumaclust_output_basename)
    sumaclust_centroids_filename = sumaclust_centroids_basename + '.fasta'
    sumaclust_centroids_filepath = os.path.join(args.db_dir, sumaclust_centroids_filename)

    clustered_ref_db_basename = cleaned_complete_ref_db_basename + '_NR{0}'.format(clustering_id_threshold_int)
    if args.by_kingdom:
        clustered_ref_db_basename += '_bk'
    clustered_ref_db_basepath = os.path.join(args.db_dir, clustered_ref_db_basename)
    clustered_ref_db_filename = clustered_ref_db_basename + '.fasta'
    clustered_ref_db_filepath = os.path.join(args.db_dir, clustered_ref_db_filename)

    # This is the output MATAM db basepath to pass to matam_assembly.py
    output_ref_db_basename = clustered_ref_db_basename
    if args.out_db_name:
        # BUG FIX: os.basename does not exist; use os.path.basename.
        output_ref_db_basename = os.path.basename(args.out_db_name)
    output_ref_db_basepath = os.path.join(args.db_dir, output_ref_db_basename)
    # This is the output MATAM db file names
    # For the complete db fasta file
    output_complete_ref_db_basename = output_ref_db_basename + '.complete'
    output_complete_ref_db_basepath = os.path.join(args.db_dir, output_complete_ref_db_basename)
    output_complete_ref_db_filename = output_complete_ref_db_basename + '.fasta'
    output_complete_ref_db_filepath = os.path.join(args.db_dir, output_complete_ref_db_filename)
    # For the complete db taxo file
    output_complete_ref_db_taxo_filename = output_complete_ref_db_basename + '.taxo.tab'
    output_complete_ref_db_taxo_filepath = os.path.join(args.db_dir, output_complete_ref_db_taxo_filename)
    # For the clustered db fasta file
    output_clustered_ref_db_basename = output_ref_db_basename + '.clustered'
    output_clustered_ref_db_basepath = os.path.join(args.db_dir, output_clustered_ref_db_basename)
    output_clustered_ref_db_filename = output_clustered_ref_db_basename + '.fasta'
    output_clustered_ref_db_filepath = os.path.join(args.db_dir, output_clustered_ref_db_filename)
Пример #50
0
def _main():
    """CLI entry point: parse arguments, validate AWS credentials, launch and
    configure an ephemeral VPN, then print its connection details.

    :return: process exit status (0 on success, 1 on error)
    """
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-r', '--region', choices=config.AMI_ID_BY_REGION.keys(), help="Where in the world to create the VPN (default: %(default)s)", default='eu-west-1')
    parser.add_argument('-v', '--vpn-type', choices=vpntypes.TYPES.keys(), help="The type of feed to fetch (default: %(default)s)", default='ipsec')
    parser.add_argument('-a', '--api-key', help="Your AWS API key")
    parser.add_argument('-s', '--secret-key', action='store_true', help="Prompts for your AWS secret key secret_key interactively (no echo)")
    parser.add_argument('-S', '--no-prompt-secret', help="Your AWS secret key will be read from the arguments")
    parser.add_argument('-i', '--identity-file', help="The path to your key pair (not required by all VPN types)")
    parser.add_argument('-I', '--key-name', help="The name of the keypair to use. ")
    parser.add_argument('-c', '--config', help="Path to alternate config file", default=os.path.expanduser('~/.ephemvpnrc'))
    parser.add_argument('-V', '--verbose', action='store_true', help="Be more verbose")
    parser.add_argument('-q', '--quiet', action='store_true', help="Output nothing. For non-interactive unattended launches, assumes yes to all questions")
    parser.add_argument('-t', '--running-time', help="The time at or after which the VPN will terminate itself (default: %(default)s)", default='1h')
    # NOTE(review): with action='store_true' this flag is a boolean, yet it is
    # passed to _gen_config() below as if it were a path -- confirm intent.
    parser.add_argument('-G', '--generate-config', action='store_true', help='Path to create a config file at (recommended {})'.format(os.path.expanduser('~/.ephemvpnrc')))
    args = parser.parse_args()

    log.info(args)
    if args.generate_config:
        return _gen_config(args.generate_config)

    # parse config file
    cf = _parse_conf(args.config)
    secret_key = None
    if args.secret_key:
        secret_key = getpass.getpass()
    elif args.no_prompt_secret:
        secret_key = args.no_prompt_secret

    if args.api_key:
        config.AWS_API_KEY = args.api_key
    if secret_key:
        config.AWS_SECRET_KEY = secret_key

    if args.identity_file:
        config.LOCAL_AWS_KEY_FILE = args.identity_file
        if args.key_name:
            config.AWS_KEY_FILE = args.key_name
        else:
            # BUG FIX: os.splitext/os.basename do not exist; use os.path.*.
            name, ext = os.path.splitext(os.path.basename(args.identity_file))
            config.AWS_KEY_FILE = name
    elif args.key_name:
        # BUG FIX: `name` was undefined on this path; use the argument itself.
        config.AWS_KEY_FILE = args.key_name

    # sanity check (a stray Python 2 `print` of the API key was removed here:
    # it both broke Python 3 parsing and leaked a credential to stdout)
    if not config.AWS_API_KEY or not config.AWS_SECRET_KEY or not len(config.AWS_API_KEY) > 0 or not len(config.AWS_SECRET_KEY) > 0:
        log.info("AWS Credentials required")
        parser.print_usage()
        return 1

    if not config.AWS_KEY_FILE:
        log.info("AWS Keypair name required")
        parser.print_usage()
        return 1

    # calculate running time
    d_min, shutdown_t = _parse_running_time(args.running_time)

    # at scheduler doesn't support subminute periods
    if d_min < 1:
        log.error("Error:running time must be greater than one minute")
        return 1

    log.info("ephemvpn v{}".format(ephemvpn.__version__))
    log.info("summoning one {} vpn".format(args.vpn_type))
    if not args.quiet and not _confirm_time(shutdown_t):
        return 1

    config.AT_RUNNING_MINUTES = d_min

    try:
        vpn = vpntypes.VPN(args.vpn_type, cf, config.AT_RUNNING_MINUTES)
    except ValueError:
        log.error("this vpn type is broken")
        return 1

    if vpn.needs_post_configure() and not args.identity_file:
        log.error("vpn type {} requires sshing, so an identity file (private key) is required".format(args.vpn_type))
        parser.print_usage()
        return 1

    host = launch(vpn, args.region)
    if host is not None:
        configure(host, vpn)

    log.info("\nephemeral vpn summoned\n")

    info = vpn.human_readable_data()
    info['Hostname'] = host

    # Pretty-print the connection details right-aligned on the longest key.
    longest = max(map(len, info.keys()))
    fmt = "{0:>{width}}: {1}"
    # items() iterates identically on Python 2 and 3 (was .iteritems()).
    for k, v in info.items():
        log.info(fmt.format(k, v, width=longest))
    # a new line for aesthetics
    log.info('\n')
    return 0
Пример #51
0
    def create_archive(self, archive_name, dest=None):
        """Convert each known NSD to a TOSCA CSAR and zip them into *dest*.

        :param archive_name: desired archive name ('.zip' suffix optional)
        :param dest: output directory (a temp dir is created when None)
        :return: path of the created zip, or None when there are no NSDs
        :raises YangTranslateNsdError: if converting an NSD fails
        :raises YangTranslateError: if assembling the archive fails
        """
        if not len(self.nsds):
            self.log.error("Did not find any NSDs to export")
            return

        if dest is None:
            dest = tempfile.mkdtemp()

        prevdir = os.getcwd()

        if not os.path.exists(dest):
            os.makedirs(dest)

        try:
            # Convert each NSD to a TOSCA template
            for nsd_id in self.nsds:
                # Not passing the dest dir to prevent clash in case
                # multiple export of the same desc happens
                self.create_csar(nsd_id)

        except Exception as e:
            msg = "Exception converting NSD {}: {}".format(nsd_id, e)
            self.log.exception(e)
            raise YangTranslateNsdError(msg)

        os.chdir(dest)

        try:
            if archive_name.endswith(".zip"):
                archive_name = archive_name[:-4]

            archive_path = os.path.join(dest, archive_name)

            # Construct a zip of the csar archives
            zip_name = '{}.zip'.format(archive_path)

            if len(self.csars) == 1:
                # Only 1 TOSCA template, just rename csar if required
                if self.csars[0] != zip_name:
                    mv_cmd = "mv {} {}".format(self.csars[0], zip_name)
                    subprocess.check_call(mv_cmd, shell=True, stdout=subprocess.DEVNULL)
                    # Remove the temporary directory created
                    shutil.rmtree(os.path.dirname(self.csars[0]))

            else:
                with zipfile.ZipFile('{}.partial'.format(zip_name), 'w') as zf:
                    for csar in self.csars:
                        # Move file to the current dest dir
                        if os.path.dirname(csar) != dest:
                            file_mv = "mv {} {}".format(csar, dest)
                            subprocess.check_call(file_mv,
                                                  shell=True,
                                                  stdout=subprocess.DEVNULL)
                            # Remove the temporary directory created
                            shutil.rmtree(os.path.dirname(csar))

                        # BUG FIX: os.basename does not exist; use os.path.basename.
                        csar_f = os.path.basename(csar)
                        # Now add to the archive
                        zf.write(csar_f)
                        # Remove the csar file
                        os.remove(csar_f)

                    # Rename archive to final name
                    mv_cmd = "mv {0}.partial {0}".format(zip_name)
                    subprocess.check_call(mv_cmd, shell=True, stdout=subprocess.DEVNULL)

            return zip_name

        except Exception as e:
            msg = "Creating CSAR archive failed: {0}".format(e)
            self.log.exception(e)
            raise YangTranslateError(msg)

        finally:
            # Always restore the caller's working directory.
            os.chdir(prevdir)