Example #1
def link_files(collection, root_dir, field, copy):
    collection.setup_angle_folder()
    walk = os.walk(root_dir)
    for step in walk:
        for file in step[2]:
            fname, ext = os.path.splitext(file)
            if ext == field[3]:
                sample_match = next((x for x in collection.sample_list if x.name == fname), None)
                if sample_match is not None:
                    sync_file(sample_match, step[0], file, field, copy)
Example #2
def check_all_exist(directory):
    '''
    Takes a directory and returns True if every .ann file has a matching .wav file (and vice versa), False otherwise
    '''
    wavfiles = glob.glob(directory + '*.wav')
    annfiles = glob.glob(directory + '*.ann')
    wavfiles_noext = set(os.path.splitext(f)[0] for f in wavfiles)
    annfiles_noext = set(os.path.splitext(f)[0] for f in annfiles)
    return wavfiles_noext == annfiles_noext
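A minimal usage sketch (hypothetical directory name; because the glob patterns are built by plain string concatenation, the argument is assumed to end with a path separator):

# hypothetical directory holding paired recordings and annotations
if check_all_exist('recordings/'):
    print('every .ann file has a matching .wav file')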
Example #3
    def __init__(self, name, files, piece_lengths, piece_hashes, dirname=None):

        start_name = os.path.splitext(name)[0] if dirname is None else dirname
        directory = test_name = safe_filename(start_name)

        i = 1
        while os.path.exists(test_name):
            test_name = directory+'({})'.format(i)
            i += 1

        self._directory = test_name+'/'
        os.mkdir(self._directory)

        self._files = {}
        start_in_bytes = 0

        for f in files:
            full_path = \
                os.path.join(self._directory, *f['path']).replace(' ', '_')
            dir_path = os.path.dirname(full_path)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)

            open(full_path, mode='w').close()  # create an empty placeholder file

            end_in_bytes = start_in_bytes + int(f['length'])
            self._files[start_in_bytes, end_in_bytes] = full_path

            start_in_bytes = end_in_bytes

        self._piece_starts = [sum(p_l for p_l in piece_lengths[:i])
                              for i in range(len(piece_lengths))]

        self._piece_length = piece_lengths[0]
        self._piece_hashes = piece_hashes
Example #4
def create_lm(corpus_folder, lm_folder, prog_name):
    """
    corpus_folder - location of the corpus (i.e. /<prog name>/processed/correct)
    lm_folder - desired location of the resulting language model (absolute file path)
    prog_name - name of the program (without the extension)

    Creates a language model from all the .c files from the corpus_folder, and puts them in the lm_folder
    """
    print "CREATE LM %s" % corpus_folder
    # removes previous language model
    purge(lm_folder, r".*")
    # gets rid of extension in prog_name, if there is an extension
    if prog_name.find(".") >= 0:
        prog_name = os.path.splitext(prog_name)[0]
    # =======
    # creates the corpus =====
    dest = open(os.path.join(lm_folder, prog_name + ".corpus"), "wb")
    for filename in glob.glob(os.path.join(corpus_folder, "*.c")):
        shutil.copyfileobj(open(filename, "rb"), dest)
    dest.close()
    # =======
    # creates the intermediate files and .arpa file
    os.chdir(lm_folder)
    subprocess.call(
        "cat %s.corpus | text2wfreq | wfreq2vocab -top 20000 > %s.vocab" % (prog_name, prog_name), shell=True
    )
    subprocess.call(
        "text2idngram -vocab %s.vocab -idngram %s.idngram < %s.corpus" % (prog_name, prog_name, prog_name), shell=True
    )
    subprocess.call(
        "idngram2lm -idngram %s.idngram -vocab %s.vocab -arpa %s.arpa" % (prog_name, prog_name, prog_name), shell=True
    )
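A hypothetical invocation (made-up paths and program name; the purge() helper used above and the CMU-Cambridge SLM tools text2wfreq, wfreq2vocab, text2idngram and idngram2lm are assumed to be available on the PATH):

# builds /tmp/lm/myprog.corpus, .vocab, .idngram and .arpa from the .c files in the corpus folder
create_lm("/myprog/processed/correct", "/tmp/lm", "myprog.c")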
Example #5
def add_suffix(fname, suffix, range=None):
    """Add suffix to file name
    Parameters
    ----------
    fname: str
        The file name to add the suffix to
    suffix: str
        The suffix to add_suffix
    range: range
        If specified, the set of indexes will be added to the
        outputs.
    Returns
    -------
    fname, fname_with_suffix
        2-tuple of the original file name and name with suffix.
        If `range` is defined, `fname_with_suffix` will be a list.
    """
    fname_root, fname_ext = os.path.splitext(fname)
    if range is None:
        with_suffix = ''.join([fname_root, '_', suffix, fname_ext])
    else:
        with_suffix = []
        for idx in range:
            with_suffix.append(''.join(
                [fname_root, '_',
                 str(idx), '_', suffix, fname_ext]))

    return fname, with_suffix
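A minimal usage sketch (hypothetical file names), showing both the plain and the range forms:

add_suffix('subject01.nii', 'brain')
# -> ('subject01.nii', 'subject01_brain.nii')
add_suffix('subject01.nii', 'brain', range(2))
# -> ('subject01.nii', ['subject01_0_brain.nii', 'subject01_1_brain.nii'])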
Example #6
def un_rar(filename):
    rar = rarfile.RarFile(filename)
    dest = os.path.splitext(filename)[0]
    if not os.path.isdir(dest):
        os.mkdir(dest)
    rar.extractall(dest)
Example #7
def RNAalifold2fas_struct_rna(file_paths):
    for file_path in file_paths:
        seq_name_list = []
        seq_list = []
        fasta_file = open(os.path.splitext(file_path)[0] + '.fa', 'w')
        struc_file = open(os.path.splitext(file_path)[0] + '.structure.txt', 'w')
        rna_file = open(os.path.splitext(file_path)[0] + '.rna', 'w')

        f = open(file_path, 'r')
        for i in range(9):  # every RNAalifold output file starts with nine header lines
            f.readline()    # skip them
        done = 0
        while not done:
            line = f.readline()
            if line == '':
                done = 1  # stop read, since touch the end of file
                continue
            line = line.strip()
            line_list = line.split()
            if line:
                if line_list[0] == 'alifold':
                    struc_file.write(line_list[1])
                elif line_list[0][0] == '.' or line_list[0][0] == '(':
                    struc_file.write(line_list[0])
                elif line_list[0] not in seq_name_list:
                    seq_list.append(line_list[1])
                    seq_name_list.append(line_list[0])
                elif line_list[0] in seq_name_list:
                    index = seq_name_list.index(line_list[0])
                    seq_list[index] += line_list[1]
                else:
                    pass

        struc_file.close()
        rna_file.write(str(len(seq_name_list)) + ' ' + str(len(seq_list[0])) +
                       ' STRUCT\n\n')
        for i in range(len(seq_name_list)):
            fasta_file.writelines(seq_name_list[i] + '\t' + seq_list[i])
            rna_file.writelines(seq_name_list[i] + '\t' + seq_list[i])
        fasta_file.close()
        rna_file.close()
    return 0
Example #8
def un_tgz(filename):
    tar = tarfile.open(filename)
    dest = os.path.splitext(filename)[0]
    if not os.path.isdir(dest):
        os.mkdir(dest)
    tar.extractall(dest)
    tar.close()
Example #9
    def get_logmel(self, path, wav_arr, sample_rate):
        if round((wav_arr.shape[0] / sample_rate), 1) > self.hparams.segment_length:
            logmel_feats = logfbank(wav_arr, samplerate=sample_rate, nfilt=self.hparams.spectrogram_scale)

            if self.data_type == "vox1" or self.data_type == "vox2":
                data_id = "_".join(os.splitext(path)[0].split("/")[-3:])
                spkid = path.split("/")[-3]
                clipid = path.split("/")[-2]
                wavid = path.split("/")[-1]
            elif "libri" in self.data_type:
                data_id = "_".join(os.splitext(path)[0].split("/")[-3::2])
                spkid = path.split("/")[-3]
                clipid = path.split("/")[-2]
                wavid = path.split("/")[-1]

            return logmel_feats, data_id, spkid, clipid, wavid
        else:
            print("wav length smaller than 1.6s: " + path)
Example #10
def split_file(input_filename, export_format='wav', bitrate='192', tracks=None, artist='Unknown', album='Untitled', cover=None):
    input_filename = input_filename.split('.wav')[0]
    file_markers = read_markers(input_filename_fullpath)
    print 'Markers:', file_markers

    print 'BLOOOAAARRRRGHHGGGGHHHHH!!! (Please hold...)'
    ifile = wave.open(input_filename_fullpath)
    ifile_params = ifile.getparams()

    # Add start and end markers to list
    if file_markers[0] > 1000:
        print 'Start marker added.'
        file_markers.insert(0, 0)
    if ifile.getnframes() - file_markers[-1] > 1000:
        print 'End marker added.'
        file_markers.append(ifile.getnframes())
    print 'New marker list:', file_markers

    # Loop through cues and write regions (assumes start and end markers)
    for marker_num, marker in enumerate(file_markers):
        print 'Cue', marker_num, '@', marker
        if marker_num == len(file_markers) - 1:
            break
        region_name = os.path.join(output_path, input_filename)+'_'+str(marker_num + 1).zfill(2)+'.wav'
        region_length = file_markers[marker_num + 1] - marker
        print 'Region', marker_num, '@', region_length/44100.0, '->', region_name
        if export_format != 'wav':
            # write the region to a wav file first, then convert it with pydub
            region = wave.open(region_name, 'w')
            region.setparams(ifile_params)
            region.writeframes(ifile.readframes(region_length))
            region.close()
            mp3_name = os.path.splitext(region_name)[0] + '.' + export_format
            mp3_track = AudioSegment.from_wav(region_name)
            mp3_track.export(mp3_name, format=export_format, bitrate=bitrate,
                             tags={'title': str(tracks[marker_num + 1]), 'artist': artist, 'album': album})
            # Add cover art
            if cover:
                audio = MP3((os.path.splitext(region_name)[0] + '.mp3'), ID3=ID3)
                # Add ID3 tag if it doesn't exist
                try:
                    audio.add_tags()
                except error:
                    pass
                audio.tags.add(
                    APIC(
                        encoding=3, # 3 is for utf-8
                        mime='image/jpg', # image/jpeg or image/png
                        type=3, # 3 is for the cover image
                        desc=u'Cover',
                        data=open(cover, 'rb').read()
                    )
                )
                audio.save()
            else:
                print 'No cover found for item #', input_filename
        else:            
            region = wave.open(region_name, 'w')
            region.setparams(ifile_params)                                
            region.writeframes(ifile.readframes(region_length))
            region.close()
    ifile.close()
Example #11
    def get_all_repos(self):
        """
        get all repos 
        """
        #{{{
        repos = []
        for f in os.listdir(g_cfg_repo_root_path):
            if not os.path.isfile(os.path.join(g_cfg_repo_root_path, f)):
                continue
            (repo, ext) = os.path.splitext(f)
            if ext != '.git':
                continue
            repos.append(repo)

        return repos
Example #12
def take_screenshot(device, save_path=current_time, interactive=False):
    if interactive:
        raw_input(
            'Press ENTER followed by CTRL+D to take a screenshot (CTRL+C to stop)\n'
        )
    screenshot = device.takeSnapshot()
    if save_path is current_time:
        path = os.path.join(settings.LIVE_DATA_PATH, '%s.png' % current_time())
    else:
        if not os.path.splitext(save_path)[1]:
            path = '%s.png' % os.path.splitext(save_path)[0]
        else:
            path = save_path
    print 'Saving at "%s"' % path
    screenshot.writeToFile(path, 'png')
    return screenshot
Example #13
 def call(self, bamfn, output=None, ploidy=1, **args):
     args = args if args != None else dict()
     if ploidy and "-p" not in args:
         args["-p"] = ploidy
     args = list(map(str, reduce(operator.add, args.items(), tuple())))
     cmd = ["freebayes", "-f", self.reference] + args + [bamfn]
     if output == None:
          output = os.path.splitext(bamfn)[0] + ".vcf"
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
     with open(output, 'w') as fout:
         fout.write(proc.stdout.read())
     proc.wait()
     if proc.returncode != 0:
         msg = "The command '%s' did not exit cleanly (return code = %d)" % (str.join(' ', cmd), proc.returncode)
         raise RuntimeError, msg
     return output
Example #14
 def call(self, bamfn, output=None, ploidy=1, **args):
     args = args if args != None else dict()
     if ploidy and "-p" not in args:
         args["-p"] = ploidy
     args = list(map(str, reduce(operator.add, args.items(), tuple())))
     cmd = ["freebayes", "-f", self.reference] + args + [bamfn]
     if output == None:
          output = os.path.splitext(bamfn)[0] + ".vcf"
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
     with open(output, 'w') as fout:
         fout.write(proc.stdout.read())
     proc.wait()
     if proc.returncode != 0:
         msg = "The command '%s' did not exit cleanly (return code = %d)" % (
             str.join(' ', cmd), proc.returncode)
         raise RuntimeError, msg
     return output
Example #15
    def _filename(self):
        """Determine the name of the configuration file."""
        filename = "{name}.{ext}".format(name=Preferences.PROGRAM_NAME,
                                         ext=Preferences.XML)
        if os.name == "posix":
            # Unix style naming.
            filename = os.path.join("etc", Preferences.PROGRAM_NAME, filename)
        elif os.name == "nt":
            # Windows style naming
            filename = os.path.join(os.path.expanduser("~"), filename)
        else:
            # Other - use current directory.
            filename = "{name}.{ext}".format(name=os.splitext(program)[0],
                                             ext=Preferences.XML)

        print(filename)
        return filename
Example #16
def make_mp3s(audio_serial):
    audio_path_full = audio_path + '_clean.wav'
    mp3_img = os.path.join(images_folder, audio_serial).rsplit('_')[0] + '.jpg'
    print 'mp3:', audio_serial
    # Standard
    if 'std' in row[4].lower():
        mp3_track = AudioSegment.from_wav(audio_path_full)
        mp3_name = os.path.splitext(audio_path_full)[0] + '.mp3'
        mp3_track.export(mp3_name, format='mp3', bitrate='192',
                         tags={'title': row[2], 'artist': row[1], 'album': row[2]})
        # Add cover art
        audio = MP3((os.path.splitext(audio_path_full)[0] + '.mp3'), ID3=ID3)
        # Add ID3 tag if it doesn't exist
        try:
            audio.add_tags()
        except error:
            pass
        audio.tags.add(
            APIC(
                encoding=3, # 3 is for utf-8
                mime='image/jpg', # image/jpeg or image/png
                type=3, # 3 is for the cover image
                desc=u'Cover',
                data=open(mp3_img, 'rb').read()
            )
        )
        audio.save()
    # Deluxe/45
    else:        
        # Get track listing
        tracks = []
        for i in range(30):
            if row[11 + i]:
                tracks.append(row[11 + i])
            else:
                break
        split.split_file(audio_path_full, export_format='mp3', bitrate='192', tracks=tracks, artist=row[1], album=row[2], cover=mp3_img)
    # Embed cover thumbnails
    for each_mp3 in os.listdir(mp3_folder):
        if each_mp3.startswith(audio_serial):
            pass # do the thing
Example #17
def add_suffix(fname, suffix, range=None):
    """Add suffix to file name
    Parameters
    ----------
    fname: str
        The file name to add the suffix to
    suffix: str
        The suffix to add_suffix
    range: range
        If specified, the set of indexes will be added to the
        outputs.
    Returns
    -------
    fname, fname_with_suffix
        2-tuple of the original file name and name with suffix.
        If `range` is defined, `fname_with_suffix` will be a list.
    """
    fname_root, fname_ext = os.path.splitext(fname)
    if range is None:
        with_suffix = ''.join([
            fname_root,
            '_',
            suffix,
            fname_ext
        ])
    else:
        with_suffix = []
        for idx in range:
            with_suffix.append(''.join([
                fname_root,
                '_',
                str(idx),
                '_',
                suffix,
                fname_ext
            ]))

    return fname, with_suffix
Example #18
        def read_images_labels():
            # Get all the images and labels in directory/label/*.jpg
            files_and_labels = []
            for imgfile in os.listdir(datapath):
                label = os.path.splitext(imgfile)[0].split('_')[2]

                files_and_labels.append((os.path.join(datapath, imgfile), label))

            filenames, labels = zip(*files_and_labels)
            filenames = list(filenames)
            labels = list(labels)

            cls_dict ={}
            label_set = set(labels)
            for cls_id, cls_name in enumerate(label_set):
                cls_dict[cls_name] = cls_id

            labels = [cls_dict[label] for label in labels]

            filenames = np.array(filenames, dtype=str)
            labels = np.array(labels, dtype=np.uint16)

            return filenames, labels
Example #19
def read_seqabun(infile):
    """Will read in sequence abundance table in either TSV, BIOM, or mothur
    shared format."""

    # First check extension of input file. If extension is "biom" then read in
    # as BIOM table and return. This is expected to be the most common input.
    in_name, in_ext = os.path.splitext(infile)
    if in_ext == ".biom":
        return biom_to_pandas_df(biom.load_table(infile))

    # Next check if input file is a mothur shared file or not by read in first
    # row only.
    mothur_format = False
    try:
        in_test = pd.read_table(filepath_or_buffer=infile, sep="\t", nrows=1)
        in_test_col = list(in_test.columns.values)
        if len(in_test_col) >= 4 and (
            in_test_col[0] == "label"
            and in_test_col[1] == "Group"
            and in_test_col[2] == "numOtus"
        ):
            mothur_format = True
    except Exception:
        pass

    # If identified to be mothur format then remove extra columns, set "Group"
    # to be index (i.e. row) names and then transpose.
    if mothur_format:
        input_seqabun = pd.read_table(filepath_or_buffer=infile, sep="\t")
        input_seqabun.drop(labels=["label", "numOtus"], axis=1, inplace=True)
        input_seqabun.set_index(
            keys="Group", drop=True, inplace=True, verify_integrity=True
        )
        input_seqabun.index.name = None
        return input_seqabun.transpose()
    else:
        return biom_to_pandas_df(biom.load_table(infile))
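A minimal usage sketch (hypothetical file names; the biom and pandas packages are assumed to be installed). Each call returns a pandas DataFrame:

tsv_tab = read_seqabun("seqtab.tsv")          # plain TSV table
biom_tab = read_seqabun("seqtab.biom")        # BIOM, detected by the .biom extension
mothur_tab = read_seqabun("seqtab.shared")    # mothur shared format, detected by its header row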
Example #20
 def run(self) :
     logging.info("Adding " + self.config["plugins"] + " to working directory sert.")
     try :
         sys.path.append(self.config["plugins"])
     except :
         logging.fatal("Could not append plugin directory to working set.")
         os.sys.exit(1)
     plugins = []
     logging.info("MonAgent run")
     for n in os.listdir(self.config["plugins"]) :
          ncfg = os.path.join(self.config["pluginsconf"], os.path.splitext(os.path.split(n)[1])[0] + ".cfg")
          if os.path.exists(os.path.join(self.config["plugins"], n)) and os.path.exists(ncfg) :
              plugins.append((os.path.join(self.config["plugins"], n), ncfg))
         else :
             logging.info("Check plugin and its configuration file. In particular: " + os.path.split(n)[1])
     while True :
         results = []
         for m in plugins :
             try :
                 results.append(executecheck(m))
              except CheckError as e :
                  syslog.syslog("Check " + os.path.split(m[0])[1] + " failed to execute." )
                  if self.config["logging"] :
                      logging.warning(e)
         try :
             clientconn = rpyc.connect(self.config["monmaster"], 20200)
             try :
                  # rpyc's asynchronous helper wraps the remote callable (spelled async_ in current rpyc releases)
                  async_submit = rpyc.async_(clientconn.modules.submitcheck)
                  async_submit((socket.gethostname(), socket.gethostbyname_ex(socket.gethostname())[2][0], results))
             except :
                 #Call couldn't be done...
                 ### To-Do, extend error output
                 pass
         except :
             logging.info("Failed to contact master server. Sleepting...")
             time.sleep(5)
         time.sleep(self.config['interval'])
Example #21
                                     (blur_size + 1, blur_size + 1), 0)
    img = inv_soft_mask * inv_restored_remove_border + (1 -
                                                        inv_soft_mask) * img
    # float32, [0, 255]
    return img


if __name__ == '__main__':
    import os

    from facexlib.detection import init_detection_model
    from facexlib.utils.face_restoration_helper import get_largest_face
    from facexlib.visualization import visualize_detection

    img_path = '/home/wxt/datasets/ffhq/ffhq_wild/00009.png'
    img_name = os.path.splitext(os.path.basename(img_path))[0]

    # initialize model
    det_net = init_detection_model('retinaface_resnet50', half=False)
    img_ori = cv2.imread(img_path)
    h, w = img_ori.shape[0:2]
    # if larger than 800, scale it
    scale = max(h / 800, w / 800)
    if scale > 1:
        img = cv2.resize(img_ori, (int(w / scale), int(h / scale)),
                         interpolation=cv2.INTER_LINEAR)

    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
    if scale > 1:
        bboxes *= scale  # the score is incorrect
Example #22
 def __python_mods_in(path):
     mods = set()
     for item in os.listdir(path):
         if is_python.match(item):
             mods.add(os.path.splitext(item)[0])
     return mods
Example #23
 def list(self):
     """ Returns a list of exiting stop-words """
     return [os.splitext(el)[0] for el in os.listdir(str(self.model_dir))]
Example #24
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name='sagemaker-chainer-container',
    version='1.0',
    description=
    'Open source library template for creating containers to run on Amazon SageMaker.',
    packages=find_packages(where='src', exclude='test'),
    package_dir={
        'sagemaker_chainer_container': 'src/sagemaker_chainer_container'
    },
    py_modules=[
        os.path.splitext(os.path.basename(path))[0] for path in glob('src/*.py')
    ],
    long_description=read('README.rst'),
    author='Amazon Web Services',
    license='Apache License 2.0',
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
    ],
    # Temporarily freeze sagemaker-containers version to 2.2.5 until we have a proper fix
    # freeze numpy version because of the python2 bug
Example #25
import os
from glob import glob

from setuptools import find_packages, setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name='sagemaker-chainer-container',
    version='1.0',
    description='Open source library template for creating containers to run on Amazon SageMaker.',
    packages=find_packages(where='src', exclude='test'),
    package_dir={'sagemaker_chainer_container': 'src/sagemaker_chainer_container'},
    py_modules=[os.path.splitext(os.path.basename(path))[0] for path in glob('src/*.py')],
    long_description=read('README.rst'),
    author='Amazon Web Services',
    license='Apache License 2.0',
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
    ],
    install_requires=['sagemaker-containers==2.1.0', 'chainer >= 4.0.0', 'retrying==1.3.3',
                      'numpy >= 1.14'],
Example #26
    def fileExtension(self) -> str:
        '''
        Return the file extension, if there is one.
        '''
        return os.path.splitext(self.filename)[1]
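For reference, os.path.splitext keeps the leading dot and returns an empty string when there is no extension:

os.path.splitext('notes.txt')[1]       # '.txt'
os.path.splitext('archive.tar.gz')[1]  # '.gz'
os.path.splitext('Makefile')[1]        # ''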
Example #27
def _main():
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-r', '--region', choices=config.AMI_ID_BY_REGION.keys(), help="Where in the world to create the VPN (default: %(default)s)", default='eu-west-1')
    parser.add_argument('-v', '--vpn-type', choices=vpntypes.TYPES.keys(), help="The type of VPN to launch (default: %(default)s)", default='ipsec')
    parser.add_argument('-a', '--api-key', help="Your AWS API key")
    parser.add_argument('-s', '--secret-key', action='store_true', help="Prompts for your AWS secret key interactively (no echo)")
    parser.add_argument('-S', '--no-prompt-secret', help="Your AWS secret key will be read from the arguments")
    parser.add_argument('-i', '--identity-file', help="The path to your key pair (not required by all VPN types)")
    parser.add_argument('-I', '--key-name', help="The name of the keypair to use. ")
    parser.add_argument('-c', '--config', help="Path to alternate config file", default=os.path.expanduser('~/.ephemvpnrc'))
    parser.add_argument('-V', '--verbose', action='store_true', help="Be more verbose")
    parser.add_argument('-q', '--quiet', action='store_true', help="Output nothing. For non-interactive unattended launches, assumes yes to all questions")
    parser.add_argument('-t', '--running-time', help="The time at or after which the VPN will terminate itself (default: %(default)s)", default='1h')
    parser.add_argument('-G', '--generate-config', help='Path to create a config file at (recommended {})'.format(os.path.expanduser('~/.ephemvpnrc')))
    args = parser.parse_args()

    log.info(args)
    if args.generate_config:
        return _gen_config(args.generate_config)

    # parse config file
    cf = _parse_conf(args.config)
    secret_key = None
    if args.secret_key:
        secret_key = getpass.getpass()
    elif args.no_prompt_secret:
        secret_key = args.no_prompt_secret

    if args.api_key:
        config.AWS_API_KEY = args.api_key
    if secret_key:
        config.AWS_SECRET_KEY = secret_key

    if args.identity_file:
        config.LOCAL_AWS_KEY_FILE = args.identity_file
        if args.key_name:
            config.AWS_KEY_FILE = args.key_name
        else:
            name, ext = os.path.splitext(os.path.basename(args.identity_file))
            config.AWS_KEY_FILE = name
    elif args.key_name:
        config.AWS_KEY_FILE = args.key_name

    # sanity check
    print config.AWS_API_KEY
    if not config.AWS_API_KEY or not config.AWS_SECRET_KEY or not len(config.AWS_API_KEY) > 0 or not len(config.AWS_SECRET_KEY) > 0:
        log.info("AWS Credentials required")
        parser.print_usage()
        return 1

    if not config.AWS_KEY_FILE:
        log.info("AWS Keypair name required")
        parser.print_usage()
        return 1

    # calculate running time
    d_min, shutdown_t = _parse_running_time(args.running_time)

    # at scheduler doesn't support subminute periods
    if d_min < 1:
        log.error("Error:running time must be greater than one minute")
        return 1

    log.info("ephemvpn v{}".format(ephemvpn.__version__))
    log.info("summoning one {} vpn".format(args.vpn_type))
    if not args.quiet and not _confirm_time(shutdown_t):
        return 1

    config.AT_RUNNING_MINUTES = d_min

    try:
        vpn = vpntypes.VPN(args.vpn_type, cf, config.AT_RUNNING_MINUTES)
    except ValueError:
        log.error("this vpn type is broken")
        return 1

    if vpn.needs_post_configure() and not args.identity_file:
        log.error("vpn type {} requires sshing, so an identity file (private key) is required".format(args.vpn_type))
        parser.print_usage()
        return 1

    host = launch(vpn, args.region)
    if host is not None:
        configure(host, vpn)

    log.info("\nephemeral vpn summoned\n")

    info = vpn.human_readable_data()
    info['Hostname'] = host

    longest = max(map(len, info.keys()))
    fmt = "{0:>{width}}: {1}"
    for k,v in info.iteritems():
        log.info(fmt.format(k, v, width=longest))
    # a new line for aesthetics
    log.info('\n')
    return 0
Example #28
import os
import shutil
import time

pathname = input("Enter the path name: ")

vartime = time.time()

existancetime = int(input("How old does the file have to be (in months): "))

if os.path.exists(pathname):
    files = os.listdir(pathname)

    for i in files:
        name, extention = os.path.splitext(i)

        if os.path.exists(pathname + "/" + name + extention):
            fullnameofthefile = pathname + "/" + name + extention
            fileage = os.stat(fullnameofthefile).st_ctime
            fileageins = time.gmtime(vartime - fileage)
            fileageinm = int(time.strftime("%m", fileageins))

            if fileageinm <= existancetime:
                os.remove(fullnameofthefile)
                print("DELETED")
            else:
                continue
        else:
            continue
else:
    print("this is not a directory")
Example #29
def create_ps_maps(dname):
    for root, dirs, files in os.walk(dname):
        for f in files:
            fname, ext = os.path.splitext(f)
            if ext == '.dot':
                run_dot(root, fname)
Example #30
import os
import sys

if len(sys.argv) >= 2:
    kmlfilein = sys.argv[1]
    if len(sys.argv) >= 3:
        kmlfileout = sys.argv[2]
    else:
        dotext = kmlfilein[kmlfilein.rfind(os.extsep):]
        fname, ext = os.path.splitext(kmlfilein)
        kmlfileout = fname + '_flattened' + ext
else:
    kmlfilein = 'doc.kml'
    kmlfileout = 'doc_flattened.kml'


with open(kmlfilein, 'r') as fi:
    with open(kmlfileout, 'w') as fo:
        folder, skip = False, False
        for line in fi.readlines():
            stline = line.strip() #remove indents, but hang onto original line.
            if not folder: #copy up to first <Folder> tag
                if stline == '<Folder>':
                    folder, skip = True, True
                    fo.write(line) # write the first folder!
                else:
                    fo.write(line)
            if folder: #copy lines except for the <Folder> tags and the <name> tag immediately following
                if skip:
                    skip = False
                elif stline == '</Document>':
Example #31
from glob import glob
import os

from setuptools import setup, find_packages


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(name="sagemaker",
      version="1.9.2",
      description="Open source library for training and deploying models on Amazon SageMaker.",
      packages=find_packages('src'),
      package_dir={'': 'src'},
      py_modules=[os.path.splitext(os.path.basename(path))[0] for path in glob('src/*.py')],
      long_description=read('README.rst'),
      author="Amazon Web Services",
      url='https://github.com/aws/sagemaker-python-sdk/',
      license="Apache License 2.0",
      keywords="ML Amazon AWS AI Tensorflow MXNet",
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Intended Audience :: Developers",
          "Natural Language :: English",
          "License :: OSI Approved :: Apache Software License",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.5",
      ],
Example #32
 "Scout",
 "author_email":
 "*****@*****.**",
 "license":
 "MIT",
 "zip_safe":
 False,
 "python_requires":
 ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
 "packages":
 find_packages("src"),
 "package_dir": {
     "": "src"
 },
 "py_modules":
 [os.path.splitext(os.path.basename(path))[0] for path in glob("src/*.py")],
 "ext_modules": [
     Extension("scout_apm.core.objtrace",
               ["src/scout_apm/core/ext/objtrace.c"])
 ],
 "entry_points": {
     "console_scripts":
     ["core-agent-manager = scout_apm.core.cli.core_agent_manager:main"]
 },
 "install_requires": ["psutil", "requests"],
 "keywords":
 "apm performance monitoring development",
 "classifiers": [
     "Development Status :: 5 - Production/Stable",
     "Framework :: Django",
     "Framework :: Django :: 1.8",
Example #33
def save_picture(form_picture):
    random_hex = secrets.token_hex(8)
    # the uploaded file object is assumed to expose a .filename attribute
    fname, f_ext = os.path.splitext(form_picture.filename)
    picture_fn = random_hex + f_ext
    picture_path = os.path.join(app.root_path, 'static/posts', picture_fn)