Example #1
def standardize_actions(in_path, out_path):
    action_frame = actions.read_action_frame(in_path)
    utils.make_dir(out_path)
    for action in action_frame["Action"]:
        print(action)
        discretize_action(action)
        actions.save_action(out_path, action)
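Most examples on this page rely on a project-local utils.make_dir helper rather than calling os.makedirs directly. The helper differs from project to project; a minimal sketch consistent with call sites like the one above (create the directory if it is missing, do nothing otherwise) would be:

import os

def make_dir(path):
    # Create path, including any intermediate directories, if it does not exist yet.
    if not os.path.exists(path):
        os.makedirs(path)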
Example #2
File: wrf_cloner.py Project: islenv/wrfxpy
    def clone_wps(self, tgt, vtables, with_files):
        """
        Clone the WPS installation directory (self.wps_idir) together with the chosen table files vtables
        and additional files with_files.  The WPS clone is created in directory tgt.

        :param tgt: target directory into which WPS is cloned
        :param vtables: a dictionary with keys from list ['geogrid_vtable', 'ungrib_vtable', 'metgrid_vtable'],
                        which contain paths of the variable tables relative to 'etc/vtables'
        :param with_files: a list of files from the WPS source directory that should be symlinked
        :return:
        """
        src = self.wps_idir
        vtable_locs = self.vtable_locations

        # build a list of all files that are simply symlinked
        symlinks = list(self.wps_exec_files)
        symlinks.extend(with_files)

        # create target directory (and all intermediate subdirs if necessary)
        make_dir(tgt)

        # clone all WPS executables
        for f in symlinks:
            symlink_unless_exists(osp.join(src, f), osp.join(tgt, f))

        # clone all vtables (build symlink name, ensure directories exist, create the symlink)
        for vtable_id, vtable_path in vtables.items():
            # build path to link location
            symlink_path = osp.join(tgt, vtable_locs[vtable_id])

            if not osp.exists(symlink_path):
                symlink_tgt = osp.join(self.sys_idir, "etc/vtables", vtable_path)
                symlink_unless_exists(symlink_tgt, ensure_dir(symlink_path))
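A hypothetical call, assuming cloner is an instance of the class that defines clone_wps (the target directory, vtable name, and file list below are illustrative only):

cloner.clone_wps('wps_clone',
                 vtables={'ungrib_vtable': 'Vtable.GFS'},
                 with_files=['namelist.wps'])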
Example #3
 def save(self,path):
     utils.make_dir(path)
     name=utils.get_name(path)
     for proj_type in DIRS:
         utils.make_dir(path+'/'+proj_type)
     for i,frame in enumerate(self.frames):
         frame.save(path,name+str(i))
Example #4
def main(asmstats, outdir, size_field=False, names=False, colors=False, shapes=False):
    #DTYPE = [('sum_purest_bases', '<f8'), ('sum_bases', '<i8'), ('kmer_type', '|S3'), ('n50', '<i8'), ('trim_n', '<i8'), ('kmax', '|S3'), ('trim_n_mapping', '<i8'), ('l50', '<i8'), ('name', '|S6'), ('global_purity', '<f8'), ('cut_off', '<i8'), ('aln_purity', '<f8'), ('kmin', '|S3'), ('sum_ref_lengths', '<i8'), ('metagenome_cov', '<f8'), ('kmer_size', '|S3'), ('aln_ratio', '<f8'), ('asm_type', '|S3'), ('max_contig_length', '<i8')]
    a = np.genfromtxt(asmstats, names=True, dtype=None, delimiter=DELIMITER, missing_values=MISSING_VALUE, usemask=True)

    #avalues = []
    #for asms in asmstats:
    #    avalues.append(tuple(open(asms).readlines()[1].strip().split(DELIMITER)))
    #a = np.array(avalues, dtype=DTYPE)

    # Determine l50 limit before filtering names
    assert (max(a["l50"]) / 10) > 0
    l50xlim = [0, max(a["l50"]) + (max(a["l50"]) / 10)]

    # If names are specified, filter by given names
    if names:
        a = a[np.in1d(a["name"], names)]

    make_dir(outdir)
    p1 = plot_per_row_legend_n_save(a, "l50", "global_purity", "name", "L50", "Global purity", outdir, xlim=l50xlim, ylim=[0, 1], size_field=size_field, colors=colors, shapes=shapes, names=names)
    p2 = plot_per_row_legend_n_save(a, "l50", "aln_purity", "name", "L50", "Alignment purity", outdir, xlim=l50xlim, ylim=[0, 1], size_field=size_field, colors=colors, shapes=shapes, names=names)
    p3 = plot_per_row_legend_n_save(a, "l50", "aln_ratio", "name", "L50", "Alignment ratio", outdir, xlim=l50xlim, ylim=[0, 1], size_field=size_field, colors=colors, shapes=shapes, names=names)
    p4 = plot_per_row_legend_n_save(a, "l50", "metagenome_cov", "name", "L50", "Metagenome coverage", outdir, xlim=l50xlim, ylim=[0, 1], size_field=size_field, colors=colors, shapes=shapes, names=names)

    # Output plots in HTML
    sdir = os.path.dirname(os.path.realpath(__file__))
    template = open(sdir + '/template/cmp-asm-template.html').read()
    with open(outdir + '/index.html', 'w') as fh:
        fh.write(template.format(assemblies=" ".join(np.unique(a["name"]).tolist()),
                                 plot1=p1,
                                 plot2=p2,
                                 plot3=p3,
                                 plot4=p4))
Example #5
def main():
    with tf.variable_scope('input') as scope:
        # use variable instead of placeholder because we're training the intial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    utils.make_dir('checkpoints')
    utils.make_dir('outputs')
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS

    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, 
                                                    input_image, content_image, style_image)
    ###############################
    ## optimizer
    model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'], 
                                                            global_step=model['global_step'])
    ###############################
    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image)
Example #6
def plot_perm_ttest_results(events_id, inverse_method='dSPM', plot_type='scatter_plot'):
    print('plot_perm_ttest_results')
    all_data = defaultdict(dict)
    fsave_vertices = [np.arange(10242), np.arange(10242)]
    fs_pts = mne.vertex_to_mni(fsave_vertices, [0, 1], 'fsaverage', LOCAL_SUBJECTS_DIR) # 0 for lh
    for cond_id, cond_name, patient, hc, data in patients_hcs_conds_gen(events_id, True, inverse_method):
        all_data[patient][hc] = data[()]
    print(all_data.keys())
    for patient, pat_data in all_data.items():
        print(patient)
        fol = op.join(LOCAL_ROOT_DIR, 'permutation_ttest_results', patient)
        utils.make_dir(fol)
        if op.isfile(op.join(fol, 'perm_ttest_points.npz')):
            d = np.load(op.join(fol, 'perm_ttest_points.npz'))
            if plot_type == 'scatter_plot':
                points, values = d['points'][()], d['values'][()]
            elif plot_type == 'pysurfer':
                vertices, vertices_values = d['vertices'][()], d['vertices_values'][()]
        else:
            points, values, vertices, vertices_values = calc_points(pat_data, fs_pts)
            np.savez(op.join(fol, 'perm_ttest_points'), points=points, values=values, vertices=vertices, vertices_values=vertices_values)
        max_vals = 8 # int(np.percentile([max(v) for v in values.values()], 70))
        print(max_vals)
        fol = op.join(fol, '{}_figures'.format(plot_type))
        utils.make_dir(fol)
        if plot_type == 'scatter_plot':
            scatter_plot_perm_ttest_results(points, values, fs_pts, max_vals, fol)
        elif plot_type == 'pysurfer':
            pysurfer_plot_perm_ttest_results(vertices, vertices_values, max_vals, fol)
Example #7
    def __init__(self, report_dir=None,
                 plot_map_params=None, save_params=None, safe_dir=True):
        self.report_dir = report_dir or tempfile.mkdtemp(prefix='report_')

        make_dir(self.report_dir, safe=safe_dir, strict=False)
        self.plot_map_params = _check_plot_map_params(plot_map_params)
        self.save_params = _check_save_params(save_params)
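Unlike the bare make_dir(path) helpers elsewhere on this page, this project's make_dir accepts safe and strict flags. The actual semantics live in the project's utilities; one plausible sketch of such a variant (an assumption, not the real implementation) is:

import os
import shutil

def make_dir(path, safe=True, strict=True):
    # Hypothetical: back up an existing directory when safe=True,
    # raise when strict=True, otherwise silently reuse it.
    if os.path.exists(path):
        if safe:
            shutil.move(path, path.rstrip('/') + '.old')
        elif strict:
            raise OSError('directory already exists: %s' % path)
        else:
            return path
    os.makedirs(path)
    return path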
Example #8
 def save(self,path,name):
     for proj,postfix in zip(self.projections,DIRS):
         proj_path=path+"/"+postfix
         utils.make_dir(proj_path)
         full_path=proj_path+name
         print(full_path)
         utils.save_img(full_path,proj)
Example #9
def save_action(path,action):
    action_path=path+str(action)+"/"
    utils.make_dir(action_path)
    for i,img in enumerate(action.images):
        img_path=action_path+str(action)+"_"+str(i)
        img=np.reshape(img,(80,40))
        utils.save_img(img_path,img)
Example #10
def train_model(model, batch_gen, num_train_steps, weights_fld):
    saver = tf.train.Saver() # defaults to saving all variables - in this case embed_matrix, nce_weight, nce_bias

    initial_step = 0
    utils.make_dir('checkpoints')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
        # if that checkpoint exists, restore from checkpoint
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        total_loss = 0.0 # we use this to calculate the average loss over the last SKIP_STEP steps
        writer = tf.summary.FileWriter('improved_graph/lr' + str(LEARNING_RATE), sess.graph)
        initial_step = model.global_step.eval()
        for index in range(initial_step, initial_step + num_train_steps):
            centers, targets = next(batch_gen)
            feed_dict={model.center_words: centers, model.target_words: targets}
            loss_batch, _, summary = sess.run([model.loss, model.optimizer, model.summary_op], 
                                              feed_dict=feed_dict)
            writer.add_summary(summary, global_step=index)
            total_loss += loss_batch
            if (index + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
                total_loss = 0.0
                saver.save(sess, 'checkpoints/skip-gram', index)
Example #11
File: engine.py Project: Logmytech/trackma
    def _load(self, account):
        self.account = account

        # Create home directory
        utils.make_dir("")
        self.configfile = utils.get_root_filename("config.json")

        # Create user directory
        userfolder = "%s.%s" % (account["username"], account["api"])
        utils.make_dir(userfolder)

        self.msg.info(
            self.name,
            "Trackma v{0} - using account {1}({2}).".format(utils.VERSION, account["username"], account["api"]),
        )
        self.msg.info(self.name, "Reading config files...")
        try:
            self.config = utils.parse_config(self.configfile, utils.config_defaults)
        except IOError:
            raise utils.EngineFatal("Couldn't open config file.")

        # Load hook file
        if os.path.exists(utils.get_root_filename("hook.py")):
            import sys

            sys.path[0:0] = [utils.get_root()]
            try:
                self.msg.info(self.name, "Importing user hooks (hook.py)...")
                global hook
                import hook

                self.hooks_available = True
            except ImportError:
                self.msg.warn(self.name, "Error importing hooks.")
            del sys.path[0]
Example #12
    def __init__(self, app):
        """Create webpages and save them as HTML files
        """
        #index.html
        print "\n- Creazione pagina web:\nindex.html"
        homePage = os.path.join("html", "index.html")
        homePageCode = Homepage(app).code
        self.save_html_file(homePage, homePageCode)

        #Subpages with lists of errors or GPX files links
        #Read errors per region from database
        print "\n- Creazione sottopagine:"
        for check in app.checks.values():
            print "  %s" % check.name
            if "Lista" in check.output or "Mappa" in check.output:
                if "Lista" in check.output:
                    #Subpage displays errors as a list of JOSM remote links
                    subPage = os.path.join("html", "%s.html" % check.name)
                    subPageCode = ListSubpage(check).code
                if "Mappa" in check.output:
                    #Subpage displays errors on a clickable map with JOSM remote links
                    subPageDir = os.path.join("html", check.name)
                    utils.make_dir(subPageDir)
                    subPage = os.path.join(subPageDir, "%s.html" % check.name)
                    subPageCode = MapSubpage(check).code
                self.save_html_file(subPage, subPageCode)

        if not app.args.NOFX:
            homepage = os.path.join("html", "index.html")
            call("firefox %s" % homepage, shell=True)
Example #13
 def purge_account(self, num):
     """
     Renames stale cache files for account number **num**.
     """
     account = self.accounts['accounts'][num]
     userfolder = "%s.%s" % (account['username'], account['api'])
     utils.make_dir(userfolder + '.old')
     utils.regex_rename_files('(.*.queue)|(.*.info)|(.*.list)|(.*.meta)', userfolder, userfolder + '.old')
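utils.regex_rename_files is project-specific; a hypothetical helper matching this call site (move every file whose name matches the regex into the target folder) could look like:

import os
import re

def regex_rename_files(pattern, folder, new_folder):
    # Move files whose names match the regex from folder into new_folder.
    for fname in os.listdir(folder):
        if re.match(pattern, fname):
            os.rename(os.path.join(folder, fname),
                      os.path.join(new_folder, fname))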
Example #14
def show_category(dim,size,params):
    out_path=params['out_path']
    utils.make_dir(out_path)
    actions=data.read_actions(params['action_path'])
    extr=sda.read_sda(params['cls_path'],params['conf_path'])
    for i in range(size):
        full_path=out_path+"cls"
        print(full_path)
        apply_cls(dim,i,params,actions,extr)
Example #15
def get_all_clusters(action_path,conf,out_path,n_cls=10):
    actions=get_actions(action_path,conf.nn,conf.cls)
    symbols=actions[0].symbols
    for i in range(n_cls):
        cls_symbol=symbols[i]
        full_path=out_path+cls_symbol+"/"
        print(full_path)
        utils.make_dir(full_path)
        get_cluster(i,actions,full_path)
Example #16
def transform_files(in_path,out_path,transform,dirs=False):
    utils.make_dir(out_path)
    if(dirs):
        names=utils.get_dirs(in_path)
    else:
        names=utils.get_files(in_path)
    for name in names:
        full_in_path=in_path+name
        full_out_path=out_path+name
        transform(full_in_path,full_out_path)
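Note that in_path and out_path are concatenated with the entry names directly, so both are expected to end with '/'. A hypothetical usage with a made-up transform:

def to_upper(full_in_path, full_out_path):
    # Illustrative transform: upper-case a text file.
    with open(full_in_path) as src, open(full_out_path, 'w') as dst:
        dst.write(src.read().upper())

transform_files('raw/', 'upper/', to_upper)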
Example #17
 def save_projection(self,out_path):
     utils.make_dir(out_path)
     paths=['xy/','zx/','zy/']
     paths=[out_path+path for path in paths]
     for path in paths:
         utils.make_dir(path)
     imgs_xy=self.get_imgs(pc.ProjectionXY())
     utils.save_images(paths[0],imgs_xy)
     imgs_xz=self.get_imgs(pc.ProjectionXZ())
     utils.save_images(paths[1],imgs_xz)
     imgs_zy=self.get_imgs(pc.ProjectionYZ())
     utils.save_images(paths[2],imgs_zy)
Example #18
def reconstruct_images(img_frame,ae,out_path):
    utils.make_dir(out_path)
    imgs=img_frame['Images']
    #cats=img_frame['Category']
    for i,img in enumerate(imgs):
        img=np.reshape(img,(1,3200))
        rec_image=ae.get_image(img)
        rec_image*=200
        img2D=np.reshape(rec_image,(80,40))
        img_path=out_path+"img"+str(i)+".png"
        print(img_path)
        scipy.misc.imsave(img_path,img2D)
Example #19
def create_time_series(conf,dim=0):
    action_path=conf['action']
    cls_path=conf['cls_ts']
    cls_config=conf['cls_config']
    out_path=conf['series']
    actions=data.read_actions(action_path)
    extractor=sda.read_sda(cls_path,cls_config)
    all_t_series=[make_action_ts(extractor,action,dim) for action in actions]
    utils.make_dir(out_path)
    for action_ts in all_t_series:
        full_path=out_path+action_ts.name
        utils.save_object(action_ts,full_path)
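utils.save_object is presumably a pickle wrapper; a minimal sketch under that assumption:

import pickle

def save_object(obj, path):
    # Serialize obj to path with pickle.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)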
Example #20
def get_max(in_path,out_path):
    actions=read_actions(in_path)
    named_imgs=[]
    for i,action_i in enumerate(actions):
        dim_x=action_i.get_dim(0)
        max_array=np.zeros(dim_x[0].shape)
        maxim=[np.argmax(img_i)  for img_i in dim_x]
        max_array[maxim]=i*10#1.0
        max_array=np.reshape(max_array,(60,60))
        named_imgs.append((action_i.name,max_array))
    utils.make_dir(out_path)
    utils.save_images(out_path,named_imgs)
Example #21
def combine_images_into_groups():
    labels, groups = get_groups()
    fol = os.path.join(subjects_dir, subject, 'label', '{}_groups_figures'.format(aparc_name))
    utils.make_dir(fol)
    for group in groups:
        group_im = Image.new('RGB', (800,800))
        group_images = get_group_images(group)
        for view_image_file, coo in zip(group_images, [(0, 0), (0, 400), (400, 0), (400,400)]):
            view_img = Image.open(view_image_file)
            view_img.thumbnail((400,400))
            group_im.paste(view_img, coo)
        group_im.save(os.path.join(fol, '{}-{}.jpg'.format(subject, group)))
Example #22
def main():
    vocab = (
            " $%'()+,-./0123456789:;=?ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "\\^_abcdefghijklmnopqrstuvwxyz{|}")
    seq = tf.placeholder(tf.int32, [None, None])
    temp = tf.placeholder(tf.float32)
    loss, sample, in_state, out_state = create_model(seq, temp, vocab)
    global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    optimizer = tf.train.AdamOptimizer(LR).minimize(loss, global_step=global_step)
    utils.make_dir('checkpoints')
    utils.make_dir('checkpoints/arvix')
    training(vocab, seq, loss, optimizer, global_step, temp, sample, in_state, out_state)
Example #23
 def _run(self):
     hold_jid = ''
     if self.concall_file_ok:
         self._skip_msg('calling')
         self._skip_msg('af_calc')
         self._skip_msg('con_call')
     else:
         make_dir(self.qerr_dir)
         make_dir(self.qout_dir)
         jids = (caller.run(self.clone, self.tissue) for caller in self.caller)
         hold_jid = ','.join(jid for jid in jids if jid != '')
         hold_jid = self._concall(hold_jid)
         self._run_msg('con_call')
     return hold_jid
Example #24
def test_assemblyvalidation():
    bamasm = get_testfile('cm-500pgun-asm-b2mv31-bam')
    contigfa = get_testfile('cm-500pgun-asm-b2mv31-fa')
    bamref = get_testfile('cm-500pgun-ref-bam')
    refstatsfile = get_testfile('cm-500pgun-ref-stats')
    refphylfile = get_testfile('cm-ref-phyl')
    nucmercoords = get_testfile('cm-500pgun-val-nucmer')

    val = AssemblyValidation(bamref, bamasm, refphylfile, refstatsfile, contigfa, nucmercoords)
    assert(len(val.contigs) == int(get_shell_output("grep -c '^>' " + contigfa)[0]))

    make_dir(get_outdir() + "masm")
    val.write_contig_purity(get_outdir() + "masm" + "/contig-purity.tsv")
    val.write_general_stats(get_outdir() + "masm" + "/asm-stats.tsv")
    val.write_genome_contig_cov(get_outdir() + "masm" + "/genome-contig-coverage.tsv")
Example #25
def build_vocab(words, vocab_size):
    """ Build vocabulary of VOCAB_SIZE most frequent words """
    dictionary = dict()
    count = [('UNK', -1)]
    count.extend(Counter(words).most_common(vocab_size - 1))
    index = 0
    utils.make_dir('processed')
    with open('processed/vocab_1000.tsv', "w") as f:
        for word, _ in count:
            dictionary[word] = index
            if index < 1000:
                f.write(word + "\n")
            index += 1
    index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, index_dictionary
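A quick usage sketch (the toy corpus is illustrative):

words = "the quick brown fox jumps over the lazy dog the fox".split()
dictionary, index_dictionary = build_vocab(words, vocab_size=8)
print(dictionary['UNK'], index_dictionary[0])  # UNK is always assigned index 0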
Example #26
def apply_cls(dim,s_cat,params,actions,extr):
    imgs=data.get_named_projections(dim,actions)
    img_cats=[]
    for img_i in imgs:
        img_j=np.reshape(img_i[1],(1,img_i[1].size))
        cat=extr.test(img_j)
        if(s_cat==cat):
            img_cats.append(img_i)
    out_path=params['out_path']
    out_path=out_path+str(s_cat)+"/"
    utils.make_dir(out_path)
    for cat,img_i in img_cats:
        full_path=out_path+cat
        img_i=np.reshape(img_i, (60,60))
        utils.save_img(full_path,img_i)
Example #27
    def __init__(self, SCRIPTDIR):
        """Read configuration from './configuration/config' file
        """
        configFile = os.path.join("configuration", "config")
        configParser = ConfigParser.RawConfigParser()
        configParser.read(configFile)

        #directory with OSM data
        self.OSMDIR = configParser.get("general", "OSM_DIR")
        if self.OSMDIR == "":
            sys.exit("\nWrite into the './configuration/config' file the path of the directory where the OSM data should be downloaded.")
        self.country = configParser.get("general", "country")
        self.countryPBF = os.path.join(self.OSMDIR, "%s-latest.osm.pbf" % self.country)
        self.countryO5M = os.path.join(self.OSMDIR, "%s-latest.o5m" % self.country)
        self.oldCountryO5M = os.path.join(self.OSMDIR, "%s.o5m" % self.country)
        self.countryPOLY = os.path.join("boundaries", "poly", "%s.poly" % self.country)
        #databaseAccess
        self.user = configParser.get("database_access", "user")
        self.password = configParser.get("database_access", "password")
        if self.user == "" or self.password == "":
            sys.exit("\nWrite the name and password of the PostGIS database user in: %s" % configFile)
        self.databaseAccess = (self.user, self.password)

        #Read databases configuration
        self.databases = dbConfig.AllDatabases().databases
        #Read checks configuration
        self.checks = checksConfig.AllChecks().checks

        #Optional
        #directory of Tilemill projects
        self.TILEMILLDIR = configParser.get("general", "TILEMILL_DIR")
        #dropbox directory
        self.DROPBOXDIR = configParser.get("general", "DROPBOX_DIR")

        #Make directories
        for directory in (self.OSMDIR,
                          "false_positives",
                          os.path.join("false_positives", "from_users"),
                          "output",
                          os.path.join("output", "gpx"),
                          os.path.join("output", "old_gpx"),
                          os.path.join("output", "geojson"),
                          os.path.join("html", "gpx"),
                          "stats",
                          self.DROPBOXDIR,
                          self.TILEMILLDIR):
            if directory:
                utils.make_dir(directory)
Example #28
def sort_files(path, filetype='gif'):
    """
    Sort files into a specific folder while *not* maintaining existing 
    file structure patterns.
    """
    target_dir = "_{}s".format(filetype.lower())
    target = os.path.join(path, target_dir)
    for root, dirnames, filenames in os.walk(path):
        for filename in filenames:
            if ".ds_store" in filename.lower():
                continue
            extension = utils.get_extension(filename).strip('.')
            if extension == filetype.lower():
                utils.make_dir(target)
                new_name = utils.find_untaken_name(filename, target)
                os.rename(os.path.join(root, filename), os.path.join(target, new_name))
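utils.find_untaken_name is project-specific; a hypothetical implementation consistent with this usage (append a counter until the name is free in the target directory):

import os

def find_untaken_name(filename, target):
    base, ext = os.path.splitext(filename)
    candidate, n = filename, 1
    while os.path.exists(os.path.join(target, candidate)):
        candidate = '%s_%d%s' % (base, n, ext)
        n += 1
    return candidate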
Example #29
 def _chromfile(self):
     chrom_file = "{}.call/genomic_regions_chrom.txt".format(
         self.worker_name)
     if not os.path.isfile(chrom_file):
         make_dir(os.path.dirname(chrom_file))
         with open(chrom_file, 'w') as out:
             with open(self.refidx) as f:
                 chroms = []
                 for line in f:
                     chrom, chrom_size = line.split()[:2]
                     chrom_size = int(chrom_size)
                     chroms.append((chrom, chrom_size))
                 chroms.sort(key=lambda chrom:chrom[1], reverse=True)
             for chrom, end in chroms:
                 out.write('{}:1-{}\n'.format(chrom, end))
     return chrom_file
Example #30
 def event_logs(self, job_dict):
     # All the event logs windows generates
     logs = glob.glob('c:\\windows\\system32\\winevt\\logs\\*.evtx')
     job_dict['results']['event_logs'] = []
     check = make_dir('c:\\tmp')
     if check:
         job_dict['results']['event_logs'].append(check)
         return job_dict
 
     with zipfile.ZipFile('c:\\tmp\\evt_logs.zip', 'w', zipfile.ZIP_DEFLATED) as zipped:
         for log in logs:
             log_name = log.rpartition('\\')[2]
             log = '"' + log + '"'
             cmd = 'wevtutil epl "' + log_name.rpartition('.')[0].replace('%4', '/') + '" "c:\\tmp\\' + log_name + '"'
             handle_popen(cmd)
             zipped.write('c:\\tmp\\' + log_name)
 
     if job_dict.get('exe'):
         exe_path, exe_name = split_path_name(job_dict.get('exe'))
         dst = job_dict.get('dest_vm') + '/' + job_dict.get('date_stamp') + '/' + job_dict.get('job') + '/' + exe_name + '_evtlogs_win7_gaddn.zip'
     else:
         dst = job_dict.get('dest_vm') + '/' + job_dict.get('date_stamp') + '/' + job_dict.get('job') + '/' + job_dict.get('time_stamp') + '_evtlogs_win7_gaddn.zip'
     pscp_push('c:\\tmp\\evt_logs.zip', dst)
     job_dict['results']['event_logs'] = ['', cmd]
     return job_dict
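Note the contract here: make_dir is expected to return a falsy value on success and an error message on failure, rather than raising. A sketch of such a variant (an assumption, not the project's actual helper):

import errno
import os

def make_dir(path):
    # Return '' on success (or if the directory already exists),
    # otherwise return the error as a string.
    try:
        os.makedirs(path)
        return ''
    except OSError as e:
        if e.errno == errno.EEXIST:
            return ''
        return str(e)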
Example #31
def plot_degree_distribution(degrees, counts, title, path):
    """
    Plot the degree distribution given by degrees and counts.

    :param - degrees: list of unique degree values
    :param - counts: list of counts for each degree value
    :param - title: title for the plot
    :param - path: save path for plot
    """
    plt.close('all')
    plt.plot(degrees, counts)
    plt.xlabel('Node Degree')
    plt.ylabel('Count')
    plt.title(title)
    plt.tight_layout()
    utils.make_dir('images')
    plt.savefig(path)
Example #32
    def __init__(self, conf):
        self._batch_size = conf.batch_size
        self._emb_size = conf.emb_size  # ??
        self._epoch = conf.epoch
        self._gamma = conf.gamma
        self._gpu = conf.gpu
        self._lr = conf.lr
        self._momentum = conf.momentum
        self._num_gpu = conf.num_gpu
        self._split_size = conf.split_size
        self._test_frac = conf.test_frac
        self._weight_decay = conf.weight_decay  # ??
        self._type_loss_weight = conf.type_loss_weight

        self._train_data_path = os.path.join(conf.data_dir, 'train_data.p')
        self._model_dir = os.path.join(conf.data_dir, 'models')
        make_dir(self._model_dir)
Example #33
    def download_mnist(self, path):

        utils.make_dir(path)

        # file download information
        url = 'http://yann.lecun.com/exdb/mnist'

        filenames = [
            "train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
            "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"
        ]

        expected_bytes = [9912422, 28881, 1648877, 4542]

        for filename, byte in zip(filenames, expected_bytes):
            download_url = posixpath.join(url, filename)
            local_path = os.path.join(path, filename)
            utils.download_file(download_url, local_path, byte, unzip=True)
Example #34
def run_structure(mainparams, extraparams, k, config, run_per_k=3):
    print('run structure')
    outdir = os.path.join(config['report'], 'structure')
    utils.make_dir(outdir)
    output = os.path.join(outdir, 'output_k%s' % k)
    utils.check_file(mainparams)
    utils.check_file(extraparams)
    if re.match(r'\d+', config.get('replicate', '')):
        print('matched')
        run_per_k = int(config['replicate'])
        logging.info('Got replicate %s for each K', run_per_k)
    for r in range(run_per_k):
        logging.info('Running structure at K %s for round %s', k, r)
        subprocess.call(
            "/home/pub/software/Structure/bin/structure -m %s -e %s -K %d -o %s_run%s"
            % (mainparams, extraparams, k, output, r),
            shell=True)
    return None
Example #35
def main(args):
    # Load and standardize data.
    embedding_file_path = 'node2vec/embeddings/' + args.input
    X_train, X_test, y_train, y_test = load_splits(embedding_file_path)
    X_train, X_test = standardize_data(X_train, X_test)

    # Train classifier and make predictions.
    optimal_svc = hyperparameter_search(SVC(), X_train, y_train)
    print('Cross Validation Accuracy:', optimal_svc.best_score_)
    print('Optimal parameters:', optimal_svc.best_params_)
    predictions = optimal_svc.predict(X_test)

    # Report results.
    utils.make_dir('images/svc')
    cm_path = 'images/svc/cm_' + args.input[:-4] + '.png'
    utils.accuracy(predictions, y_test)
    utils.confusion_matrix(predictions, y_test, 'Confusion Matrix - SVC',
                           cm_path)
Example #36
class BaseConfig(object):
    PROJECT = "rootio"
    SECRET_KEY = 'some random key'
    CONTENT_DIR = "/var/content"
    LOG_FOLDER = os.path.join(INSTANCE_FOLDER_PATH, 'logs')
    make_dir(LOG_FOLDER)
    ZMQ_BIND_ADDR = "tcp://127.0.0.1:55777"
    ZMQ_SOCKET_TYPE = "PUB"
    ACCEPT_LANGUAGES = {'en': 'English'}
Example #37
    def __init__(self,
                 fold,
                 bs,
                 device="cpu",
                 val_size=0.2,
                 eps=1e-10,
                 eval=True):
        self.name = "asas_sn"
        self.fold = fold
        self.eps = eps
        self.val_size = val_size
        self.bs = bs
        self.device = device

        self.data_path = "processed_data/{}".format(self.name)
        make_dir("processed_data")
        make_dir(self.data_path)

        if not eval:
            self.load_data()
            self.x_train, self.x_train_folded, self.m_train, self.s_train = self.normalize(
                self.x_train, self.x_train_folded)
            self.x_test, self.x_test_folded, self.m_test, self.s_test = self.normalize(
                self.x_test, self.x_test_folded)
            self.x_val, self.x_val_folded, self.m_val, self.s_val = self.normalize(
                self.x_val, self.x_val_folded)
            self.x_train, self.x_train_folded = self.compute_dt(
                self.x_train, self.x_train_folded)
            self.x_test, self.x_test_folded = self.compute_dt(
                self.x_test, self.x_test_folded)
            self.x_val, self.x_val_folded = self.compute_dt(
                self.x_val, self.x_val_folded)
            self.seq_len_train, self.p_train = self.time_series_features(
                self.x_train, self.p_train)
            self.seq_len_test, self.p_test = self.time_series_features(
                self.x_test, self.p_test)
            self.seq_len_val, self.p_val = self.time_series_features(
                self.x_val, self.p_val)
            self.save_processed_data()
        else:
            self.load_processed_data()
            self.compute_seq_len()

        self.define_datasets()
Example #38
def make_train_test_ds(ds_path, resize_factor=2):
    bts_path = utils.make_dir(ds_path, BTS_SUBDIR)
    bts_data_path = utils.make_dir(ds_path, BTS_SUBDIR, BTS_DATA_SUBDIR)
    res = []
    for cam, image_path, depth_path, image_name, depth_name, image, depth in iterate_files(
            ds_path):
        print(image_name, depth_name)

        if resize_factor != 1:
            new_size = (image.size[0] // resize_factor,
                        image.size[1] // resize_factor)
            # Resize and save image
            image = image.resize(new_size, Image.BILINEAR)

        bts_image_name = image_name
        bts_image_path = os.path.join(bts_data_path, bts_image_name)
        image.save(bts_image_path)

        bts_depth_name = 'none'
        if depth is not None:
            # Fix depth
            depth_excess_inds = np.where(depth > MAX_DEPTH)
            if len(depth_excess_inds[0]):
                for p in zip(*depth_excess_inds):
                    fix_depth(depth, p)
            depth *= 1000
            depth = depth.astype(np.int32)
            depth_i = Image.fromarray(depth, 'I')
            # Resize and save depth as png
            depth_i = depth_i.resize(new_size)
            bts_depth_name = os.path.splitext(depth_name)[0] + '.png'
            bts_depth_path = os.path.join(bts_data_path, bts_depth_name)
            depth_i.save(bts_depth_path)

        res.append((bts_image_name, bts_depth_name,
                    cam['intrinsics']['fx'] / resize_factor))

    res_file_name = 'meta.txt'
    res_file_path = os.path.join(bts_path, res_file_name)
    with open(res_file_path, 'w') as fres:
        for item in res:
            fres.write('%s %s %.4f\n' % item)

    split_train_test(ds_path)
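Here utils.make_dir accepts multiple path components and returns the joined path, a third signature variant on this page. A minimal sketch under that assumption:

import os

def make_dir(*parts):
    # Join the components, create the directory if needed, and return the path.
    path = os.path.join(*parts)
    os.makedirs(path, exist_ok=True)
    return path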
Example #39
    def gather_random_trajectories(self,num_traj):
        if self.save_data:
            work_dir = '/content/drive/My Drive/MsPacman-data' + '/' + self.ts +'_capacity-' + str(self.buffer_capacity) +'_grayscale-'+ str(self.grayscale) + '_walls_present-'+ str(self.walls_present)
            work_dir = make_dir(work_dir)

        for n in range(num_traj):
            if n % 10 ==0:
                print('trajectory number:',n)
                # Initial set up
            #self.env.seed(0)

            self.env = gym.make(self.ENV_NAME) # Due to error in code, I reinstantiate the env each time
            self.env = custom_wrapper(self.env,grayscale = self.grayscale,frame_stack=self.frame_stack,frames = self.frames)
            obs = self.env.reset()

            self.repeated_end = False
            info_labels = self.env.labels() # Nawid - Used to get the current state
            state = self.state_conversion(info_labels) # Used to get the initial state
            prev_action = None # Initialise prev_action as having no action

            while True:
                sampled_action, infeasible_action_one_hot = self.random_action_selection(state,prev_action)
                sampled_action_one_hot = self.one_hot(sampled_action)

                next_obs, reward, done, next_info = self.env.step(sampled_action)
                next_info_labels = next_info['labels']

                next_state = self.state_conversion(next_info_labels)
                state_change = next_state -  state

                self.check_state(state,next_state)
                self.check_all_agents(info_labels, next_info_labels) # need to use the info labels to predict the state as the info labels have all the information

                if not self.repeated_end:
                    if infeasible_action_one_hot is not None and self.walls_present:
                        fake_next_state = np.zeros_like(state) #  Need to instantiate a new version each time to prevent updating a single variable which will affect all places(eg lists) where the variable is added
                        fake_next_state[0:-2] = next_state[0:-2].copy() # the enemy position of the fake next state is the current enemy position
                        fake_next_state[-2:] = state[-2:].copy() # The agent position for the fake next state is the state before any action was taken
                        fake_state_change = fake_next_state - state
                        self.replay_buffer.add(obs,infeasible_action_one_hot, next_obs) # THERE IS NOTHING SUCH AS A FAKE NEXT_OBS SINCE IT IS AN IMAGE - THE CLOSEST THING WOULD BE AN ACTION WHERE NOTHING OCCURS

                    self.replay_buffer.add(obs,sampled_action_one_hot, next_obs)
                else:
                    done = True

                obs = next_obs # do not need to copy as a new variable of obs is instantiated at each time step.
                state = next_state.copy()
                info_labels = next_info_labels.copy()
                prev_action = sampled_action

                if done:
                    break
                if self.replay_buffer.full:
                    if self.save_data:
                        self.replay_buffer.save(work_dir)
                    return
Example #40
def main():
    graph = utils.load_graph()
    position = utils.get_positions(graph)
    utils.make_dir('images/spectral')

    true_communities = utils.get_labels(graph, list(graph.nodes))
    utils.plot_communities(graph, position, true_communities, labels=True, title='Butterfly Similarity Network - True Communities', path='images/spectral/communities_true.png')

    node_assignments = spectral_clustering(graph)
    nodes_to_communities = {k:v for (k,v) in zip(range(len(node_assignments)), node_assignments)}
    communities = utils.group_communities(nodes_to_communities)
    utils.plot_communities(graph, position, communities, labels=False, title='Butterfly Similarity Network - Spectral Communities', path='images/spectral/communities_spectral.png')

    graph_nodes = sorted(list(graph.nodes))
    predictions = utils.predict_majority_class(graph, communities)
    preds = [predictions[n] for n in graph_nodes]
    labels = [graph.nodes[n]['label'] for n in graph_nodes]
    utils.accuracy(preds, labels)
    utils.confusion_matrix(preds, labels, 'Confusion Matrix - Spectral Clustering', 'images/spectral/cm_spectral.png')
Example #41
def train_model(args, HL_replay_buffer, high_level_planning):
    model_save_dir = utils.make_dir(
        os.path.join(args.save_dir +
                     '/trial_%s' % str(args.seed))) if args.save else None
    logger = Logger(model_save_dir, name='train')
    # HL_replay_buffer.load_buffer(model_save_dir )
    high_level_planning.load_mean_var(model_save_dir + '/buffer_data')

    high_level_planning.update_model(HL_replay_buffer, logger)
    high_level_planning.save_data(model_save_dir)
Example #42
def plot_degree_distribution_by_species(species, degrees, title, path):
    """
    Plot the average degree for each species.

    :param - species: list of species names
    :param - degrees: list of average degree for each species
    :param - title: title for the plot
    :param - path: save path for plot
    """
    plt.close('all')
    labels = range(len(species))
    plt.bar(labels, degrees)
    plt.xticks(labels, species, rotation=45, ha='right', fontsize=10)
    plt.xlabel('Species')
    plt.ylabel('Average Degree')
    plt.title(title)
    plt.tight_layout()
    utils.make_dir('images')
    plt.savefig(path)
Example #43
def save_exp_results(model, perf_dict, data_str, course_str, model_str, fold,
                     concept_dim, lambda_t, lambda_q, lambda_bias, slr, lr,
                     max_iter, validation):

    if not validation:
        model_dir_path = "saved_models/{}/{}/{}/fold_{}".format(
            data_str, course_str, model_str, fold)
        make_dir(model_dir_path)
        para_str = "concept_{}_lt_{}_lq_{}_lbias_{}_slr_{}_" \
                   "lr_{}_max_iter_{}".format(concept_dim,lambda_t, lambda_q,lambda_bias, slr, lr, max_iter)
        model_file_path = "{}/{}_model.pkl".format(model_dir_path, para_str)
        pickle.dump(model, open(model_file_path, "wb"))

    result_dir_path = "results/{}/{}/{}".format(data_str, course_str,
                                                model_str)
    make_dir(result_dir_path)

    if validation:
        result_file_path = "{}/fold_{}_cross_val.json".format(
            result_dir_path, fold)
    else:
        result_file_path = "{}/fold_{}_test_results.json".format(
            result_dir_path, fold)

    if not os.path.exists(result_file_path):
        with open(result_file_path, "w") as f:
            pass

    result = {
        'concept_dim': concept_dim,
        'lambda_t': lambda_t,
        'lambda_q': lambda_q,
        'lambda_bias': lambda_bias,
        'student_learning_rate': slr,
        'learning_rate': lr,
        'max_iter': max_iter,
        'perf': perf_dict
    }

    output_lock.acquire()
    with open(result_file_path, "a") as f:
        f.write(json.dumps(result) + "\n")
    output_lock.release()
Example #44
    def clone_wrf(self, tgt, with_files):
        """
        Clone the WRFV3 directory (self.wrf_idir) into tgt together with the additional files with_files.

        :param tgt: target directory into which WRF is cloned
        :param with_files: a list of files from the WPS source directory that should be symlinked
        :return:
        """
        src = osp.join(self.wrf_idir, "run")

        # gather all files to symlink in one place
        symlinks = list(self.wrf_files)
        symlinks.extend(with_files)

        # create target directory (and all intermediate subdirs if necessary)
        make_dir(tgt)

        # symlink all at once
        for f in symlinks:
            symlink_unless_exists(osp.join(src, f), osp.join(tgt, f))
Example #45
def main(args):
    # Initialize environment
    env = init_env(args)
    model_dir = utils.make_dir(os.path.join(args.work_dir, 'model'))
    video_dir = utils.make_dir(os.path.join(args.work_dir, 'video'))
    video = VideoRecorder(video_dir if args.save_video else None,
                          height=448,
                          width=448)

    # Prepare agent
    assert torch.cuda.is_available(), 'must have cuda enabled'
    cropped_obs_shape = (3 * args.frame_stack, 84, 84)
    agent = make_agent(obs_shape=cropped_obs_shape,
                       action_shape=env.action_space.shape,
                       args=args)
    agent.load(model_dir, args.load_checkpoint)

    # Evaluate agent without PAD
    print(
        f'Evaluating {args.work_dir} for {args.pad_num_episodes} episodes (mode: {args.mode})'
    )
    eval_reward = evaluate(env, agent, args, video)
    print('eval reward:', int(eval_reward))

    # Evaluate agent with PAD (if applicable)
    pad_reward = None
    if args.use_inv or args.use_curl or args.use_rot:
        env = init_env(args)
        print(
            f'Policy Adaptation during Deployment of {args.work_dir} for {args.pad_num_episodes} episodes '
            f'(mode: {args.mode})')
        pad_reward = evaluate(env, agent, args, video, adapt=True)
        print('pad reward:', int(pad_reward))

    # Save results
    results_fp = os.path.join(args.work_dir, f'{args.mode}_pad.pt')
    torch.save(
        {
            'args': args,
            'eval_reward': eval_reward,
            'pad_reward': pad_reward
        }, results_fp)
    print('Saved results to', results_fp)
Example #46
def main(config):
    dataset_train = get_dataset_train()
    model = get_unet()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    loss_fn = nn.MSELoss()

    make_dir(config.result_dir)
    make_dir(config.sample_dir)
    make_dir(config.model_dir)
    make_dir(config.log_dir)

    print("Start training...")

    for epoch in range(config.epochs):
        SAVE_IMAGE_DIR = "{}/{}".format(config.sample_dir, epoch)
        make_dir(SAVE_IMAGE_DIR)
        train_loss = []

        for i, (image, mask) in enumerate(
                DataLoader(dataset_train,
                           batch_size=config.batch_size,
                           shuffle=True)):

            image = image.to(device)
            mask = mask.to(device)

            y_pred = model(image)
            loss = loss_fn(y_pred, mask)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss.append(loss.item())

            if i % 50 == 0:
                save_image(y_pred, "{}/{}.png".format(SAVE_IMAGE_DIR, i))
                print(train_loss[-1])

        print("Epoch: %d, Train: %.3f" % (epoch, np.mean(train_loss)))
        if epoch % 5 == 0:
            print("Saved model... {}.pth".format(epoch))
            save_checkpoint("{}/{}.pth".format(config.model_dir, epoch), model,
                            optimizer)
Example #47
def train(model, x_train_dir, y_train_dir, x_val_dir, y_val_dir, batch_size,
          epochs):
    # Running on multi GPU
    print('Tensorflow backend detected; Applying memory usage constraints')
    ss = K.tf.Session(config=K.tf.ConfigProto(gpu_options=K.tf.GPUOptions(
        allow_growth=True),
                                              log_device_placement=True))
    K.set_session(ss)
    ss.run(K.tf.global_variables_initializer())
    K.set_learning_phase(1)

    # print("Getting data.. Image shape: {}. Masks shape : {}".format(x.shape,
    #                                                                 y.shape))
    # print("The data will be split to Train Val: 80/20")

    # saving weights and logging
    filepath = 'weights/' + model.name + '.{epoch:02d}-{loss:.2f}.hdf5'
    make_dir(filepath)
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=1,
                                 save_weights_only=False,
                                 save_best_only=True,
                                 mode='auto',
                                 period=1)
    tensor_board = TensorBoard(log_dir='logs/')

    # history = model.fit(x=x, y=y, batch_size=batch_size, epochs=epochs,
    #                     verbose=1, callbacks=[checkpoint, tensor_board], validation_split=0.2)

    # getting image data generator
    seed = 1
    train_generator = myGenerator(x_train_dir, y_train_dir, batch_size, seed)
    val_generator = myGenerator(x_val_dir, y_val_dir, batch_size, seed)

    history = model.fit_generator(train_generator,
                                  callbacks=[checkpoint, tensor_board],
                                  steps_per_epoch=1000 / batch_size,
                                  epochs=epochs,
                                  validation_steps=1000 / batch_size,
                                  validation_data=val_generator)

    return history
Example #48
def save():
    rmse_th = float(request.args.get("rmse_th"))
    min_th = float(request.args.get("min_th"))
    max_th = float(request.args.get("max_th"))

    result_dir = ["result"]
    shutil.rmtree("./result")
    mix_dir = "result/mix"
    utils.make_dir(["result", mix_dir])
    for k, v in result_dict.items():
        copy_dir = "result/" + k
        utils.make_dir([copy_dir])
        for directory, v1 in v.items():
            if v1["param"] <= max_th and min_th <= v1["param"]:
                if v1["rmse"] < rmse_th:
                    tmp = directory.split("/")[-1]
                    shutil.copyfile(directory, copy_dir + "/" + tmp)
                    shutil.copyfile(directory, mix_dir + "/" + tmp)
    return
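This project's utils.make_dir takes a list of directories instead of a single path; a plausible sketch:

import os

def make_dir(paths):
    # Create every directory in the list, skipping ones that already exist.
    for p in paths:
        if not os.path.exists(p):
            os.makedirs(p)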
Example #49
def collect_args_full_skew():
    parser = argparse.ArgumentParser()
    parser.add_argument('--attribute1', type=int, default=31)
    parser.add_argument('--attribute2', type=int, default=20)
    parser.add_argument('--real_data_dir', type=str, default='data/celeba')
    parser.add_argument('--random_seed', type=int, default=0)
    parser.add_argument('--test_mode', type=bool, default=False)
    parser.add_argument('--opp', type=bool, default=False)
    parser.set_defaults(cuda=True)

    opt = vars(parser.parse_args())

    attr_list = utils.get_all_attr()
    opt['attr_name1'] = attr_list[opt['attribute1']]
    opt['attr_name2'] = attr_list[opt['attribute2']]
    if torch.cuda.is_available():
        opt['device'] = torch.device('cuda')
    else:
        opt['device'] = torch.device('cpu')
    opt['dtype'] = torch.float32
    opt['total_epochs'] = 20
    params_real_train = {'batch_size': 32, 'shuffle': True, 'num_workers': 0}

    params_real_val = {'batch_size': 64, 'shuffle': False, 'num_workers': 0}

    data_setting = {
        'path': opt['real_data_dir'],
        'params_real_train': params_real_train,
        'params_real_val': params_real_val,
        'attribute1': opt['attribute1'],
        'attribute2': opt['attribute2'],
        'augment': True
    }
    opt['data_setting'] = data_setting
    if opt['opp']:
        opt['save_folder'] = 'record/full_skew/attr_{}_{}_opp/'.format(
            opt['attribute1'], opt['attribute2'])
    else:
        opt['save_folder'] = 'record/full_skew/attr_{}_{}/'.format(
            opt['attribute1'], opt['attribute2'])
    utils.make_dir('record/full_skew')
    utils.make_dir(opt['save_folder'])
    return opt
Example #50
    def __init__(self, env_name, log_dir, decimate_step=250) -> None:
        super().__init__()
        self.env_name = env_name
        self.log_dir = log_dir
        self.decimate_step = decimate_step
        self.data_dir = join(self.log_dir, self.env_name)
        self.fig_dir = self.base_dir = join(
            dirname(dirname(abspath(__file__))), join("figures",
                                                      self.env_name))
        make_dir(self.fig_dir)

        self.params_df = pd.read_csv(join(self.data_dir, "params.tsv"), "\t")

        self.logs = {}

        mean_reward = []
        mean_feat_std = []
        mean_proxy = []

        # load trainings
        for timestamp in self.params_df.timestamp:
            self.logs[timestamp] = TemporalLogger(self.env_name, timestamp,
                                                  self.log_dir,
                                                  *["rewards", "features"])
            self.logs[timestamp].load(
                join(self.data_dir, f"time_log_{timestamp}"),
                self.decimate_step)

            # calculate statistics
            mean_reward.append(
                self.logs[timestamp].__dict__["rewards"].mean.mean())
            mean_feat_std.append(
                self.logs[timestamp].__dict__["features"].std.mean())
            mean_proxy.append(mean_reward[-1] * mean_feat_std[-1])

        # append statistics to df
        self.params_df["mean_reward"] = pd.Series(mean_reward,
                                                  index=self.params_df.index)
        self.params_df["mean_feat_std"] = pd.Series(mean_feat_std,
                                                    index=self.params_df.index)
        self.params_df["mean_proxy"] = pd.Series(mean_proxy,
                                                 index=self.params_df.index)
Example #51
    def create_models(self, input_path, output_path, flag_summary): 
        ''' Create the gold standard corpus (manual summaries) '''
        if flag_summary == "E": 
            type_summary = "Extrativos"
        elif flag_summary == "A":
            type_summary = "Abstrativos"
        else:
            raise ValueError("Invalid option")

        models_name = ["A", "B", "C", "D", "E"]
        folders = sorted(os.listdir(input_path))

        for folder in folders:
            file_list = os.listdir(os.path.join(input_path, folder, type_summary))
            make_dir(os.path.join(output_path, folder, "models"))
            
            for i in range(len(models_name)):
                shutil.copy(os.path.join(input_path, folder, type_summary, file_list[i]), os.path.join(output_path, folder, "models", "1_model%s.txt" % models_name[i]))
                if flag_summary == "E":
                    self.__replace_id(os.path.join(output_path, folder, "models", "1_model%s.txt" % models_name[i]))
Example #52
    def _load(self, account):
        self.account = account

        # Create home directory
        utils.make_dir('')
        self.configfile = utils.get_root_filename('config.json')

        # Create user directory
        userfolder = "%s.%s" % (account['username'], account['api'])
        utils.make_dir(userfolder)

        self.msg.info(
            self.name, 'Trackma v{0} - using account {1}({2}).'.format(
                utils.VERSION, account['username'], account['api']))
        self.msg.info(self.name, 'Reading config files...')
        try:
            self.config = utils.parse_config(self.configfile,
                                             utils.config_defaults)
        except IOError:
            raise utils.EngineFatal("Couldn't open config file.")
Example #53
def download(base_url, hyperlink_regex, output_dir):
    if not base_url.endswith('/'):
        base_url += '/'
    hrx = re.compile(hyperlink_regex)
    with urllib.request.urlopen(base_url) as response:
        html = response.read()
    link_fname_tuples = []
    for link in BeautifulSoup(html, "html.parser", parse_only=SoupStrainer('a')):
        if link.has_attr('href') and hrx.match(link['href']):
            link_fname_tuples.append((base_url + link['href'], link['href']))
    print()
    pprint([fname for _, fname in link_fname_tuples])
    if utils.confirm('\nDownload these files?'):
        utils.make_dir(output_dir)
        for link, fname in link_fname_tuples:
            output_path = os.path.join(output_dir, fname)
            print('saving %s to %s...' % (fname, output_path), end='')
            urllib.request.urlretrieve(link, os.path.join(output_dir, fname))
            print('done')
    return
Example #54
def build_vocab(words, vocab_size):
    """ Build vocabulary of VOCAB_SIZE most frequent words """
    dictionary = dict()
    count = [('UNK', -1)]
    # the first element of the list is the tuple ('UNK', -1)
    count.extend(Counter(words).most_common(vocab_size - 1))
    # store the vocab_size - 1 most frequent words as (word, frequency) tuples
    index = 0
    utils.make_dir('processed')
    with open('processed/vocab_1000.tsv', "w") as f:
        # write the 1000 most frequent words to a TSV file in text mode
        # (in practice the top 999, since the first entry is UNK)
        for word, _ in count:
            dictionary[word] = index
            # build the (word, index) mapping
            if index < 1000:
                f.write(word + "\n")
            index += 1
    index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    # invert the mapping to build the (index, word) dictionary
    return dictionary, index_dictionary
Example #55
    def load(self, account):
        self.account = account

        # Create home directory
        utils.make_dir('')
        self.configfile = utils.get_root_filename('config.json')

        # Create user directory
        userfolder = "%s.%s" % (account['username'], account['api'])
        utils.make_dir(userfolder)
        self.userconfigfile = utils.get_filename(userfolder, 'user.json')

        self.msg.info(self.name, 'Reading config files...')
        try:
            self.config = utils.parse_config(self.configfile,
                                             utils.config_defaults)
            self.userconfig = utils.parse_config(self.userconfigfile,
                                                 utils.userconfig_defaults)
        except IOError:
            raise utils.EngineFatal("Couldn't open config file.")
Example #56
File: video.py Project: kylehkhsu/drq
 def __init__(self, view, root_dir, height=256, width=256, fps=10):
     self.view = view
     self.save_dir = utils.make_dir(root_dir, 'video') if root_dir else None
     self.height = height
     self.width = width
     self.fps = fps
     if str(self.view) == 'both':
         self.frames1 = []
         self.frames3 = []
     else:
         self.frames = []
Example #57
class BaseConfig(object):
    PROJECT = "rootio"

    DEBUG = False
    TESTING = False

    ADMINS = ['*****@*****.**','*****@*****.**','*****@*****.**']

    # http://flask.pocoo.org/docs/quickstart/#sessions
    SECRET_KEY = 'SeekritKey'

    LOG_FOLDER = os.path.join(INSTANCE_FOLDER_PATH, 'logs')
    make_dir(LOG_FOLDER)

    # File upload; should be overridden in production.
    # Limited the maximum allowed payload to 16 megabytes.
    # http://flask.pocoo.org/docs/patterns/fileuploads/#improving-uploads
    MAX_CONTENT_LENGTH = 16 * 1024 * 1024
    UPLOAD_FOLDER = os.path.join(INSTANCE_FOLDER_PATH, 'uploads')
    make_dir(UPLOAD_FOLDER)
Example #58
def clear_previous_dirs():
    print("#################################")
    print("Clearing previous directories...")
    print("#################################\n")
    shutil.rmtree(const.IMAGES_PATH)

    for path in [const.IMAGES_PATH, const.TRAINING_PATH, const.TESTING_PATH]:
        utils.make_dir(path)

    print("#################################")
    print("Assert directories exist...")
    print("#################################\n")
    for path in [const.IMAGES_PATH, const.TRAINING_PATH, const.TESTING_PATH]:
        assert os.path.exists(path)
        print(path, "exists.")

    assert len(os.listdir(const.IMAGES_PATH)) == 2
    assert len(os.listdir(const.TRAINING_PATH)) == 0
    assert len(os.listdir(const.TESTING_PATH)) == 0
    print("\nDirectories empty.")
Example #59
    def clone_wps(self, tgt, with_files):
        """
        Clone the WPS installation directory (self.wps_idir) together with the chosen table files vtables
        and additional files with_files.  The WPS clone is created in directory tgt.

        :param tgt: target directory into which WPS is cloned
        :param with_files: a list of files from the WPS source directory that should be symlinked
        :return:
        """
        src = self.wps_idir

        # build a list of all files that are simply symlinked
        symlinks = list(self.wps_exec_files)
        symlinks.extend(with_files)

        # create target directory (and all intermediate subdirs if necessary)
        make_dir(tgt)

        # clone all WPS executables
        for f in symlinks:
            symlink_unless_exists(osp.join(src, f), osp.join(tgt, f))
Example #60
def train():
    make_dir(LOG_PATH)
    log_file = open(os.path.join(LOG_PATH,
                                 '{}_{}.txt'.format(args.train, args.test)),
                    mode='w')

    for epoch in range(1, EPOCH + 1):
        iter_str = '{:03}'.format(epoch)
        print('\nloop : ', iter_str, file=log_file, flush=True)
        print(datetime.datetime.now(), file=log_file, flush=True)

        log_lists = np.zeros(4)

        for i in range(ITER_PER_EPOCH):
            print(i, '\r', end='')

            lr2, upcubic2, hr2 = next(ds2_it)
            lr3, upcubic3, hr3 = next(ds3_it)
            lr4, upcubic4, hr4 = next(ds4_it)

            input2 = lr2, upcubic2, 2
            input3 = lr3, upcubic3, 3
            input4 = lr4, upcubic4, 4

            next_lr = tf.constant(cosine_decay(ITER_PER_EPOCH * (epoch - 1) +
                                               i),
                                  dtype=tf.float32)
            cost, db2, db3, db4 = _train_step(next_lr, input2, input3, input4,
                                              hr2, hr3, hr4)
            log_lists += np.asarray([cost, db2, db3, db4])

        print(list(log_lists / ITER_PER_EPOCH), file=log_file, flush=True)

        if 1:  #epoch % 10 == 0:
            net.save_model()
            for scale in SCALES:
                print('scale',
                      scale,
                      validate(scale),
                      file=log_file,
                      flush=True)