Example #1
def make_dirs(self,
               audio_path,
               frames_path,
               remake_audio_dir=False,
               remake_frames_dir=False):
     utils.make_dirs(audio_path, remake_audio_dir)
     utils.make_dirs(frames_path, remake_frames_dir)
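The snippets in this collection all rely on a project-local make_dirs helper that the excerpts themselves never define, and its accepted signature clearly varies between projects (a single path, a list of paths, several positional paths, or extra keyword arguments such as a log handle). A minimal sketch of such a helper, assuming the common "create if missing, optionally recreate" semantics, might look like this:

import os
import shutil


def make_dirs(paths, remake=False):
    """Create one directory or several, creating parents as needed.

    Accepts a single path string or an iterable of paths; if remake is
    True, an existing directory is removed and recreated from scratch.
    Returns the last path so callers can use the return value directly.
    """
    paths = [paths] if isinstance(paths, str) else list(paths)
    for path in paths:
        if remake and os.path.isdir(path):
            shutil.rmtree(path)  # wipe and recreate when asked to
        os.makedirs(path, exist_ok=True)
    return paths[-1] if paths else None

Project-specific variants seen in the examples below (a reason keyword, a log argument, or a no-argument call that builds a fixed directory set) are features of those codebases and are not modeled here.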
Example #2
    def __init__(self, config, model, experiments):
        self.config = config
        self.model = model
        self.experiments = experiments
        self.trainloader, _ = data_loader(config)

        # checkpoint
        self.checkpoint_dir = make_dirs(
            os.path.join(self.config.result_path, self.config.checkpoint_path))
        self.ckpt = tf.train.Checkpoint(enc=self.model.enc,
                                        dec=self.model.dec,
                                        optim=self.model.optim,
                                        epoch=self.model.global_epoch)
        self.ckpt_manager = tf.train.CheckpointManager(
            self.ckpt,
            directory=self.checkpoint_dir,
            checkpoint_name='ckpt',
            max_to_keep=2)

        # tensorboard
        self.tensorboard_dir = make_dirs(
            os.path.join(self.config.result_path,
                         self.config.tensorboard_path))
        self.summary_writer = tf.summary.create_file_writer(
            self.tensorboard_dir)
Example #3
    def generate_files_from_template(self) -> None:
        src_dir_path = self.template.root_dir
        file_paths = utils.get_files(src_dir_path,
                                     excepts='/templates/manual')

        for src_file_path in file_paths:
            src_file = Path(src_file_path)

            if src_file.is_file():
                relative_file_path = str(src_file.relative_to(src_dir_path))

                dest_file_path = path.join(self.config.output_pj_path,
                                           relative_file_path)
                dest_file_dir_path = path.dirname(dest_file_path)

                # create the file's directory if it does not exist
                utils.make_dirs([dest_file_dir_path])

                if 'tpl' in path.basename(src_file_path) and path.basename(
                        src_file_path)[0] != '.':
                    relative_src_file_path = str(
                        src_file.relative_to(self.template.root_dir))
                    self.template.generate(relative_src_file_path,
                                           dest_file_dir_path)
                else:
                    shutil.copy2(src_file_path, dest_file_path)
Example #4
def start_browser_fuzz(browser, fuzz_type, limit=-1):
    """Start Fuzzing
    Limit: -1 (Infinite fuzz)
    """

    print "[INFO] Running Browser Fuzzer"
    print "\n[DETAILS]"
    print "Browser: " + browser
    print "Fuzzer Type: " + fuzz_type
    print "Fuzz Iteration: " + str(limit)
    utils.make_dirs()
    utils.adb_connection_int(settings.BROWSERS)
    iteration = 1
    fuzz_server_url = "http://" + settings.FUZZ_IP + ":" + str(
        settings.SERVER_PORT)
    if fuzz_type == "domato":
        while True:
            url = fuzz_server_url + "/fuzz_html/" + str(iteration)
            browser_fuzz(browser, fuzz_type, iteration, url)
            if iteration == limit:
                break
            iteration += 1
    elif fuzz_type == "pregenerated":
        for html in get_htmls():
            html_file_name = os.path.basename(html)
            url = fuzz_server_url + "/html/" + html_file_name
            browser_fuzz(browser, fuzz_type, iteration, url)
            if iteration == limit:
                break
            iteration += 1
    print "[INFO] Browser Fuzzing Completed!"
    print "\n[Status]"
    print "Browser: " + browser
    print "Fuzzer Type: " + fuzz_type
    print "Fuzz Iteration: " + str(limit)
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=True)
    args = parser.parse_args()

    dirs = ["trainval/", "test/"]
    dirs = [args.output_path + d for d in dirs]
    utils.make_dirs(dirs)

    conf = configuration.load_configuration()
    images = utils.collect_images(os.path.dirname(args.input_path))

    # reproducible datasets
    random.seed(1337)
    random.shuffle(images)

    rois = roi_metadata.read_metadata(args.input_path)

    features_train = set(
        os.path.basename(i)
        for i in images[0:int(len(images) * conf.split_ratio)])
    generate_dataset(rois, os.path.dirname(args.input_path), features_train,
                     dirs[0])

    features_test = set(
        os.path.basename(i)
        for i in images[int(len(images) * conf.split_ratio):])
    generate_dataset(rois, os.path.dirname(args.input_path), features_test,
                     dirs[1])
Example #6
 def prepare(self):
     """remove temporary files, create the directory structure"""
     utils.rmtree(self.work_path)
     utils.make_dirs(os.path.join(self.work_iso, 'seedbank/etc/runonce.d'))
     utils.make_dirs(self.work_initrd)
     utils.run('bsdtar -C "%s" -xf "%s"' % (self.work_iso, self.iso_file))
     utils.run('chmod -R u+w "%s"' % self.work_iso)
Example #7
def before_request():
    """ Ensure input/output directories exist """
    config = app.config['soccer']
    make_dirs(
        config['input_dir'],
        config['output_dir']
    )
Example #8
 def _touch(self, directory, path):
     if os.path.exists(path):
         os.utime(path, None)    
     else:          
         utils.make_dirs(directory)
         f = open(path, 'w')
         f.close()
Example #9
 def copy_dir_contents(self, src, dst):
     """find and copy all files from src to dst"""
     utils.make_dirs(dst)
     files = [os.path.join(root, file_name) for root, _, files in
         os.walk(src) if files for file_name in files]
     for src in files:
         utils.file_copy(src, dst)
Example #10
    def __init__(self, config, model, experiments):
        self.config = config
        self.model = model
        self.experiments = experiments
        self.trainloader, _ = data_loader(config)

        # checkpoint
        self.checkpoint_dir = make_dirs(
            os.path.join(self.config.result_path, self.config.checkpoint_path))
        self.ckpt = tf.train.Checkpoint(gen=self.model.gen,
                                        g_optim=self.model.g_optim,
                                        dis=self.model.dis,
                                        d_optim=self.model.d_optim,
                                        epoch=self.model.global_epoch)
        self.ckpt_manager = tf.train.CheckpointManager(
            self.ckpt,
            directory=self.checkpoint_dir,
            checkpoint_name='ckpt',
            max_to_keep=1)

        # tensorboard
        self.tensorboard_dir = make_dirs(
            os.path.join(self.config.result_path,
                         self.config.tensorboard_path))
        self.summary_writer = tf.summary.create_file_writer(
            self.tensorboard_dir)
        tf.summary.trace_on(graph=True, profiler=True)
Example #11
def transform_multiplex_networks(layer_info_file, edge_list_file, outdir, drop_weight=False):

    make_dirs(outdir)

    networks = pd.read_csv(layer_info_file, sep=r'\s+')
    edge_df = pd.read_csv(edge_list_file,
                          names=['layerID', 'source_node', 'target_node', 'weight'], sep=r'\s+')

    columns_to_output = ['source_node', 'target_node', 'weight']
    if drop_weight:
        columns_to_output = columns_to_output[:-1]

    graph_files = []

    for i, (net_id, net_name) in networks.iterrows():
        out_fname = os.path.join(outdir, net_name + ".edgelist")
        edge_df.loc[edge_df['layerID'] == net_id, columns_to_output].to_csv(
            out_fname,
            header=False,
            index=False,
            sep=' '
        )

        graph_files.append(out_fname)

    return graph_files
Example #12
def read_flickr_lastfm_data(test_ratio, k_nearest, input_dir="data/flickr_vs_lastfm/"):

    tg_data_dirname = os.path.join(input_dir, 'tg_datasets', 'tr_' + str(test_ratio))

    if os.path.isdir(tg_data_dirname):
        return select_top_k_anchors(load_tg_dataset(tg_data_dirname), k_nearest)

    graph_files = glob.glob(os.path.join(input_dir, 'edgelist_data', "*.edgelist"))

    graph_data = []
    attributes = []

    for gf in graph_files:

        g = nx.read_edgelist(gf)
        g.graph['name'] = os.path.basename(gf).replace('.edgelist', '')
        g.graph['centrality_file'] = os.path.join(os.path.dirname(gf), g.graph['name'] + '.centrality')

        if os.path.isfile(g.graph['centrality_file']):
            g.graph['centrality'] = unpickle_data(g.graph['centrality_file'])
        else:
            g.graph['centrality'] = None

        attributes.append(np.load(gf.replace(".edgelist", ".attr.npy")))

        graph_data.append(g)

    tg_data = from_nx_to_tg_graphs(graph_data, attributes=attributes, test_ratio=test_ratio)

    make_dirs(tg_data_dirname)
    save_tg_dataset(tg_data, tg_data_dirname)

    return select_top_k_anchors(tg_data, k_nearest)
Example #13
def read_synthetic_network_dataset(input_dir, test_ratio, k_nearest):

    tg_data_dirname = os.path.join(input_dir, 'tg_datasets', 'tr_' + str(test_ratio))

    if os.path.isdir(tg_data_dirname):
        return select_top_k_anchors(load_tg_dataset(tg_data_dirname), k_nearest)

    graph_files = glob.glob(os.path.join(input_dir, 'edgelist_data', "*.edgelist"))

    graph_data = []

    for gf in sorted(graph_files):
        g = nx.read_edgelist(gf)
        g.graph['name'] = os.path.basename(gf).replace('.edgelist', '')
        g.graph['centrality_file'] = os.path.join(os.path.dirname(gf), g.graph['name'] + '.centrality')

        if os.path.isfile(g.graph['centrality_file']):
            g.graph['centrality'] = unpickle_data(g.graph['centrality_file'])
        else:
            g.graph['centrality'] = None

        graph_data.append(g)

    tg_data = from_nx_to_tg_graphs(graph_data, test_ratio=test_ratio)

    make_dirs(tg_data_dirname)
    save_tg_dataset(tg_data, tg_data_dirname)

    return select_top_k_anchors(tg_data, k_nearest)
Example #14
def generate_vocabs(base_folder, all_vocab_folder, data):
    all_diffs_vocab_filename = os.path.join(all_vocab_folder,
                                            DIFFS_VOCAB_FILENAME)
    all_msgs_vocab_filename = os.path.join(all_vocab_folder,
                                           MSGS_VOCAB_FILENAME)
    for folder, diffs_filename, msgs_filename, diffs_at_least, msgs_at_least in data:
        diffs_word_freq = word_freq(
            os.path.join(base_folder, folder, diffs_filename),
            os.path.join(base_folder, folder, DIFFS_VOCAB_FILENAME),
        )
        new_diffs_vocab = generate_new_vocab(diffs_word_freq,
                                             all_diffs_vocab_filename,
                                             diffs_at_least)

        msgs_word_freq = word_freq(
            os.path.join(base_folder, folder, msgs_filename),
            os.path.join(base_folder, folder, MSGS_VOCAB_FILENAME),
        )
        new_msgs_vocab = generate_new_vocab(msgs_word_freq,
                                            all_msgs_vocab_filename,
                                            msgs_at_least)

        output_folder = os.path.join(base_folder, "vocabs", folder)
        if not os.path.isdir(output_folder):
            make_dirs(output_folder)
        save_file(os.path.join(output_folder, DIFFS_VOCAB_FILENAME),
                  new_diffs_vocab)
        save_file(os.path.join(output_folder, MSGS_VOCAB_FILENAME),
                  new_msgs_vocab)
Example #15
    def __init__(self, args):
        super().__init__()
        self.lr = args.learning_rate
        self.z_dim = 100
        self.eval_interval = 100
        self.eval_size = 16
        self.data_dir = args.data_dir
        self.device = "cpu" if args.no_cuda else "cuda"
        # evaluate the generator on fixed noise during training
        self.fixed_z = to_tensor(
            np.random.normal(0, 1, size=(self.eval_size, self.z_dim)),
            False).to(self.device)

        self.label_smooth = args.label_smooth
        self.G_loss = []
        self.D_loss = []

        self.path = args.path
        self.batch_size = args.batch_size
        self.checkpoint_path = os.path.join(self.path, "checkpoint")
        self.images_path = os.path.join(self.path, "images")
        self.train_images_path = os.path.join(self.images_path, "train_images")
        self.val_images_path = os.path.join(self.images_path, "val_images")
        make_dirs(self.checkpoint_path, self.train_images_path,
                  self.val_images_path)
Example #16
def generate_faces():

    # Test Path #
    make_dirs(config.inference_path)

    # Prepare Generator #
    G = Generator().to(device)
    G.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'Face_Generator_Epoch_{}.pkl'.format(config.num_epochs))))
    G.eval()

    # Start Generating Faces #
    count = 1

    while True:

        # Prepare Fixed Noise and Generator #
        noise = torch.randn(config.batch_size, config.noise_dim).to(device)
        generated = G(noise)

        for i in range(config.batch_size):
            save_image(
                denorm(generated[i].data),
                os.path.join(config.inference_path,
                             "Generated_CelebA_Faces_{}.png".format(count)),
            )
            count += 1

        if count > config.limit:
            print("Generating fake CelebA faces is finished.")
            break
Example #17
def transform_cosnet_files_to_standard_format(g1_files, g2_files, mapping_file, output_dir):

    make_dirs(output_dir)

    map_df = pd.read_csv(mapping_file, sep=r"\s+", names=['g1', 'g2'])

    g1_nodes = pd.read_csv(g1_files['nodes'], sep='\t', names=['id', 'username'])
    g2_nodes = pd.read_csv(g2_files['nodes'], sep='\t', names=['id', 'username'])

    g1_nx = nx.read_edgelist(g1_files['edges'], nodetype=int)
    g2_nx = nx.read_edgelist(g2_files['edges'], nodetype=int)

    g1 = generate_subgraph(g1_nx, list(g1_nodes.loc[g1_nodes['username'].isin(map_df['g1']), 'id'].values), g1_nodes)
    g2 = generate_subgraph(g2_nx, list(g2_nodes.loc[g2_nodes['username'].isin(map_df['g2']), 'id'].values), g2_nodes)

    print(g1.number_of_nodes())
    print(g2.number_of_nodes())

    map_df = map_df.loc[map_df['g1'].isin(list(g1.nodes())) & map_df['g2'].isin(list(g2.nodes()))]

    if 'attributes' in g1_files:
        attr_mat = create_attribute_files(g1_files['attributes'], list(g1.nodes()))
        np.save(os.path.join(output_dir, g1_files['name'] + ".attr"), attr_mat)

    if 'attributes' in g2_files:
        attr_mat = create_attribute_files(g2_files['attributes'], list(g2.nodes()))
        np.save(os.path.join(output_dir, g2_files['name'] + ".attr"), attr_mat)


    map_dict = dict(zip(map_df['g1'], map_df['g2']))

    nx.relabel_nodes(g1, map_dict, copy=False)

    nx.write_edgelist(g1, os.path.join(output_dir, g1_files['name'] + ".edgelist"))
    nx.write_edgelist(g2, os.path.join(output_dir, g2_files['name'] + ".edgelist"))
Example #18
 def _move(self, dst):
     """search and move all files from a given directory"""
     utils.make_dirs(dst)
     files = (os.path.join(root, file_name) for root, _, files in
         os.walk(self.temp) if files for file_name in files)
     for src in files:
         utils.file_copy(src, dst)
Example #19
def inference():

    # Inference Path #
    make_dirs(config.inference_path)

    # Prepare Data Loader #
    test_loader_selfie, test_loader_anime = get_selfie2anime_loader('test', config.batch_size)

    # Prepare Generator #
    G_A2B = Generator(image_size=config.crop_size, num_blocks=config.num_blocks).to(device)

    G_A2B.load_state_dict(torch.load(os.path.join(config.weights_path, 'U-GAT-IT_G_A2B_Epoch_{}.pkl'.format(config.num_epochs))))

    # Inference #
    print("U-GAT-IT | Generating Selfie2Anime images started...")
    with torch.no_grad():
        for i, (selfie, anime) in enumerate(zip(test_loader_selfie, test_loader_anime)):

            # Prepare Data #
            real_A = selfie.to(device)

            # Generate Fake Images #
            fake_B = G_A2B(real_A)[0]

            # Save Images (Selfie -> Anime) #
            result = torch.cat((real_A, fake_B), dim=0)
            save_image(denorm(result.data),
                       os.path.join(config.inference_path, 'U-GAT-IT_Selfie2Anime_Results_%03d.png' % (i + 1))
                       )

    # Make a GIF file #
    make_gifs_test("U-GAT-IT", "Selfie2Anime", config.inference_path)
Example #20
 def prepare(self):
     """remove temporary files, create the directory structure"""
     utils.rmtree(self.work_path)
     utils.make_dirs(os.path.join(self.work_iso, 'seedbank/etc/runonce.d'))
     utils.make_dirs(self.work_initrd)
     utils.run('bsdtar -C "%s" -xf "%s"' % (self.work_iso, self.iso_file))
     utils.run('chmod -R u+w "%s"' % self.work_iso)
Example #21
    def __init__(self, graph: Graph, output_path: str,
                 archs: Dict[str, str]) -> None:
        self.archs = archs
        self.graph = graph
        self.root_path = path.join(output_path, 'tvm_runtime', 'lib')

        for arch in archs:
            utils.make_dirs(path.join(self.root_path, arch))
Example #22
 def write(self, contents):
     """write the pxe boot file"""
     file_name = os.path.join(cfg['paths']['tftpboot'], 'pxelinux.cfg',
                              self.address)
     directory = os.path.dirname(file_name)
     utils.make_dirs(directory)
     utils.file_delete('%s.disabled' % file_name)
     utils.file_write(file_name, contents)
Example #23
 def write(self, contents):
     """write the pxe boot file"""
     file_name = os.path.join(cfg['paths']['tftpboot'], 'pxelinux.cfg',
         self.address)
     directory = os.path.dirname(file_name)
     utils.make_dirs(directory)
     utils.file_delete('%s.disabled' % file_name)
     utils.file_write(file_name, contents)
Example #24
 def __init__(self, log, exit_callback=None):
     self._log = log
     self._server_proxy = None
     self._data_file = None
     self._data_file_path = None
     self._data_file_state = 1
     self._data_file_hdr_row = 'Year,Month,Day,Hour,Minute,Second,Modem_on,FG_on,SC_on,CASES_on,HF_On,Htr_On,Garmin_GPS_on,Overcurrent_status_on,T_batt_1,T_batt_2,T_batt_3,T_FG_electronics,T_FG_sensor,T_router,V_batt_1,V_batt_2,V_batt_3,I_input,P_input,lat,long,sys_time_error_secs,UTC_sync_age_secs,Uptime_secs,CPU_load_1_min,CPU_load_5_min,CPU_load_15_min\n'
     utils.make_dirs(super_config.hskp_temp_dir, self._log)
Example #25
 def run(self, task_context):
     for root in [
             task_context['root-prefix-path'],
             task_context['root-build-path']
     ]:
         for platform in task_context['platforms']:
             path = join(root, platform)
             print('Creating directory %s' % path)
             make_dirs(path)
Example #26
 def copy_dir_contents(self, src, dst):
     """find and copy all files from src to dst"""
     utils.make_dirs(dst)
     files = [
         os.path.join(root, file_name) for root, _, files in os.walk(src)
         if files for file_name in files
     ]
     for src in files:
         utils.file_copy(src, dst)
Example #27
 def _download(self, src, dst_path):
     """download a file"""
     src_file = os.path.basename(src)
     dst = os.path.join(dst_path, src_file)
     if os.path.isfile(dst):
         logging.info('"%s" already exists, download skipped', dst)
     else:
         utils.make_dirs(dst_path)
         utils.download(src, dst)
Example #28
 def run(self, task_context):
     for dir in [
             task_context['root-src-path'], task_context['root-build-path'],
             task_context['root-prefix-path'],
             task_context['root-output-path'],
             task_context['root-android-output-path'],
             task_context['root-ios-output-path']
     ]:
         make_dirs(dir)
Example #29
 def _download(self, src, dst_path):
     """download a file"""
     src_file = os.path.basename(src)
     dst = os.path.join(dst_path, src_file)
     if os.path.isfile(dst):
         logging.info('"%s" already exists, download skipped', dst)
     else:
         utils.make_dirs(dst_path)
         utils.download(src, dst)
Example #30
def main():
    remove_dir("java_template")
    make_dirs("java_template")
    generate_template("original/java/train.4186.diff",
                      "java_template/train.4186.diff.new", "train")
    generate_template("original/java/test.436.diff",
                      "java_template/test.436.diff.new", "test")
    generate_template("original/java/valid.453.diff",
                      "java_template/valid.453.diff.new", "valid")
Example #31
 def _pxe_default(self):
     """manage the pxelinux.cfg default file"""
     src = os.path.join(self.cfg['paths']['templates'], 'pxe_default')
     directory = os.path.join(self.cfg['paths']['tftpboot'], 'pxelinux.cfg')
     dst = os.path.join(directory, 'default')
     if os.path.isfile(dst):
         return
     logging.info('created default pxelinux.cfg file "%s"', dst)
     utils.make_dirs(directory)
     utils.file_copy(src, dst)
Example #32
 def _pxe_default(self):
     """manage the pxelinux.cfg default file"""
     src = os.path.join(self.cfg['paths']['templates'], 'pxe_default')
     directory = os.path.join(self.cfg['paths']['tftpboot'], 'pxelinux.cfg')
     dst = os.path.join(directory, 'default')
     if os.path.isfile(dst):
         return
     logging.info('created default pxelinux.cfg file "%s"', dst)
     utils.make_dirs(directory)
     utils.file_copy(src, dst)
Example #33
 def _extract(self, prefix, files, src, dst, target):
     """extract files to the seedbank temp directory and move those"""
     archive = os.path.join(dst, os.path.basename(src))
     files = (os.path.join(prefix, file_name) for file_name in files)
     temp_manage = os.path.join(self.temp, 'manage')
     if os.path.isdir(temp_manage):
         utils.rmtree(temp_manage)
     utils.make_dirs(temp_manage)
     utils.untar_files(archive, files, temp_manage)
     self.copy_dir_contents(temp_manage, target)
     utils.rmtree(temp_manage)
Example #34
 def _extract(self, prefix, files, src, dst, target):
     """extract files to the seedbank temp directory and move those"""
     archive = os.path.join(dst, os.path.basename(src))
     files = (os.path.join(prefix, file_name) for file_name in files)
     temp_manage = os.path.join(self.temp, 'manage')
     if os.path.isdir(temp_manage):
         utils.rmtree(temp_manage)
     utils.make_dirs(temp_manage)
     utils.untar_files(archive, files, temp_manage)
     self.copy_dir_contents(temp_manage, target)
     utils.rmtree(temp_manage)
Example #35
 def _debian_firmware(self, name):
     """integrate Debian non free firmware"""
     temp_initrd = os.path.join(self.temp, 'initrd')
     initrd = os.path.join(self.cfg['paths']['tftpboot'], 'seedbank', name,
         'initrd.gz')
     utils.make_dirs(temp_initrd)
     utils.initrd_extract(temp_initrd, initrd)
     dst = os.path.join(self.temp, 'initrd/lib/firmware')
     self._add_firmware(name, dst)
     utils.initrd_create(temp_initrd, initrd)
     utils.rmtree(temp_initrd)
Example #36
def inference():

    # Inference Path #
    make_dirs(config.inference_path)

    # Prepare Data Loader #
    val_loader = get_edges2shoes_loader(purpose='val',
                                        batch_size=config.val_batch_size)

    # Prepare Generator #
    G_A2B = Generator().to(device)
    G_B2A = Generator().to(device)

    G_A2B.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'DiscoGAN_Generator_A2B_Epoch_{}.pkl'.format(
                    config.num_epochs))))
    G_B2A.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'DiscoGAN_Generator_B2A_Epoch_{}.pkl'.format(
                    config.num_epochs))))

    # Test #
    print("DiscoGAN | Generating Edges2Shoes images started...")
    for i, (real_A, real_B) in enumerate(val_loader):

        # Prepare Data #
        real_A = real_A.to(device)
        real_B = real_B.to(device)

        # Generate Fake Images #
        fake_B = G_A2B(real_A)
        fake_A = G_B2A(real_B)

        # Generate Reconstructed Images #
        fake_ABA = G_B2A(fake_B)
        fake_BAB = G_A2B(fake_A)

        # Save Images #
        result = torch.cat(
            (real_A, fake_A, fake_BAB, real_B, fake_B, fake_ABA), dim=0)
        save_image(denorm(result.data),
                   os.path.join(
                       config.inference_path,
                       'DiscoGAN_Edges2Shoes_Results_%03d.png' % (i + 1)),
                   nrow=8,
                   normalize=True)

    # Make a GIF file #
    make_gifs_test("DiscoGAN", config.inference_path)
Example #37
def inference():

    # Inference Path #
    make_dirs(config.inference_path)

    # Prepare Data Loader #
    val_loader = get_edges2handbags_loader('val', config.val_batch_size)

    # Prepare Generator #
    G = Generator(z_dim=config.z_dim).to(device)
    G.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'BicycleGAN_Generator_Epoch_{}.pkl'.format(
                    config.num_epochs))))
    G.eval()

    # Fixed Noise #
    fixed_noise = torch.randn(config.test_size, config.num_images,
                              config.z_dim).to(device)

    # Test #
    print("BiCycleGAN | Generating Edges2Handbags Images started...")
    for iters, (sketch, ground_truth) in enumerate(val_loader):

        # Prepare Data #
        N = sketch.size(0)
        sketch = sketch.to(device)
        results = torch.FloatTensor(N * (1 + config.num_images), 3,
                                    config.crop_size, config.crop_size)

        # Generate Fake Images #
        for i in range(N):
            results[i * (1 + config.num_images)] = sketch[i].data

            for j in range(config.num_images):
                image = sketch[i].unsqueeze(dim=0)
                noise_to_generator = fixed_noise[i, j, :].unsqueeze(dim=0)

                out = G(image, noise_to_generator)
                results[i * (1 + config.num_images) + j + 1] = out

            # Save Images #
            save_image(
                denorm(results.data),
                os.path.join(
                    config.inference_path,
                    'BicycleGAN_Edges2Handbags_Results_%03d.png' %
                    (iters + 1)),
                nrow=(1 + config.num_images),
            )

    make_gifs_test("BicycleGAN", config.inference_path)
Example #38
    def setup(self):
        utils.make_dirs()
        random.seed(args.manual_seed)
        torch.manual_seed(args.manual_seed)
        torch.cuda.manual_seed_all(args.manual_seed)

        # import a custom data loader
        Datawrapper = getattr(__import__(args.datamodule, fromlist=[None]),
                              args.datawrapper)
        self.train_data = Datawrapper(args.train_path)
        # self.epoch_len   = self.train_data.epoch_len()
        self.test_data = Datawrapper(args.test_path)
Example #39
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=True)

    args = parser.parse_args()

    utils.make_dirs([args.output_path + "/annotations/"])

    rois = dataset.load_all_rois(args.input_path, dataset.no_false_positives)
    generate_rois(os.path.dirname(args.input_path), rois,
                  args.output_path + "/annotations/")
Example #40
 def iso(self, name):
     """download ISOs"""
     dst = os.path.join(self.cfg['paths']['isos'], name + '.iso')
     if os.path.isfile(dst):
         logging.info('nothing to do, "%s" already exists', dst)
         return
     distribution = name.split('-', 1)[0]
     if distribution == 'ubuntu':
         url = self.iso_ubuntu(name)
     elif distribution == 'debian':
         url = self.iso_debian(name)
     utils.make_dirs(self.cfg['paths']['isos'])
     utils.download(url, dst)
Example #41
def handle_asset(asset, handle_formats, dir, flip, objMesh):
	for id, obj in asset.objects.items():
		try:
			otype = obj.type
		except Exception as e:
			error("[Error] %s" % (e))
			continue

		if otype not in handle_formats:
			continue

		d = obj.read()
		save_path = os.path.join(dir, obj.type, d.name)
		utils.make_dirs(save_path)

		if otype == "Mesh":
			try:
				mesh_data = None

				if not objMesh:
					mesh_data = BabylonMesh(d).export()
					utils.write_to_file(save_path + ".babylon", mesh_data, mode="w")

				mesh_data = OBJMesh(d).export()
				utils.write_to_file(save_path + ".obj", mesh_data, mode="w")
			except (NotImplementedError, RuntimeError) as e:
				error("WARNING: Could not extract %r (%s)" % (d, e))
				mesh_data = pickle.dumps(d._obj)
				utils.write_to_file(save_path + ".Mesh.pickle", mesh_data, mode="wb")

		elif otype == "TextAsset":
			if isinstance(d.script, bytes):
				utils.write_to_file(save_path + ".bin", d.script, mode="wb")
			else:
				utils.write_to_file(save_path + ".txt", d.script)

		elif otype == "Texture2D":
			filename = d.name + ".png"
			try:
				image = d.image
				if image is None:
					info("WARNING: %s is an empty image" % (filename))
					utils.write_to_file(save_path + ".empty", "")
				else:
					info("Decoding %r" % (d))
					img = image
					if flip:
						img = ImageOps.flip(image)
					img.save(save_path + ".png")
			except Exception as e:
				error("Failed to extract texture %s (%s)" % (d.name, e))
Example #42
    def pimp(self, seeds, overlay, manifests):
        """pimp the seed file template"""
        commands = self.cfg['commands']
        values = self.cfg['seed']

        if self.target == 'iso':
            cmd_overlay = commands['iso_overlay']
            cmd_early = commands['iso_early_command']
            cmd_late = commands['iso_late_command']
            values['late_command'] += commands['iso_mount_command']
        elif self.target == 'pxe':
            cmd_overlay = commands['pxe_overlay']
            cmd_puppet_manifest = commands['pxe_puppet_manifest']
            cmd_early = commands['pxe_early_command']
            cmd_late = commands['pxe_late_command']

        if overlay:
            values['late_command'] += cmd_overlay
        if self.target == 'pxe' and manifests:
            values['late_command'] += commands['pxe_puppet_manifests']
        for manifest in manifests:
            values['manifest'] = manifest
            if self.target == 'pxe':
                puppet_command = commands_merge(cmd_puppet_manifest, values)
                values['late_command'] += [puppet_command]
            elif self.target == 'iso':
                src = os.path.join(self.cfg['paths']['templates'],
                self.cfg['templates']['puppet_manifest'])
                path = os.path.join(self.cfg['paths']['temp'], 'seedbank',
                    values['fqdn'], 'iso/iso/seedbank/etc/runonce.d')
                utils.make_dirs(path)
                dst = os.path.join(path, 'puppet_manifest_%s.enabled' %
                    manifest)
                utils.write_template(values, src, dst)
        
        values['early_command'] += cmd_early
        values['early_command'] = commands_merge(values['early_command'],
            values)
        values['late_command'] += cmd_late
        values['late_command'] = commands_merge(values['late_command'], values)

        seed_file = self._merge_seeds(seeds, values)
        logging.debug(seed_file)
        logging.info('%(fqdn)s - generated preseed file', values)
        return seed_file
Example #43
def collection_qa_analyzer(col_name, env='active', fields=None):
    # parse configuration file
    cfg_fpath = os.path.join(basepath, "db_and_fpath.cfg")
    parser = ConfigParser()
    parser.read(cfg_fpath)

    # get detailed info from configuration
    mongo_uri = parser.get(env, 'uri')
    db_name = parser.get(env, 'db_name')
    io_fpath = parser.get(env, 'io_fpath')

    # get current time in string
    current_time = get_current_time(flag='short')

    # create directories if not existing, and return the collection's path
    col_fpath = make_dirs(io_fpath, current_time, db_name, col_name)

    # get all file paths that are required
    basic_stat_fpath = os.path.join(col_fpath, 'basic_stat.json')
    domain_fpath = os.path.join(col_fpath, 'domain.json')
    existing_checking_fpath = os.path.join(col_fpath, 'existing.json')
    field_fpath = os.path.join(col_fpath, 'fields.json')
    type_checking_fpath = os.path.join(col_fpath, 'type_checking.json')
    nullable_checking_fpath = os.path.join(col_fpath, 'nullable.json')
    empty_checking_fpath = os.path.join(col_fpath, 'empty.json')
    coverage_fpath = os.path.join(col_fpath, 'coverage.json')
    merger_fpath = os.path.join(col_fpath, 'merger.txt')

    # MongoEye
    mongoEye = MongoEye(mongo_uri, db_name, col_name)

    # get detailed STAT
    mongoEye.save_existing_checking(existing_checking_fpath)
    mongoEye.save_fields(existing_checking_fpath, field_fpath)
    mongoEye.save_type_checking(type_checking_fpath, field_fpath)
    mongoEye.save_nullable_checking(field_fpath, nullable_checking_fpath)
    mongoEye.save_empty_checking(type_checking_fpath, empty_checking_fpath)
    mongoEye.save_coverage(existing_checking_fpath,
                           nullable_checking_fpath,
                           empty_checking_fpath,
                           coverage_fpath)
    mongoEye.merger(field_fpath, type_checking_fpath,
                    existing_checking_fpath, nullable_checking_fpath,
                    empty_checking_fpath, coverage_fpath, merger_fpath)
    
    # get basic STAT
    mongoEye.save_domain_counters(domain_fpath)
    mongoEye.get_basic_stat(merger_fpath, basic_stat_fpath, domain_fpath)
    
    # test count unique domains
    # print(mongoEye.count_domains())

    #===============================================================================
    # Get the distribution of values for a given field
    #===============================================================================
    if fields:
        field_dist_collector(mongoEye, fields, col_fpath)
Example #44
 def _debian_firmware(self, target):
     """download and integrate the debian non free firmware"""
     distribution, release, _ = target.split('-')
     path = 'firmware-' + distribution + '-' + release
     dst = os.path.join(self.cfg['paths']['archives'], path)
     temp_initrd = os.path.join(self.temp, 'initrd')
     temp_firmware = os.path.join(self.temp, 'firmware')
     firmware = os.path.join(dst, 'firmware.tar.gz')
     initrd = os.path.join(self.cfg['paths']['tftpboot'], 'seedbank', target,
         'initrd.gz')
     url = self.cfg['urls']['debian_firmware'].replace('${release}', release)
     self._download(url, dst)
     utils.untar(firmware, temp_firmware)
     self._extract_debs(temp_firmware)
     utils.make_dirs(temp_initrd)
     utils.initrd_extract(temp_initrd, initrd)
     src = os.path.join(temp_firmware, 'temp', 'lib/firmware')
     dst = os.path.join(self.temp, 'initrd/lib/firmware')
     utils.file_move(src, dst)
     self._disable_usb(temp_initrd)
     utils.initrd_create(temp_initrd, initrd)
Example #45
max_runtime="2-00:00"                    # Two days. Each script needs ~10-15 minutes, 30 is recommended for buffer
memory="32000"                           # 16000 might also work
submission_command="sbatch"                  # Your cluster submission command, eg sbatch, qsub
submission_args="-p russpold --qos russpold" # Does not need spaces to left and right


# Get the base and present working directory
base = get_base()
here = get_pwd()

data = os.path.abspath("%s/data" %(base))
results = os.path.abspath("%s/results" %(base))
output_folder = "%s/permutations" %results  

# Make the output directory
make_dirs(output_folder,reason="for permutation results.")

# Images by Concepts data frame
labels_tsv = "%s/concepts_binary_df.tsv" %results
images = pandas.read_csv(labels_tsv,sep="\t",index_col=0)
image_lookup = "%s/image_nii_lookup.pkl" %results

# We will need these folders to exist for job and output files
log_folders = ["%s/.out" %here,"%s/.job" %here]
make_dirs(log_folders)    

# Image metadata with number of subjects included
contrast_file = "%s/filtered_contrast_images.tsv" %results

for image1_holdout in images.index.tolist():
    print "Parsing %s" %(image1_holdout)
Example #46
    problem = request.form['level']
    if '_' not in problem:
        return ''
    category, name = problem.split('_', 1)
    if category in problems and name in problems[category]['problems']:
        return load_code(problem, source)
    else:
        return ''

if __name__ == '__main__':
    print(' - Loading init.sql')
    with open(INIT_SQL) as init_sql_file:
        init_sql_data = init_sql_file.read()

    make_dirs(OUTPUT_PATH)

    with app.app_context():
        print(' -  Executing init.sql')
        get_db().executescript(init_sql_data)
        get_db().commit()
        print(' -  Execution success: init.sql')
        load_problem() # load problem

    app.jinja_env.globals.update(issolved=issolved)
    app.jinja_env.globals.update(load_instruction=load_instruction)
    app.jinja_env.globals.update(load_example=load_example)

    app.debug = True
    app.run(host='0.0.0.0', port=3333)
Example #47
    window_size = 4
    num_feats = 3

    base_dir = os.path.dirname(os.path.realpath(__file__))
    data_dir = os.path.join(base_dir, 'data')

    ann_dir = os.path.join(base_dir, 'annotation/coloncancer')
    plain_dir = os.path.join(base_dir, 'original')

    train_dir = os.path.join(data_dir, 'train')
    dev_dir = os.path.join(data_dir, 'dev')
    test_dir = os.path.join(data_dir, 'test')

    
    make_dirs([train_dir, dev_dir, test_dir])

    preprocess_data(os.path.join(ann_dir, "Train"), os.path.join(plain_dir, "train"), 
        train_dir, window_size, num_feats)
    
    preprocess_data(os.path.join(ann_dir, "Dev"), os.path.join(plain_dir, "dev"), 
        dev_dir, window_size, num_feats)

    ann_dir_2 = os.path.join(base_dir, 'thymedata-1.2.0-coloncancer-test-event-time/coloncancer')
    preprocess_test_data_phase2(os.path.join(plain_dir, "test"), os.path.join(ann_dir_2, "Test"), test_dir, window_size, num_feats)


    build_vocab(
        glob.glob(os.path.join(data_dir, '*/*.toks')),
        os.path.join(data_dir, 'vocab-cased.txt'))
Example #48
import pandas
import shutil
import sys

if sys.version_info < (3, 0):
    from exceptions import ValueError

# Get the base and present working directory
base = get_base()
here = get_pwd()

data_directory = os.path.abspath("%s/data" %(base))
results_directory = os.path.abspath("%s/results" %(base))

folders = [data_directory,results_directory]
make_dirs(folders)

# Get all collections
collections = api.get_collections()

# Filter images to those that have a DOI
collections = collections[collections.DOI.notnull()]

# Useless, but might as well save it
collections.to_csv("%s/collections_with_dois.tsv" %(results_directory),encoding="utf-8",sep="\t")


# Get image meta data for collections
images = api.get_images(collection_pks=collections.collection_id.tolist())

# load list of included image IDs (curated by Poldracklab) and exclude others