Example #1
File: pixel.py Project: NealJMD/gala
def temp_dir_verify(options_parser, options, master_logger):
    """
    If a base temporary directory has been specified, make sure it exists or
    can be created.
    """
    if options.temp_dir is not None:
        util.make_dir(options.temp_dir)
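
Both gala examples on this page delegate to a util.make_dir helper that is not shown here. A minimal sketch of the usual pattern behind such a helper (the body below is an assumption, not gala's actual code):

import os

def make_dir(path):
    # Create the directory, including missing parents, only if it does
    # not already exist, so repeated calls are harmless.
    if not os.path.isdir(path):
        os.makedirs(path)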
Example #2
File: pixel.py Project: nagyist/gala
def temp_dir_verify(options_parser, options, master_logger):
    """
    If a base temporary directory has been specified, make sure it exists or
    can be created.
    """
    if options.temp_dir is not None:
        util.make_dir(options.temp_dir)
Example #3
 def create_state(self, ts, flow, isn):
     new_state = flow_state()
     new_state.flow = flow
     new_state.isn = isn
     new_state.ts = ts
     new_state.last_access = self.current_time + 1
     self.current_time += 1
     index = self.fhash(new_state.flow)
     # figure out which honeypot belongs to this flow
     hps = self.hs.getOptions()["honeypots"]
     ips = [flow.src, flow.dst]
     new_state.honeypot = [i for i in hps if i in ips][0]
     if new_state.honeypot == flow.src:
         new_state.direction = "incoming"
     else:
         new_state.direction = "outgoing"
     # path and name the file appropriately
     new_state.outdir = self.outdir % new_state.honeypot
     new_state.outdir += "/" + new_state.direction
     make_dir(new_state.outdir)
     new_state.fname = self._flow_filename(new_state)
     if index in self.flow_hash:
         tmp = self.flow_hash[index]
         new_state.next = tmp
         self.flow_hash[index] = new_state
     else:
         self.flow_hash[index] = new_state
     return new_state
Example #4
 def setOutdir(self, dir):
     self.outdir = dir
     self.states.setOutdir(dir)
     hps = self.hs.getOptions()["honeypots"]
     for i in hps:
         o = self.outdir % i
         make_dir(o)
Example #6
File: data.py Project: PPinto22/PROMOS
def parse_args():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('data_file', nargs='+', help='path(s) to data file(s) to be encoded and/or sampled',
                        metavar='DATA')
    parser.add_argument('-o', '--outdir', dest='out_dir', default='.',
                        help='directory where to save output files', metavar='DIR')
    parser.add_argument('-v', '--val', dest='val', type=util.ratio, default=0, metavar='RATIO',
                        help='use this fraction of the data as a validation data-set')
    parser.add_argument('-t', '--test', dest='test', type=util.ratio, default=0, metavar='RATIO',
                        help='use this fraction of the data as a test data-set')
    parser.add_argument('-E', '--encoder', dest='encoder', metavar='FILE', default=None,
                        help='configuration file for numeric encoding. The encoding is performed over the training '
                             'data-set only. The test and validation data-sets are generated by mapping their raw '
                             'values to the corresponding codification in the training data-set')
    parser.add_argument('--id', dest='id', metavar='ID', default=None,
                        help='identifier used to name the output files (e.g., ID_train.csv, ID_val.csv, ID_test.csv)')
    parser.add_argument('--seed', dest='seed', metavar='S', type=util.uint, default=None,
                        help='specify an RNG integer seed')

    options = parser.parse_args()

    assert options.encoder is not None or options.val > 0 or options.test > 0

    util.make_dir(options.out_dir)
    if options.seed is not None:
        random.seed(options.seed)

    return options
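
The util.ratio and util.uint callbacks passed as argparse type= above are not shown on this page; presumably they validate a float in [0, 1] and a non-negative integer. A hedged sketch of such validators (names taken from the call sites, bodies assumed):

import argparse

def ratio(value):
    # argparse type callback: parse a float and require it to lie in [0, 1]
    f = float(value)
    if not 0.0 <= f <= 1.0:
        raise argparse.ArgumentTypeError('%r is not a ratio in [0, 1]' % value)
    return f

def uint(value):
    # argparse type callback: parse an int and require it to be non-negative
    i = int(value)
    if i < 0:
        raise argparse.ArgumentTypeError('%r is not a non-negative integer' % value)
    return i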
Example #7
def exact_solve(structure, system_size, fill, interaction_shape,
                interaction_radius, path):
    radius_dir_path = '{}/radius_{}'.format(path, interaction_radius)
    util.make_dir(radius_dir_path)
    start = time.time()
    prob = maxcut.initialize_problem(structure, system_size, fill,
                                     interaction_shape, interaction_radius)
    min_energy, num_ground_states, sample_ground_state, num_total_states, all_energies = classical_algorithms.BruteForce(
    ).solve(prob)
    if structure != 'free':
        with open('{}/ground_state_partitions.csv'.format(radius_dir_path),
                  'w') as output_file:
            header = sample_ground_state.keys()
            dict_writer = csv.DictWriter(output_file, header)
            dict_writer.writeheader()
            dict_writer.writerow(sample_ground_state)
    with open('{}/energies.csv'.format(radius_dir_path), 'w') as output_file:
        writer = csv.writer(output_file)
        writer.writerow(['energy'])
        min_gap = float('inf')
        prev_energy = 0.0
        for energy in sorted(all_energies):
            writer.writerow([energy])
            if energy != prev_energy:
                min_gap = min(abs(energy - prev_energy), min_gap)
                prev_energy = energy
    end = time.time()
    return {
        'radius': interaction_radius,
        'min energy': min_energy,
        '# ground states': num_ground_states,
        '# states': num_total_states,
        'min energy gap': min_gap,
        'runtime': end - start
    }
Example #8
 def setOutdir(self, dir):
     """
     Set output directory for IRC log
     If you just want output to stdout, don't call this function
     """
     make_dir(dir) 
     self.dir = dir
Example #10
File: nmt-ndp.py Project: okigan/nmt
def mount_cloud(source: str, tmp: str = "~/tmp", mode='r') -> MountedFile:
    if mode == 'r':
        result = urllib.parse.urlparse(source)
        bucket_name = result.netloc
        key_name = result.path[1:]

        # only useful for reads
        # response = s3.head_object(Bucket=bucket_name, Key=key_name)
        # size = response['ContentLength']
        # print(size)

        prefix = os.path.dirname(key_name)

        local_dir = os.path.join(tmp, bucket_name, prefix)
        local_dir = os.path.expanduser(local_dir)
        local_dir = os.path.abspath(local_dir)
        local_file = os.path.join(local_dir, os.path.basename(key_name))

        if not os.path.exists(local_file):
            util.make_dir(local_dir)
            command = 'goofys --debug_s3 {0}:/{1} {2}'.format(
                bucket_name, prefix, local_dir)
            i = util.run(command)

        return MountedFile(local_file, mode)
    elif mode == 'w':
        import tempfile
        mktemp = tempfile.mktemp("temptocloud", dir=tmp)
        return MountedFile(mktemp, mode)
Example #12
def parse_args():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('data_file', help='path to data file to be encoded and/or divided by windows', metavar='DATA')
    parser.add_argument('-o', '--outdir', dest='out_dir', default='.',
                        help='directory where to save output files', metavar='DIR')
    parser.add_argument('-W', '--window', dest='width', metavar='W', type=util.ufloat, default=120,
                        help='Sliding window width (train + test) in hours')
    parser.add_argument('-w', '--test-window', dest='test_width', metavar='W', type=util.ufloat, default=24,
                        help='Test sliding window width in hours')
    parser.add_argument('-S', '--shift', dest='shift', metavar='S', type=util.ufloat, default=24,
                        help='Sliding window shift in hours')
    parser.add_argument('-E', '--encoder', dest='encoder', metavar='FILE', default=None,
                        help='configuration file for numeric encoding. The encoding is performed over the training '
                             'data-set only. The test and validation data-sets are generated by mapping their raw '
                             'values to the corresponding codification in the training data-set')
    parser.add_argument('--id', dest='id', metavar='ID', default='windows',
                        help='identifier used to name the output files '
                             '(e.g., ID_train(1).csv, ID_test(1).csv, where 1 is the window index)')
    parser.add_argument('--seed', dest='seed', metavar='S', type=util.uint, default=None,
                        help='specify an RNG integer seed')

    options = parser.parse_args()

    util.make_dir(options.out_dir)
    if options.seed is not None:
        random.seed(options.seed)

    return options
Example #13
def param_search(structure, system_size, fill, interaction_shape,
                 interaction_radius, ensemble, path, exact_min_energy,
                 exact_min_gap):
    radius_dir_path = '{}/radius_{}'.format(path, interaction_radius)
    util.make_dir(radius_dir_path)
    start = time.time()
    algorithm = classical_algorithms.SimulatedAnnealing()
    cooling_schedules = maxcut.get_cooling_schedules(
        problem=maxcut.initialize_problem(structure, system_size, fill,
                                          interaction_shape,
                                          interaction_radius))
    radius_sols = []
    sample_best_probs = {}
    for init_temp, cool_rate in cooling_schedules:
        radius_sols.append(
            run_trials(structure, system_size, fill, interaction_shape,
                       interaction_radius, algorithm, ensemble,
                       radius_dir_path, exact_min_energy, exact_min_gap,
                       init_temp, cool_rate, sample_best_probs))

    with open('{}/param_results.csv'.format(radius_dir_path),
              'w') as output_file:
        header = radius_sols[0].keys()
        dict_writer = csv.DictWriter(output_file, header)
        dict_writer.writeheader()
        dict_writer.writerows(radius_sols)

    opt_sol = min(radius_sols, key=lambda sol: sol['step_from_exact'])
    opt_init_temp = opt_sol['init_temp']
    opt_cool_rate = opt_sol['cool_rate']
    opt_step_from_exact = opt_sol['step_from_exact']
    opt_step_from_entropy = opt_sol['step_from_entropy']
    opt_prob_ground_state_per_run = opt_sol['prob_ground_state_per_run']

    if structure != 'free':
        sample_best_prob = sample_best_probs[(opt_init_temp, opt_cool_rate)]
        sample_best_partition_hist = sample_best_prob.get_partition_history()
        sample_best_energy_hist = sample_best_prob.get_energy_history()
        with open('{}/ground_state_partitions.csv'.format(radius_dir_path),
                  'w') as output_file:
            header = sample_best_partition_hist[0].keys()
            dict_writer = csv.DictWriter(output_file, header)
            dict_writer.writeheader()
            dict_writer.writerows(sample_best_partition_hist)
        with open('{}/ground_state_energies.csv'.format(radius_dir_path),
                  'w') as output_file:
            writer = csv.writer(output_file)
            writer.writerow(['energy'])
            for energy in sample_best_energy_hist:
                writer.writerow([energy])
    end = time.time()
    return dict(interaction_radius=interaction_radius,
                init_temp=opt_init_temp,
                cool_rate=opt_cool_rate,
                step_from_exact=opt_step_from_exact,
                step_from_entropy=opt_step_from_entropy,
                prob_ground_state_per_run=opt_prob_ground_state_per_run,
                search_runtime=(end - start),
                exact_min_energy=exact_min_energy)
Example #14
 def __init__(self, keyword, save_dir):
     options = Options()
     options.add_argument("--headless")
     self.browser = webdriver.Chrome(ChromeDriverManager().install(),
                                     options=options)
     self.keyword = keyword
     self.save_dir = save_dir
     make_dir(f'./{save_dir}')
Example #15
 def __init__(self, urls):
     self.sizes = [480, 960, 1366, 1920]
     self.browser_height = 1027
     self.browser = webdriver.Chrome(ChromeDriverManager().install())
     self.browser.maximize_window()
     self.urls = urls
     self.dir = 'responsive_test_result'
     make_dir(f'./{self.dir}')
Example #16
File: data.py Project: PPinto22/PROMOS
 def save(self, file_path):
     util.make_dir(file_path=file_path)
     with open(file_path, 'w') as file:
         writer = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC)
         header = ([self.timestamp_label] if self.has_timestamps else []) + [self.target_label] + self.input_labels
         writer.writerow(header)
         for inputs, target, timestamp in zip(self.inputs, self.targets, self.timestamps):
             row = ([timestamp] if self.has_timestamps else []) + [target] + list(inputs)
             writer.writerow(row)
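
Note that save() calls util.make_dir with a file_path keyword, so in this project the helper evidently also accepts a file path and creates its parent directory. One way such a dual-purpose helper could look (an assumption inferred from the call site, not PROMOS's actual code):

import os

def make_dir(dir_path=None, file_path=None):
    # Accept either a directory path or a file path whose parent
    # directory should be created before the file is written.
    if file_path is not None:
        dir_path = os.path.dirname(file_path)
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)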
Example #17
def execute(in_dir, out_dir, record_id, algorithms, feature_selection, survival, oversampling, undersampling):
	'''executes the learning task on the data in in_dir with the algorithms in algorithms.
		The results are written to out_dir and subdirectories,
	    and the record_ and target_ids are used to differentiate attributes and non-attributes'''
	print ('### executing learning algorithms on... ###')
	
	# get the files
	files = util.list_dir_csv(in_dir)

	# stop if no files found
	if not files:
		print ('No appropriate csv files found. Select an input directory with appropriate files')
		return

	files_test = files

	# create directory
	util.make_dir(out_dir)

	# execute each algorithm
	for alg in algorithms:
		print ('...{}'.format(alg))
	
		util.make_dir(out_dir+'/'+alg+'/')
		# list which will contain the results
		results_list = []

		# run algorithm alg for each file f
		for f, f_test in zip(files,files_test):
			fname = in_out.get_file_name(f, extension=False)
			print (' ...{}'.format(fname))
	
			# get data, split in features/target. If invalid stuff happened --> exit
			X, y, headers, target_list = in_out.import_data(f, record_id, survival) # assumption: first column is patientnumber and is pruned, last is target
			if type(X) == bool: return
	

			print ('  ...instances: {}, attributes: {}'.format(X.shape[0], X.shape[1]))

			# train model and return model and best features
			model, best_features, results = execute_with_algorithm(alg, X, y, fname, headers, out_dir+'/'+alg+'/', record_id, feature_selection, oversampling, survival, undersampling)
			results_list.append(results)

		try:
			in_out.save_ROC(out_dir+'/'+alg+'/'+"roc.png", results_list, title='ROC curve')
		except IndexError:
			pass
		
		try:
			# results_list2 is never defined in this snippet; the NameError
			# guard makes this second ROC plot a silent no-op unless it is
			# populated elsewhere
			in_out.save_ROC(out_dir+'/'+alg+'_test/'+"roc.png", results_list2, title='ROC curve')
		except NameError:
			pass

	# notify user
	print ('## Learning Finished ##')
Example #18
 def dump_replay_memory_images_to_disk(self, directory):
   print ">dump_replay_memory_images_to_disk", directory
   util.make_dir(directory)
   for idx in range(self.state_buffer_size):
     if idx == 0:
       pass  # dummy zero state
     elif idx in self.state_free_slots:
       print "idx", idx, "in free slots; ignore"
     else:
       with open("%s/%05d.png" % (directory, idx), "wb") as f:
         f.write(util.rgb_to_png_bytes(self.state[idx]))
Example #19
 def archive(self, data_type, file_names):
     ds = datetime.now().strftime("%Y-%m-%d")
     ts = datetime.now().strftime("%Y-%m-%dT%H%M%S")
     ark_dir = "{}/archive/{}/{}".format(self.data_dir, data_type, ds)
     make_dir(ark_dir)
     files = [file_names] if type(file_names) is not list else file_names
     for file in files:
         _, naked_file_name, stem, ext = parse_file_name(file)
         ark_file_name = "{}/{}--{}.{}".format(ark_dir, stem, ts, ext)
         copyfile(file, ark_file_name)
         self.log.write("INFO [{}] {} archived to {}".format(
             data_type, naked_file_name, ark_file_name))
Example #20
def write_data(base_dir, categories):
    '''
    Writes the text data in according to the
    above defined category document structure.
    '''
    util.make_dir(base_dir)
    for cat in categories:
        util.make_dir(base_dir + '/' + cat)
        for doc in categories[cat]:
            new = '{}/{}/{}'.format(base_dir, cat, doc + '.txt')
            util.write_file(new, categories[cat][doc].encode('utf-8'))

    return True
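
The "category document structure" the docstring mentions is not shown on this page; from the loop, categories is evidently a mapping of category name to a mapping of document name to document text. A hypothetical input illustrating the expected shape:

categories = {
    'sports': {
        'doc001': 'match report text ...',
        'doc002': 'transfer news text ...',
    },
    'politics': {
        'doc003': 'election coverage text ...',
    },
}
# write_data('corpus', categories) would then create
# corpus/sports/doc001.txt, corpus/sports/doc002.txt, corpus/politics/doc003.txt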
Example #21
    def archive_v1(self,
                   data_type,
                   json_file_name,
                   csv_file_name,
                   copy_to_archive=True,
                   copy_to_current=True,
                   copy_to_master=True,
                   excelize=False,
                   xlsx_formats=None):

        if copy_to_archive:
            # make json/csv copies for archive
            ds = datetime.now().strftime("%Y-%m-%d")
            ts = datetime.now().strftime("%Y-%m-%dT%H%M%S")
            ark_dir = "{}/archive/{}/{}".format(self.data_dir, data_type, ds)
            make_dir(ark_dir)
            ark_file_name = "{}/{}--{}.json".format(ark_dir, data_type, ts)
            copyfile(json_file_name, ark_file_name)
            self.log.write("INFO [{}] JSON archived to {}".format(
                data_type, ark_file_name))
            ark_file_name = "{}/{}--{}.csv".format(ark_dir, data_type, ts)
            copyfile(csv_file_name, ark_file_name)
            self.log.write("INFO [{}] CSV archived to {}".format(
                data_type, ark_file_name))

        if copy_to_current:
            # copy csv to current
            target_file_name = csv_file_name.replace("/processing/default/",
                                                     "/current/")
            if os.path.exists(target_file_name):
                os.remove(target_file_name)
            copyfile(csv_file_name, target_file_name)
            self.log.write("INFO [{}] CSV copied to {}".format(
                data_type, target_file_name))
            if excelize:
                # make xlsx copy
                formats = [] if xlsx_formats is None else xlsx_formats
                xlsx_file_name = csv_2_xlsx(target_file_name, "", formats)
                self.log.write("INFO [{}] CSV re-saved as XLSX {})".format(
                    data_type, xlsx_file_name))

        if copy_to_master:
            # copy csv to master
            target_file_name = csv_file_name.replace("/processing/default/",
                                                     "/master/")
            if os.path.exists(target_file_name):
                os.remove(target_file_name)
            copyfile(csv_file_name, target_file_name)
            self.log.write("INFO [{}] CSV copied to {}".format(
                data_type, target_file_name))
Example #22
def set_dirs(PATHS):
  MOVEEXEC = True
  PATHS['BUILD'] = 'SRC'
  PATHS['PROB'] = os.getcwd()
  for n in range(len(sys.argv)):
    if sys.argv[n] == '-dir' and n < len(sys.argv) - 1:
      MOVEEXEC = False
      PATHS['BUILD'] = util.sanitize_path(sys.argv[n+1])
  PATHS['SRC'] = os.path.join(PATHS['BUILD'], 'source','')

  for key in list(PATHS):
    if PATHS[key][0] != '/':
      PATHS[key] = os.path.join(PATHS['PROB'], PATHS[key], '')
    util.make_dir(PATHS[key])

  return MOVEEXEC
Example #23
def main():
    usage = 'usage: %prog [options] <motifs> <cell_line>'
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()

    if len(args) != 2:
        parser.error('Must provide motifs and cell line.')
    else:
        motifs = args[0].split(',')
        cell_line = int(args[1])

    print('Processing')
    print(motifs)
    # load and get model layer
    run_path = 'paper_runs/new_models/32_res/run-20211023_095131-w6okxt01'
    layer = -3
    model, bin_size = read_model(run_path, compile_model=False)
    aux_model = tf.keras.Model(inputs=model.inputs,
                               outputs=model.layers[layer].output)
    output_dir = util.make_dir('paper_GIA_csvs')
    # load and threshold data
    testset, targets = tfr_evaluate.collect_whole_testset(coords=True)
    C, X, Y = util.convert_tfr_to_np(testset, 3)
    for testset_type in [
            'all threshold', 'cell line low coverage',
            'cell line high coverage'
    ]:
        print(testset_type)
        selected_X = select_set(testset_type, C, X, Y, cell_line=cell_line)
        gi = quant_GIA.GlobalImportance(model, targets)
        gi.occlude_all_motif_instances(selected_X, motifs, func='mean')
        df = gi.summary_remove_motifs[0]
        file_prefix = '{}_in_{}_{}'.format(df['motif pattern'].values[0],
                                           targets[cell_line], testset_type)
        df.to_csv(os.path.join(output_dir, file_prefix + '.csv'), index=None)
Example #24
    def initUI(self):
        self.ui.setWindowTitle('Visualization')
        self.ui.show()

        make_dir("./results")

        logTextBox = QTextEditLogger(self.plainTextEdit)
        # You can format what is printed to text box
        logTextBox.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logging.getLogger().addHandler(logTextBox)
        # You can control the logging level
        logging.getLogger().setLevel(logging.DEBUG)

        self.input_btn.clicked.connect(self.get_input_image)
        self.cls_btn.clicked.connect(self.selc_cls)
        self.start_btn.clicked.connect(self.start)
Example #25
def categorize_20newsgroup(base_dir):
    corpus = fetch_20newsgroups(subset='train',
                                remove=('headers', 'footers', 'quotes'))

    util.make_dir(base_dir)

    for cat in range(20):
        util.make_dir('{}/{}'.format(base_dir, 'c' + str(cat)))

    for index in range(len(corpus.data)):
        new = '{}/{}/{}'.format(base_dir,
                                'c' + str(list(corpus.target)[index]),
                                str(index) + '.txt')

        util.write_file(new, corpus.data[index].encode('utf-8'))

    return True
Example #26
    def setup_directories(self):
        if self.local_dir and 'symlink' == self.link_type:
            if not os.path.islink(self.package_dir) and os.path.isdir(self.package_dir):
                shutil.rmtree(self.package_dir)
            util.symlink(self.package_dir, self.local_dir)
        else:
            if os.path.islink(self.package_dir):
                os.unlink(self.package_dir)
            util.make_dir(self.package_dir)

        if self.version_dir:
            if os.path.islink(self.version_package_dir) or os.path.isfile(self.version_package_dir):
                os.unlink(self.version_package_dir)
            elif os.path.isdir(self.version_package_dir):
                shutil.rmtree(self.version_package_dir)
            if 'symlink' == self.link_type:
                util.symlink(self.version_package_dir, os.path.relpath(self.package_dir, self.version_dir))
            else: # hardlink
                util.make_dir(self.version_package_dir)
Example #27
def inference(cf, data_path, USE_CUDA):
    checkpoints_dir = cf['data']['checkpoints_dir']
    checkpoints_dir = make_dir(checkpoints_dir, cf)
    files = os.listdir(data_path)
    for f in files:
        if f[-3:] == "mp4" and "no_ois" not in f and "no_shutter" not in f and "gimbal" not in f.lower(
        ) and "grid" not in f.lower() and "flo" not in f.lower():
            video_name = f[:-4]

    # Define the model
    model = Model(cf)
    load_model = cf["model"]["load_model"]

    print("------Load Pretrined Model--------")
    if load_model is not None:
        checkpoint = torch.load(load_model)
        print(load_model)
    else:
        load_last = os.path.join(checkpoints_dir,
                                 cf['data']['exp'] + '_last.checkpoint')
        checkpoint = torch.load(load_last)
        print(load_last)
    model.net.load_state_dict(checkpoint['state_dict'])
    model.unet.load_state_dict(checkpoint['unet'])

    if USE_CUDA:
        model.net.cuda()
        model.unet.cuda()

    print("-----------Load Dataset----------")
    test_loader = get_inference_data_loader(cf, data_path, no_flo=False)
    data = test_loader.dataset.data[0]

    start_time = time.time()
    virtual_queue = run(model, test_loader, cf, USE_CUDA=USE_CUDA)

    virtual_data = np.zeros((1, 5))
    virtual_data[:, 1:] = virtual_queue[0, 1:]
    virtual_data[:, 0] = data.frame[0, 0]
    virtual_queue = np.concatenate((virtual_data, virtual_queue), axis=0)

    print(virtual_queue.shape)
    time_used = (time.time() - start_time) / 60

    print("Time_used: %.4f minutes" % (time_used))

    virtual_path = os.path.join("./test", cf['data']['exp'],
                                data_path.split("/")[-1] + '.txt')
    np.savetxt(virtual_path, virtual_queue, delimiter=' ')

    print("------Start Warping Video--------")
    grid = get_grid(test_loader.dataset.static_options, \
        data.frame[:data.length], data.gyro, data.ois, virtual_queue[:data.length,1:], no_shutter = False)
    return data, virtual_queue, video_name, grid
Example #28
def add_dir(item: UpdateDir, q: str = Query(None)):
    safe_path = safe_path_join(path=q, root=os.environ["ROOT_DIR"])
    dir_path = util.make_dir(safe_path, item.name)
    items = util.get_dir(dir_path)
    items_json_encoded = jsonable_encoder(items)
    resp = {
        "path": get_host_path(dir_path),
        "count": len(items),
        "items": items_json_encoded,
    }
    return JSONResponse(content=resp)
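
Unlike the single-argument helpers elsewhere on this page, this util.make_dir takes a parent path plus a new directory name and returns the resulting path. A sketch of a helper matching that call site (assumed from usage, not the project's actual code):

import os

def make_dir(parent, name):
    # Join the parent directory and the new directory name, create the
    # directory if it is missing, and return the full path so callers
    # can list its contents afterwards.
    dir_path = os.path.join(parent, name)
    os.makedirs(dir_path, exist_ok=True)
    return dir_path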
Example #29
def main():
    """Set everything off and handle files/stdin etc"""
    print_help, options, args = parseOptions()
    if len(sys.argv)>1:
        if options['honeypots'] is None:
            print "No honeypots specified. Please use either -H or config file to specify honeypots.\n"
            sys.exit(2)
        hsingleton = HoneysnapSingleton.getInstance(options)
        if not os.path.exists(options['output_data_directory']):
            make_dir(options['output_data_directory'])
        if os.path.exists(options['output_data_directory']):
            for i in options["honeypots"]:
                make_dir(options["output_data_directory"]+"/"+i)
        # by default treat args as files to be processed
        # handle multiple files being passed as args
        if len(args):
            for f in args:
                if os.path.exists(f) and os.path.isfile(f): 
                    processFile(f)
                else:
                    print "File not found: %s" % f
                    sys.exit(2)
        # no args indicating files, read from stdin
        else:
            # can't really do true stdin input, since we repeatedly parse
            # the file, so create a tempfile that is read from stdin
            # pass it to processFile
            fh = sys.stdin
            tmph, tmpf = tempfile.mkstemp()
            tmph = open(tmpf, 'wb')
            for l in fh:
                tmph.write(l)
            tmph.close()
            processFile(tmpf)
            # all done, delete the tmp file
            os.unlink(tmpf)
        cleanup(options)
    else:
        print_help()  
Example #31
def main(args=None):
    config_file = args.config
    dir_path = args.dir_path
    cf = yaml.load(open(config_file, 'r'))

    USE_CUDA = cf['data']["use_cuda"]

    checkpoints_dir = cf['data']['checkpoints_dir']
    checkpoints_dir = make_dir(checkpoints_dir, cf)

    data_name = sorted(os.listdir(dir_path))
    for i in range(len(data_name)):
        print("Running: " + str(i + 1) + "/" + str(len(data_name)))
        inference(cf, os.path.join(dir_path, data_name[i]), USE_CUDA)
    return
Example #32
def main():
    rstDic = readResultCSV()
    predictDic = readPredictResult()
    ac_num = 0  # number of split screens detected correctly
    split_num = 0  # number of split-screen samples
    lost_num = 0  # number of split screens missed by detection
    error_num = 0  # number of samples wrongly detected as split screen
    total_num = len(predictDic.keys())
    no_split_num = 0  # number of non-split samples
    no_split_right_num = 0  # non-split samples predicted correctly

    path_1_1 = os.path.join(TO_PATH, '1_1')
    path_1_0 = os.path.join(TO_PATH, '1_0')
    path_0_1 = os.path.join(TO_PATH, '0_1')
    path_0_0 = os.path.join(TO_PATH, '0_0')
    util.make_dir(path_1_1)
    util.make_dir(path_1_0)
    util.make_dir(path_0_1)
    util.make_dir(path_0_0)

    for key in predictDic.keys():
        frompath = os.path.join(ORI_PATH, key)
        if int(rstDic[key]) == 2:
            split_num += 1
            if int(predictDic[key]) == 1:
                ac_num += 1
                util.run_command('cp %s %s' % (frompath, path_1_1))
            else:
                lost_num += 1
                util.run_command('cp %s %s' % (frompath, path_1_0))

        else:
            no_split_num += 1
            if int(predictDic[key]) == 1:
                error_num += 1
                util.run_command('cp %s %s' % (frompath, path_0_1))
            else:
                no_split_right_num += 1
                #util.run_command('cp %s %s' % (frompath, path_0_0))

    util.log('total:%d accuracy: %d  lost: %d  error: %d right_nosplit:%d' %
             (total_num, ac_num, lost_num, error_num, no_split_right_num))
Example #33
sys.path.append('../')
import util

ratio_step = 10
rounds = 5
ratios = [r / 100 for r in range(0, 100 + ratio_step, ratio_step)]
# for the main program
iterations = list(itertools.product(*[ratios, [1.0], range(rounds)]))[:11]
model_name = 'nn'

dataframe = util.load_wildlab_df()
columns = [c for c in util.LABELS if c in dataframe.columns]
dataframe = dataframe[columns]
res_dir = '{}/exp-labDiffPackedBenign/{}'.format(exp_util.RES_ROOT, model_name)

util.make_dir(res_dir)
database = '{}/exp.db'.format(res_dir)

n_workers = 1
cores_per_worker = -1

sizes = dict(training_ratio=0.7,
             testing_packed_benign_ratio=0.5,
             testing_packed_malicious_ratio=1)


def process_dataset(df, seed):
    '''
    Process the entire dataset just one time to save memory
    :param df: pandas dataframe
    :rtype: Tuple(pandas.dataframe)
Example #34
        label.set_fontproperties(font)
    fig.savefig(dir_name + "{}.png".format(file_name))
    plt.close()


if __name__ == '__main__':
    args = sys.argv
    sns.set()

    if len(args) == 2:
        dir_png_name = "png/" + args[1] + "/"
        dir_csv_name = "csv/" + args[1] + "/"
    else:
        dir_png_name = "png/"
        dir_csv_name = "csv/"
    make_dir("/" + dir_png_name)
    make_dir("/" + dir_csv_name)

    rewards = pd.read_csv(dir_csv_name + "rewards.csv")
    regrets = pd.read_csv(dir_csv_name + "regrets.csv")
    notGreedyCounts = pd.read_csv(dir_csv_name + "notGreedyCounts.csv")

    agent_rewards = dfToNumpy(rewards)
    agent_regrets = dfToNumpy(regrets)
    agent_notGreedyCounts = dfToNumpy(notGreedyCounts)

    # print(agent_rewards)

    result_plot(agent_rewards["times"],
                agent_rewards,
                dir_png_name,
Example #35
np.set_printoptions(precision=5, threshold=10000, suppress=True, linewidth=10000)

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--width', type=int, default=160, help="render width")
parser.add_argument('--height', type=int, default=120, help="render height")
parser.add_argument('--input-dir', type=str, default="runs/14/d/imgs/ep_00007")
parser.add_argument('--output-dir', type=str, default="/tmp")
agents.add_opts(parser)
models.add_opts(parser)
replay_memory.add_opts(parser)
util.add_opts(parser)
opts = parser.parse_args()
#opts.ckpt_dir = "runs/14/d/ckpts"  # last known good
print >>sys.stderr, "OPTS", opts

util.make_dir(opts.output_dir)

# init our rl_agent
agent_cstr = eval("agents.NafAgent")
agent = agent_cstr(opts)
an = agent.network

# prepare three plots; one for each of block on left, in center, or on right
R = np.arange(-1, 1.25, 0.25)
X, Y = np.meshgrid(R, R)

for img_file in sorted(os.listdir(opts.input_dir)):
  # prep background; img will be on left, surface on right
  background = Image.new('RGB',
                         (10+320+10+320+10, 10+240+10),
                         (0, 0, 0))
Example #36
 def setOutdir(self, dir):
     make_dir(dir)
     self.outdir = dir
     self.fp = open(dir + "/socks.txt", "w")
Example #37
 def setOutdir(self, dir):
     make_dir(dir)
     self.fp = open(dir + "/sebek.txt", "a")
Example #38
 def setOutdir(self, dir):
     make_dir(dir)
     if self.direction == "queried":
         self.fp = open(dir + "/dns_queries.txt", "a")
     else:
         self.fp = open(dir + "/dns_served.txt", "a")
Example #39
 def setOutdir(self, dir):
     self.outdir = dir
     make_dir(dir)
Example #40
def processFile(file):
    """
    Process a pcap file.
    file is the pcap file to parse
    This function will run any enabled options for each pcap file
    """
    hs = HoneysnapSingleton.getInstance()
    options = hs.getOptions()

    tmpf, deletetmp = check_pcap_file(file)
    options["tmpf"] = tmpf

    try:
        if options["filename"] is not None:
            out = rawPathOutput(options["filename"], mode="a+")
        else:
            out = outputSTDOUT()
    except IOError:
        # we have some error opening the file
        # there is something at that path. Is it a directory?
        if not os.path.isdir(options["output_data_directory"]):
            print "Error: output_data_directory exists, but is not a directory."
        else:
            print "Unknown Error creating output file"
        # either way we cannot produce output, so bail out
        sys.exit(1)

    out("\n\nAnalysing file: %s\n\n" % file)

    if options["do_pcap"] == "YES":
        out("Pcap file information:\n")
        pi = pcapInfo(tmpf)
        pi.setOutput(out)
        pi.getStats()
        myout = rawPathOutput(options["output_data_directory"] +"/pcapinfo.txt")
        pi.setOutput(myout)
        pi.getStats()
        out("\n")

    if options["do_packets"] == "YES":
        out("\nIP packet summary for common ports:\n\n")
        out("%-40s %10s\n" % ("Filter", "Packets"))
        filters = setFilters(options)
        for i in filters:
            key, filt = i
            out(key+"\n")
            for hp in options['honeypots']:
                p = pcap.pcap(tmpf)
                c = Counter(p)
                c.setOutput(out)   
                f = filt % hp
                c.setFilter(f)
                c.count()
                del p
            out("\n")

    if options["do_incoming"] == "YES":
        for hp in options["honeypots"]:
            out("Counting incoming connections for %s\n" % hp)
            outdir = options["output_data_directory"] + "/%s/conns" % hp
            make_dir(outdir)
            p = pcap.pcap(tmpf)
            s = Summarize(p)
            filt = 'dst host %s' % hp
            s.setFilter(filt)
            s.start()
            fileout = rawPathOutput(outdir+"/incoming.txt", mode="a")
            if options["print_verbose"] == "YES":
                outputs = (fileout, out)
            else:
                outputs = (fileout,)
            for output in outputs:
                s.setOutput(output)
                s.doOutput("\nIncoming connections for %s\n" % hp)
                s.writeResults(limit=options["flow_count_limit"])
            del p


    if options["do_outgoing"] == "YES":
        for hp in options["honeypots"]:
            out("\nCounting outgoing connections for %s\n" % hp)
            outdir = options["output_data_directory"] + "/%s/conns" % hp
            make_dir(outdir)
            p = pcap.pcap(tmpf)
            s = Summarize(p)
            filt = 'src host %s' % hp
            s.setFilter(filt)
            s.start()
            fileout = rawPathOutput(outdir+"/outgoing.txt", mode="a")
            if options["print_verbose"] == "YES":
                outputs = (fileout, out)
            else:
                outputs = (fileout,)
            for output in outputs:
                s.setOutput(output)
                s.doOutput("\nOutgoing connections for %s\n" % hp)
                s.writeResults(limit=options["flow_count_limit"])
            del p

    if options["do_dns"] == "YES":
        out("\nExtracting DNS data to file\n\n")
        for hp in options["honeypots"]:
            #out("\nHoneypot %s\n\n" % hp)
            dns = dnsDecode(hp, direction="queried")
            dns.setOutdir(options["output_data_directory"] + "/%s/dns" % hp)
            dns.setOutput(out)
            dns.run()
            del dns
            dns = dnsDecode(hp, direction="served")
            dns.setOutdir(options["output_data_directory"] + "/%s/dns" % hp)
            dns.setOutput(out)
            dns.run()
            del dns

    if options["do_telnet"] == "YES":
        out("\nExtracting telnet data to file\n")
        for hp in options["honeypots"]:
            #out("\nHoneypot %s\n\n" % hp)
            tel = telnetDecode(hp)
            tel.setOutdir(options["output_data_directory"] + "/%s/telnet" % hp)
            tel.setOutput(out)
            tel.run()
            del tel

    if options["do_irc"] == "YES":
        """
        Here we will use PcapRE to find packets on irc_port with "PRIVMSG"
        in the payload. 
        """
        for hp in options["honeypots"]:
            out("\nLooking for packets containing PRIVMSG for %s\n\n" % hp)
            p = pcap.pcap(tmpf)
            r = pcapReCounter(p)
            r.setFilter("host %s and tcp" % hp)
            r.setRE('PRIVMSG')
            r.setOutput(out)
            r.start()                         
            r.writeResults() 
            for port in r.server_ports(options['irc_ports'][hp]):
                if port not in options['irc_ports'][hp]:    
                    if port==80:
                        out("\nSaw PRIVMSG on port 80, but cowardly not adding it to IRC port list - check manually\n")
                    else:
                        out("\nAdding port %s to irc list for %s\n" % (port, hp)) 
                        options['irc_ports'][hp].add(port) 
            del p
            del r

        out("\nAnalysing IRC\n")
        for hp in options["honeypots"]:
            outdir = options["output_data_directory"] + "/%s/irc" % hp
            for port in options["irc_ports"][hp]:
                out("\nHoneypot %s, port %s\n\n" % (hp, port))
                hirc = HoneySnapIRC()
                hirc.connect(tmpf, "host %s and tcp and port %s" % (hp, port))
                hd = ircDecode(hp)
                hd.setOutput(out)
                hd.setOutdir(outdir)
                hd.setOutfile('irclog-%s.txt' % port)
                hirc.addHandler("all_events", hd.decodeCB, -1)
                hirc.ircobj.add_global_handler("all_events", hd.printLines, -1)
                hirc.ircobj.process_once()
                hd.printSummary()
                del hd
                del hirc

    if options["all_flows"] == "YES":
        out("\nExtracting all flows\n")
        p = pcap.pcap(tmpf)
        de = tcpflow.tcpFlow(p)
        filt = "host "
        filt += " or host ".join(options["honeypots"])
        de.setFilter(filt)
        de.setOutdir(options["output_data_directory"]+ "/%s/flows")
        de.setOutput(out)
        de.start()
        de.dump_extract()
        del de
        del p

    if options["do_http"] == "YES":
        out("\nExtracting from HTTP\n\n")
        p = pcap.pcap(tmpf)
        de = tcpflow.tcpFlow(p)
        de.setFilter("port 80")
        de.setOutdir(options["output_data_directory"]+ "/%s/http")
        de.setOutput(out)
        decode = httpDecode.httpDecode()
        decode.setOutput(out)
        de.registerPlugin(decode.decode)
        de.start()
        de.dump_extract()
        decode.print_summary()
        del de
        del p


    if options["do_ftp"] == "YES":
        out("\nExtracting from FTP\n\n")
        p = pcap.pcap(tmpf)
        de = tcpflow.tcpFlow(p)
        de.setFilter("port 20 or port 21")
        de.setOutdir(options["output_data_directory"] + "/%s/ftp")
        de.setOutput(out)
        decode = ftpDecode.ftpDecode()
        decode.setOutput(out)
        de.registerPlugin(decode.decode)
        de.start()
        de.dump_extract()
        decode.print_summary() 
        del de
        del p

    if options["do_smtp"] == "YES":
        out("\nExtracting from SMTP\n\n")
        p = pcap.pcap(tmpf)
        de = tcpflow.tcpFlow(p)
        de.setFilter("port 25")
        de.setOutdir(options["output_data_directory"] + "/%s/smtp")
        de.setOutput(out)
        decode = smtpDecode.smtpDecode()
        decode.setOutput(out)
        de.registerPlugin(decode.decode)
        de.start()
        de.dump_extract()
        decode.print_summary() 
        del de
        del p

    if options["do_sebek"] == "YES":
        out("\nExtracting Sebek data\n")
        for hp in options["honeypots"]:
            out("\nHoneypot %s\n\n" % hp)
            sbd = sebekDecode(hp)
            sbd.setOutdir(options["output_data_directory"] + "/%s/sebek" % hp)
            sbd.setOutput(out)
            sbd.run()
            sbd.print_summary()
            del sbd

    if options["do_socks"] == "YES":
        out("\nExtracting Socks proxy information:\n")
        for hp in options["honeypots"]:
            out("\nHoneypot %s\n\n" % hp)
            p = pcap.pcap(tmpf)
            socks = SocksDecode(p,hp)
            socks.setOutdir(options["output_data_directory"] + "/%s/socks" % hp)
            socks.setOutput(out)
            socks.start()

    # delete the tmp file we used to hold unzipped data
    if deletetmp:
        os.unlink(tmpf)
Example #41
 def insert(self, key, attrdict):
     util.make_dir(self._key_dir(key))
     self._write_attributes(key, attrdict)
     self._hg(["commit", "-m", "Added entry %s." % key])
Example #42
sys.path.insert(0, '../script/')
sys.path.insert(0, '../script/analysis/')
import util
import hdf5_to_dict as io
TMP_DIR = 'TMP'
TMP_BUILD = 'build_tmp.py'
util.safe_remove(TMP_DIR)

AUTO = False
for arg in sys.argv:
  if arg == '-auto':
    AUTO = True

RES = [16, 32, 64]#, 128]

util.make_dir(TMP_DIR)
os.chdir('../prob/mhdmodes2d/')

copyfile('build.py', TMP_BUILD)
# COMPILE CODE AT MULTIPLE RESOLUTIONS USING SEPARATE BUILD FILE

for n in xrange(len(RES)):
  util.change_cparm('N1TOT', RES[n], TMP_BUILD)
  util.change_cparm('N2TOT', RES[n], TMP_BUILD)
  call(['python', TMP_BUILD, '-dir', TMP_DIR])
  call(['cp', os.path.join(os.getcwd(), TMP_DIR, 'bhlight'),
        '../../test/' + TMP_DIR + '/bhlight_' + str(RES[n])])
copyfile(os.path.join(os.getcwd(), TMP_DIR, 'param_template.dat'), '../../test/' + 
         TMP_DIR + '/param_template.dat')
util.safe_remove(TMP_BUILD)
util.safe_remove(TMP_DIR)
Example #43
            o = self.outdir % i
            make_dir(o)

    def setOutput(self, file):
        self.outfile = file

    def dump_extract(self):  
        for s in self.states.getFlowStates():
            s.close()
            for func in self.plugins:
                func(s, self.states)

    def writeResults(self):
        """TODO: I would like to implement some sort of summarization
        of the data files that were written during the run...
        """
        pass

if __name__ == "__main__":
    # for testing. Edit suitably
    import sys
    options = { 'honeypots':['192.168.0.1', '192.168.0.2'] }
    hsingleton = HoneysnapSingleton.getInstance(options)
    f = sys.argv[1]
    pcapObj = pcap.pcap(f)
    tflow = tcpFlow(pcapObj)
    make_dir('output')
    tflow.setOutdir("output/%s/")
    tflow.setFilter("not port 445")
    tflow.start()
Example #44
 def make_output_folders(self):
     util.make_dir(self.output_path)
     for pth in self._output_folders:
         util.make_dir(pth)
Example #45
 def __init__(self):
     make_dir(self.RES_DIR)
     make_dir(self.WALLPAPER_DIR)
     make_dir(self.LARGE_DIR)