Code Example #1
File: pusher.py Project: obsrvbl/ona
    def _create_archives(self, D_archive):
        """
        Given `D_archive`, a dictionary whose keys are datetime objects
        representing 10-minute bins and whose values are lists of files,
        create one archive per completed bin and then delete the files.
        """
        create_dirs(self.output_dir)

        # Don't touch the most recent 10-minute bin; it may still be active
        file_bins = sorted(D_archive)[:-1]
        for key in file_bins:
            file_list = D_archive[key]

            # Process the files before archiving
            self._process_files(file_list)

            # Create the file archive
            prefix = format(key, self.file_fmt)
            archive_name = '{}.{}'.format(prefix, self.api.hostname)
            archive_path = join(self.output_dir, archive_name)
            logging.info('Creating archive %s', archive_name)
            self._archive_files(file_list, archive_path)

            # Remove the now-archived files
            for file_path in file_list:
                self._remove_file(file_path)
Code Example #2
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config['result_dir'], config['checkpoint_dir'],
        config['checkpoint_dir_lstm']
    ])
    # save the config in a txt file
    save_config(config)
    sess_centralized = tf.Session(config=tf.ConfigProto())
    data = DataGenerator(config)
    model_vae = VAEmodel(config, "Centralized")
    model_vae.load(sess_centralized)
    trainer_vae = vaeTrainer(sess_centralized, model_vae, data, config)
    # here you train your model
    if config['TRAIN_VAE']:
        if config['vae_epochs_per_comm_round'] > 0:
            trainer_vae.train()

    if config['TRAIN_LSTM']:
        # create a lstm model class instance
        lstm_model = lstmKerasModel("Centralized", config)

        # produce the embedding of all sequences for training of lstm model
        # process the windows in sequence to get their VAE embeddings
        lstm_model.produce_embeddings(model_vae, data, sess_centralized)

        # Create a basic model instance
        lstm_nn_model = lstm_model.lstm_nn_model
        lstm_nn_model.summary()  # Display the model's architecture
        # checkpoint path
        checkpoint_path = lstm_model.config['checkpoint_dir_lstm']\
                                        + "cp_{}.ckpt".format(lstm_model.name)
        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path, save_weights_only=True, verbose=1)
        # load weights if possible
        # lstm_model.load_model(lstm_nn_model, config, checkpoint_path)

        # start training
        if config['lstm_epochs_per_comm_round'] > 0:
            lstm_model.train(lstm_nn_model, cp_callback)

    sess_centralized.close()
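The `create_dirs` helper itself never appears on this page, and each project ships its own version. Examples #1 and #2 suggest a variant that accepts either a single directory path or a list of paths; the following is a minimal sketch under that assumption, not the implementation from any of the projects above.

import os


def create_dirs(dirs):
    # Hypothetical helper: accept one path or an iterable of paths and
    # create each directory, ignoring directories that already exist.
    if isinstance(dirs, (str, os.PathLike)):
        dirs = [dirs]
    for d in dirs:
        os.makedirs(d, exist_ok=True)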
Code Example #3
    def calibrate(self, calibration_dir):
        create_dirs([self.debug_dir])
        self.calibration_dir = calibration_dir

        # Load saved calibration parameters.
        try:
            self.load()
            return
        except:
            pass

        # Get calibration images.
        img_paths = sorted(glob(join(self.calibration_dir, '*')))

        # Prepare object points.
        objpoint = np.zeros((self.nx * self.ny, 3), np.float32)
        objpoint[:, :2] = np.mgrid[0:self.ny, 0:self.nx].T.reshape(-1, 2)

        # Need to match 3D coordinates to 2D image coordinates.
        objpoints = []  # 3d coordinates.
        imgpoints = []  # 2d coordinates.

        # Corner extraction termination criteria.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                    0.001)

        corners_dims = (self.ny, self.nx)
        for img_path in img_paths:
            img = cv2.imread(img_path)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, corners_dims, None)

            # If found, add object points, image points (after refining them)
            if ret is True:
                corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                           criteria)
                imgpoints.append(corners)
                objpoints.append(objpoint)

                # Draw and display the corners
                img = cv2.drawChessboardCorners(img, corners_dims, corners,
                                                ret)

        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
            objpoints, imgpoints, gray.shape[::-1], None, None)
        self.calib_params = {'mtx': mtx, 'dist': dist}
        self.save()

        print('[d] Successfully calibrated camera params to', self.debug_dir)
Code Example #4
def main():
    """Runs the main deep learning pipeline."""
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print('Missing or invalid arguments.')
        exit(0)

    print('Create experiment directories.')
    create_dirs([
        config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir
    ])

    # TODO Refactor this
    print('Create partitions and labels.')
    partition = {}
    all_ids = [
        filename.split('.')[0] for filename in os.listdir('data')
        if filename.endswith('.npy')
    ]
    partition['train'] = all_ids[50:]
    partition['validation'] = all_ids[:50]

    labels_ids = [
        filename.split('.')[0] for filename in os.listdir('data')
        if filename.endswith('.npy')
    ]
    labels_values = [1 if 'swipe_positive_right' in filename \
                     else -1 if 'swipe_positive_left' in filename \
                     else 0 for filename in os.listdir('data') if filename.endswith('.npy')]
    labels = dict(zip(labels_ids, labels_values))

    print('Create the training and validation data generators.')
    training_generator = DeepSwipeDataGenerator(config, partition['train'],
                                                labels)
    validation_generator = DeepSwipeDataGenerator(config,
                                                  partition['validation'],
                                                  labels)
    data_generator = (training_generator, validation_generator)

    print('Create the model.')
    model = DeepSwipeModel(config)

    print('Create the trainer')
    trainer = DeepSwipeTrainer(model.model, data_generator, config)

    print('Start training the model.')
    trainer.train()
Code Example #5
File: tcpdump_capturer.py Project: obsrvbl/ona
    def __init__(self, *args, **kwargs):
        self.bpf_filter = kwargs.pop('bpf_filter')
        self.data_type = kwargs['data_type']
        self.capture_iface = kwargs.pop('capture_iface')
        self.capture_seconds = kwargs.pop('capture_seconds')
        self.pcap_dir = kwargs.pop('pcap_dir')
        self.pps_limit = kwargs.pop('pps_limit')

        if (self.capture_iface == 'any') and ('linux' not in platform):
            self.capture_iface = None

        self.capture_process = None
        create_dirs(self.pcap_dir)
        self.packet_limit = self.capture_seconds * self.pps_limit

        super(TcpdumpCapturer, self).__init__(*args, **kwargs)
Code Example #6
def restore_snapshot(folder: Path, json_path: Path):
    with json_path.open() as json_file:
        snapshot = json.load(json_file)
    current_files = {file.name: file for file in folder.rglob("*")}
    missing_files = {
        filename for filename, filepath in snapshot["files"].items()
        if filename not in current_files
    }
    if missing_files:
        print("The following files are missing:")
        print(*missing_files, sep="\n")
    # TODO: only create deepest dirs to avoid unnecessary IO
    create_dirs(snapshot["folders"].values())
    for filename, snap_filepath in snapshot["files"].items():
        if filename in missing_files:
            continue
        move(current_files[filename], snap_filepath)
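The TODO above could be addressed by creating only the deepest folders before passing them to create_dirs, since os.makedirs-style helpers create any missing parents anyway. A rough sketch of that filtering step (a hypothetical helper, not part of the original snippet):

from pathlib import Path


def deepest_dirs(folders):
    # Keep only folders that are not an ancestor of another folder in the set;
    # creating these also creates every intermediate parent implicitly.
    paths = [Path(f) for f in folders]
    return [p for p in paths
            if not any(p != other and p in other.parents for other in paths)]

The call above would then become create_dirs(deepest_dirs(snapshot["folders"].values())).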
Code Example #8
File: main.py Project: alexhbnr/CAZySQLite
def main():
    """Scrape content of cazy.org and store in SQLite3 database."""
    # Load config
    with open(Args['config'], "rt") as configfile:
        config = json.load(configfile)

    # Download genomes
    if not Args['nogenomes']:
        genome_categories = config['CAZYGENOMES']
        if Args['noarchaea']:
            del genome_categories['archaea']
        if Args['nobacteria']:
            del genome_categories['bacteria']
        if Args['noeukaryotes']:
            del genome_categories['eukaryotes']
        if Args['noviruses']:
            del genome_categories['viruses']
        if len(genome_categories.keys()) > 0:
            utils.create_dirs(Args['output'])
            protein_dfs = []
            taxids_dfs = []
            for cat in genome_categories.keys():
                print(f"Download genome category {cat}", file=sys.stderr)
                proteins, taxids = download.download_genomes(config, cat)
                protein_dfs.append(proteins)
                taxids_dfs.append(taxids)
            print("Write protein list to SQLite3 database", file=sys.stderr)
            pd.concat(protein_dfs) \
                .to_sql("genomes", sqlite3.connect(Args['output']),
                        index=False, if_exists=Args['tablemode'])
            print("Write taxid list to SQLite3 database", file=sys.stderr)
            pd.concat(taxids_dfs) \
                .to_sql("taxids", sqlite3.connect(Args['output']),
                        index=False, if_exists=Args['tablemode'])
        else:
            print(
                "All genome categories were excluded from downloading. At "
                "least one genome category must be enabled.",
                file=sys.stderr)
            sys.exit(1)

        if not Args['notaxonomy']:
            ncbitaxa.update_taxonomy(Args['updateNCBItaxonomy'])
            unique_taxids = pd.concat(taxids_dfs)['taxid'].unique()
            ncbitaxa.infer_taxonomy_lineage(unique_taxids) \
                .to_sql("ncbitaxonomy", sqlite3.connect(Args['output']),
                        index=False, if_exists=Args['tablemode'])
Code Example #9
File: main.py Project: gzerveas/abject_detector
def setup(args):
    """Prepare training session: read configuration from file (takes precedence), create directories.
    Input:
        args: arguments object from argparse
    Returns:
        config: configuration dictionary
    """

    config = args.__dict__  # configuration dictionary

    if args.config_filepath is not None:
        logger.info("Reading configuration ...")
        try:  # dictionary containing the entire configuration settings in a hierarchical fashion
            config.update(utils.load_config(args.config_filepath))
        except:
            logger.critical(
                "Failed to load configuration file. Check JSON syntax and verify that files exist"
            )
            traceback.print_exc()
            sys.exit(1)

    # Create output directory
    initial_timestamp = datetime.now()
    output_dir = config['output_dir']
    if not os.path.isdir(output_dir):
        raise IOError(
            "Root directory '{}', where the directory of the experiment will be created, must exist"
            .format(output_dir))

    output_dir = os.path.join(output_dir, config['experiment_name'])

    formatted_timestamp = initial_timestamp.strftime("%Y-%m-%d_%H-%M-%S")
    config['initial_timestamp'] = formatted_timestamp
    if (not config['no_timestamp']) or (len(config['experiment_name']) == 0):
        output_dir += "_" + formatted_timestamp
    utils.create_dirs([output_dir])
    config['output_dir'] = output_dir

    # Save configuration as a (pretty) json file
    with open(os.path.join(output_dir, 'configuration.json'), 'w') as fp:
        json.dump(config, fp, indent=4, sort_keys=True)

    logger.info("Stored configuration file in '{}'".format(output_dir))

    return config
Code Example #10
    def _check_point_to_csv(self, send_segment, now):
        # Writes files to the "input" directory so the pusher will find them,
        # archive them, and send them out.

        # The input directory may not have been created yet
        create_dirs(self.input_dir)

        segment_data = self.log_node.parsed_data.pop(send_segment, [])
        if not segment_data:
            return

        file_name = '{}_{}.csv.gz'.format(send_segment.strftime(self.file_fmt),
                                          now.strftime(self.file_fmt))
        file_path = join(self.input_dir, file_name)
        with gz_open(file_path, 'wt') as outfile:
            writer = DictWriter(outfile, CSV_HEADER)
            writer.writeheader()
            writer.writerows(self._format_item(x) for x in segment_data)
Code Example #11
def main(mode, size=128, file_path=None):
    df_images = load_images(mode)
    df_new = pd.DataFrame(columns=['image_id'] +
                          [str(i) for i in range(size**2)])
    df_new.image_id = df_images.image_id

    for i in range(len(df_images)):
        img = df_images.iloc[i, 1:].values.reshape(137, 236).astype(
            np.uint8) / 255.0
        img = 1 - img
        img = crop_resize(img, size=size)
        df_new.iloc[i, 1:] = img.reshape(-1)

    df_new = df_new.reset_index().drop('index', axis=1)

    if file_path:
        assert file_path.endswith('feather')
        create_dirs(file_path)
        df_new.to_feather(file_path)
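Here `create_dirs` receives a file path rather than a directory, which suggests a variant that ensures the parent directory of the output file exists before `to_feather` writes it. A guess at what that variant might look like:

import os


def create_dirs(file_path):
    # Hypothetical variant: make sure the directory containing `file_path` exists.
    os.makedirs(os.path.dirname(file_path) or '.', exist_ok=True)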
Code Example #12
File: aggregate.py Project: jemceach/OpenViEWS
def prep_jobs_agg(jobs, dir_agg):
    dir_agg_stats = dir_agg + "stats/"
    create_dirs([dir_agg_stats])

    path_input = dir_agg + "merged.hdf5"

    for job in jobs:
        if "q" in job.keys():
            job['name'] = job['stat'] + str(job['q'])
        else:
            job['name'] = job['stat']

        path_output = dir_agg_stats + job['name'] + ".hdf5"
        job['path_input'] = path_input
        job['path_output'] = path_output
        job['path_index'] = dir_agg + "index.p"
        job['path_varnames'] = dir_agg + "varnames.p"

    return jobs
Code Example #13
File: data.py Project: skadoodleR/BGRL_Pytorch
    def __init__(self,
                 root="data",
                 name='cora',
                 num_parts=1,
                 final_parts=1,
                 augumentation=None,
                 transform=None,
                 pre_transform=None):
        self.num_parts = num_parts
        self.final_parts = final_parts
        self.augumentation = augumentation
        self.root, self.name, self.data_dir = download_data(
            root=root, name=name)
        utils.create_dirs(self.dirs)
        super().__init__(root=self.data_dir,
                         transform=transform,
                         pre_transform=pre_transform)
        path = osp.join(self.data_dir, "processed",
                        self.processed_file_names[0])
        self.data, self.slices = torch.load(path)
Code Example #14
File: nvzflow_reader.py Project: sararadh-cool/ona
    def _get_file_logger(self):
        # Output file logger - outputs once per minute
        file_dir = environ.get(ENV_NVZFLOW_LOG_DIR, DEFAULT_NVZFLOW_LOG_DIR)
        create_dirs(file_dir)

        file_handler = TimedRotatingFileHandler(
            join(file_dir, 'nvzflow.log'),
            when='m',
            interval=1,
            backupCount=int(
                environ.get(ENV_NVZFLOW_LOG_LIMIT, DEFAULT_NVZFLOW_LOG_LIMIT)),
            utc=True,
        )

        file_logger = logging.getLogger('nvzflow_reader_ouput')
        file_logger.setLevel(logging.INFO)
        file_handler.setFormatter(logging.Formatter('%(message)s'))
        file_logger.addHandler(file_handler)

        return file_logger
Code Example #15
def main():
    # create folders to save train resources
    log_dir = utils.create_dirs(FLAGS.log_dir_path, FLAGS.batch_size,
                                FLAGS.learning_rate)

    # load dataset
    print('Loading description dataset...')
    dataset = polyu.description.Dataset(FLAGS.dataset_path)
    print('Loaded')

    # train
    train(dataset, log_dir)
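In this project (and in example #22 below) `utils.create_dirs` takes the log root plus hyperparameters and returns the directory it created, so it presumably builds a run-specific folder name. A possible sketch; the naming scheme is an assumption, not the project's actual behavior.

import os
from datetime import datetime


def create_dirs(log_dir_path, batch_size, learning_rate):
    # Hypothetical sketch: derive a run-specific folder from the hyperparameters,
    # create it, and return its path for the trainer to use.
    timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    run_name = 'bs{}_lr{}_{}'.format(batch_size, learning_rate, timestamp)
    log_dir = os.path.join(log_dir_path, run_name)
    os.makedirs(log_dir, exist_ok=True)
    return log_dir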
Code Example #16
File: scrap.py Project: SnetkovR/news_scraping
def main(url):
    config = configparser.RawConfigParser()
    config.read("scraper.conf")
    handler = PageHandler(requester=Requester,
                          parser=TextParser,
                          scrap_config=config)
    text = handler.handle(url)

    path = create_dirs(url)

    with open(path + ".txt", 'w', encoding="utf-8") as out:
        out.write(text)
Code Example #17
def main(workspace=None):

    pwd = utils.work_base_folder
    cnt_polygon_folder = os.path.join(pwd, utils.cnt_polygon_folder)
    stage1_folder = os.path.join(pwd, utils.stage1_folder)
    arcpy.env.workspace = "in_memory"
    arcpy.env.overwriteOutput = True

    utils.create_dirs([stage1_folder, arcpy.env.workspace])

    targets = utils.list_folder_sorted_ext(
        os.path.join(pwd, cnt_polygon_folder), ".shp")
    for j in targets:

        # We need to give a full path, otherwise ArcGIS will look for it in the workspace.
        orig = os.path.join(pwd, cnt_polygon_folder, j)
        q = utils.relocate(orig, stage1_folder)

        # Main geoprocessing routine
        execute(orig, q, arcpy.env.workspace)

        print("OK")
Code Example #18
File: collector.py Project: ak5793/sEMG_Project
def initialize_pipeline(connection):
    """
    Begins the data collection pipeline.
    """
    gather_parameters = prompt_dispatcher(connection, 'gather_parameters_func')
    subject_num = gather_parameters('What is the subject number?')
    motion_num = gather_parameters('What is the motion?')
    trial_num = gather_parameters('What is the trial number?')
    data_path = create_dirs(subject_num, HAND_MOTIONS[motion_num], trial_num)
    if not prompt_dispatcher(connection, "Is this a valid path?")(data_path):
        return close_connection(connection)
    prompt_dispatcher(connection, "Prepare for data collection.")()
    record_data(connection, data_path, HAND_MOTIONS[motion_num])
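This `create_dirs` builds a nested subject/motion/trial directory and returns its path for `record_data`. A hypothetical sketch of that three-argument variant; the base directory and naming scheme are assumptions.

import os


def create_dirs(subject_num, motion, trial_num, base_dir='data'):
    # Hypothetical sketch: nest the recording directory by subject, motion, and trial.
    data_path = os.path.join(base_dir,
                             'subject_{}'.format(subject_num),
                             str(motion),
                             'trial_{}'.format(trial_num))
    os.makedirs(data_path, exist_ok=True)
    return data_path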
Code Example #19
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config['result_dir'], config['checkpoint_dir'], config['checkpoint_dir_lstm']])
    # save the config in a txt file
    save_config(config)
    # create tensorflow session
    sessions = []
    data = []
    model_vaes = []
    vae_trainers = []
    lstm_models = []
    model_vae_global = VAEmodel(config, "Global")
    sess_global = tf.Session(config=tf.ConfigProto())
    for i in range(1, 10):
        sess = tf.Session(config=tf.ConfigProto())
        sessions.append(sess)
        data.append(generator_fl(config, i))
        model_vaes.append(VAEmodel(config, "Client{}".format(i)))
        model_vaes[-1].load(sessions[-1])
        vae_trainers.append(vaeTrainer(sessions[-1], model_vaes[-1], data[-1], config))
        lstm_models.append(lstmKerasModel("Client{}".format(i), config))
    model_vae_global.load(sess_global)
    trainer_vae_global = vaeTrainer(sess_global, model_vae_global, data[0], config)
    lstm_model_global = lstmKerasModel("Global", config)
    client_weights = [0.1] * 8
    client_weights.append(0.2)
    aggregator = Aggregator(vae_trainers, trainer_vae_global, lstm_models, lstm_model_global, config, client_weights)
    aggregator.aggregate_vae()
    aggregator.aggregate_lstm()
Code Example #20
File: main.py Project: xifengbishu/MAMNet-Tensorflow
def main():
    # create the experiments directory
    ckpt_dir = os.path.join(args.exp_dir, args.exp_name)
    results_dir = os.path.join(ckpt_dir, 'results')
    create_dirs([ckpt_dir, results_dir])

    # create tensorflow session
    sess = tf.Session()
    print("\nSession is created!")

    # create instances of the model, data generator, logger, and trainer
    module = import_module("model." + args.model_name)
    model = module.create_model(args)

    if args.is_train or args.is_test:
        data = DataGenerator(args) 
        logger = Logger(sess, args) 
        trainer = Trainer(sess, model, data, logger, args) 

    if args.is_train:
        trainer.train()
    if args.is_test: 
        trainer.test() 
Code Example #21
def main(args):
    """entry of training or evaluation"""

    print("== tf version: {} ==".format(tf.__version__))

    # check FLAGS correctness and directories
    check_flags(FLAGS)
    create_dirs(FLAGS)

    # build graph
    """One thing that I learned is to build the model before
    loading data. Loading data won't be very troublesome,
    but building the model will."""

    print("[IRS] creating model and building graph ...")
    model = IRSModel(flags=FLAGS)

    # data loader
    print("[IRS] loading dataset ...")
    dataloader = DataLoader(flags=FLAGS)

    # run training
    print("[IRS] start running training ...")
    train(flags=FLAGS, model=model, dataloader=dataloader)
Code Example #22
def main():
  # create folders to save train resources
  log_dir = utils.create_dirs(FLAGS.log_dir_path, FLAGS.batch_size,
                              FLAGS.learning_rate)

  # load polyu dataset
  print('Loading PolyU-HRF dataset...')
  polyu_path = os.path.join(FLAGS.polyu_dir_path, 'GroundTruth',
                            'PoreGroundTruth')
  dataset = polyu.Dataset(
      os.path.join(polyu_path, 'PoreGroundTruthSampleimage'),
      os.path.join(polyu_path, 'PoreGroundTruthMarked'),
      split=(15, 5, 10),
      patch_size=FLAGS.patch_size,
      label_mode=FLAGS.label_mode,
      label_size=FLAGS.label_size)
  print('Loaded')

  # train
  train(dataset, log_dir)
Code Example #23
File: visualize2D.py Project: XiHuYan/ae
        ix = np.where(label == g)
        ax.scatter(x[ix, 0], x[ix, 1], label=g, s=20)
    ax.legend()
    # Save before show(); calling savefig() after the window is closed produces a blank image.
    plt.savefig(join(save_path, 'tSNE_plot.png'), dpi=1024)
    print('saving 2D vis to %s' % join(save_path, 'tSNE_plot.png'))
    plt.show()


if __name__ == '__main__':
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--exp_id', required=True, help='experiment id')
    argparser.add_argument('--datasets', required=True, help='dataset_name')

    args = argparser.parse_args()
    log_dir = 'logs/%s/%s' % (args.datasets, args.exp_id)
    res_dir = 'results/%s/%s' % (args.datasets, args.exp_id)
    create_dirs([res_dir])

    # X = np.loadtxt(join(data_root, dataset_name+'.txt'))
    y = np.loadtxt(join(config.data_root, args.datasets + '_label.txt'),
                   dtype='int')

    if config.formt == 'npy':
        x = np.load(join(res_dir, 'encoding.npy'))
    else:
        x = np.loadtxt(join(res_dir, 'encoding.txt'), dtype='float')
    x = x.squeeze()

    x_emb = TSNE(n_components=2).fit_transform(x)

    color_plot(x_emb, y, res_dir)
Code Example #24
from graphgen_cls.data import Graph_DFS_code_from_file
from model import create_model
from train import train
from sklearn.model_selection import StratifiedKFold
import sys
import os
import pickle
import random

import torch
torch.set_printoptions(threshold=10_000)

if __name__ == '__main__':

    args = Args()
    args = args.update_args()

    create_dirs(args)

    random.seed(123)

    # graphs = create_graphs(args)

    # random.shuffle(graphs)
    # graphs_train = graphs[: int(0.80 * len(graphs))]
    # graphs_validate = graphs[int(0.80 * len(graphs)): int(0.90 * len(graphs))]

    graph_list = create_graphs(args)

    with open(
            os.path.join(args.dataset_path, args.graph_type,
                         'graph_label.dat'), 'rb') as f:
        graph_label_list = pickle.load(f)
Code Example #25
    def save(self):
        create_dirs([self.debug_dir])
        self.warper.reset(M=self.M)
        pickle.dump(self.M, open(self.perspective_transform_file, 'wb'))
        print('[i] Successfully saved perspective transform params to',
              self.perspective_transform_file)
Code Example #26
    def __check_mlstats_dirs(self):
        '''Check if the mlstats directories exist'''
        create_dirs(COMPRESSED_DIR)
Code Example #27
	def create_standard_dirs(self):
		'''
		create all the necessary folders for one inspection
		'''
		building_dir = os.path.join(self.root, self.name)
		if not os.path.exists(building_dir):
			os.mkdir(building_dir)
		
		dir_list = ['3drecon', self.inspect_no, 'misc', 'reports']
		create_dirs(dir_list, building_dir)
				
		inspect_sub_dirs = ['flightlog', 'img_handheld_ir', 'img_handheld_rgb', 
						'img_handheld_survey', 'img_m210rtkv2_x7_others', 'results']  
		inspect_dir = os.path.join(building_dir, self.inspect_no)
		create_dirs(inspect_sub_dirs, inspect_dir)
		
		self.others_dir = os.path.join(inspect_dir, 'img_m210rtkv2_x7_others')
		others_sub_dirs = ['raw', 'raw_SD'] #may add more sub folders later on
		create_dirs(others_sub_dirs, self.others_dir)
				
		results_sub_dirs = ['all_results_drone_rgb', 'img_drone_rgb', 'all_results_handheld_rgb',
						  'img_handheld_rgb']
		self.result_dir = os.path.join(inspect_dir, 'results')
		create_dirs(results_sub_dirs, self.result_dir)
		
		self.facade_drone_dirs = ['facade'+str(i) for i in self.facade_list_drone]
		self.facade_dir_process = os.path.join(self.result_dir, 'img_drone_rgb')		
		self.facade_dir_raw = os.path.join(self.others_dir, 'raw')
		self.facade_result = os.path.join(self.result_dir, 'all_results_drone_rgb')
		if self.handheld:
			self.facade_hhl_dirs = ['facade'+str(i) for i in self.facade_list_handheld]
			self.facade_dir_handheld = os.path.join(self.result_dir, 'img_handheld_rgb')
			self.facade_result_handheld = os.path.join(self.result_dir, 'all_results_handheld_rgb')
			for folder in [self.facade_dir_handheld, self.facade_result_handheld]:    
				create_dirs(self.facade_hhl_dirs, folder)
				
			for facade_dir in self.facade_hhl_dirs:
			#create folder to store filtered images inside each facade folder
				overlay_dir = os.path.join(self.facade_result_handheld, facade_dir, 'overlay')
				if not os.path.exists(overlay_dir):
					os.mkdir(overlay_dir)
				
		for folder in [self.facade_dir_process,self.facade_dir_raw,self.facade_result]:
			create_dirs(self.facade_drone_dirs, folder)
		
		for facade_dir in self.facade_drone_dirs:
			#create folder to store filtered images inside each facade folder
			overlay_dir = os.path.join(self.facade_result, facade_dir, 'overlay')
			if not os.path.exists(overlay_dir):
				os.mkdir(overlay_dir)
	
		if self.thermal:
			result_dirs = ['all_results_handheld_ir', 'all_results_drone_ir', 
						  'img_drone_ir', 'img_handheld_ir']
			create_dirs(result_dirs, self.result_dir)
			for name in ['all_results_drone_ir', 'img_drone_ir']:
				folder = os.path.join(self.result_dir, name)
				create_dirs(self.facade_drone_dirs, folder)
			thermal_dirs = ['ir', 'rgb']
			for facade_dir in self.facade_drone_dirs:
				parent_dir = os.path.join(self.facade_dir_raw, facade_dir)
				raw_dir = os.path.join(self.others_dir, 'raw_SD')
				create_dirs(thermal_dirs, parent_dir)
				create_dirs(thermal_dirs, raw_dir)
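Throughout this example `create_dirs` is called with a list of folder names and a parent directory, i.e. a two-argument variant. A minimal sketch of that signature (assumed, not taken from the project):

import os


def create_dirs(dir_names, parent_dir):
    # Hypothetical two-argument variant: create each named subfolder under parent_dir.
    for name in dir_names:
        os.makedirs(os.path.join(parent_dir, name), exist_ok=True)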
Code Example #28
File: main.py Project: pombredanne/MailingListStats
    def __create_download_dirs(self, mailing_list):
        # Remote archives are retrieved and stored in compressed_dir.
        # Local compressed archives are left in their original location.
        if mailing_list.is_remote():
            create_dirs(mailing_list.compressed_dir)
Code Example #29
from utils import process_config, create_dirs, get_args
import tensorflow as tf
from tensorflow.python.client import device_lib


def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']


print(get_available_gpus())

# load VAE model
config = process_config('PX4_config.json')
# create the experiments dirs
create_dirs([config['result_dir'], config['checkpoint_dir']])
# create tensorflow session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# create your data generator
data = DataGenerator(config)
# create the VAE model
model_vae = VAEmodel(config)
# create the trainer
trainer_vae = vaeTrainer(sess, model_vae, data, config)
model_vae.load(sess)

# here you train your model
if config['TRAIN_VAE']:
    if config['num_epochs_vae'] > 0:
        trainer_vae.train()
Code Example #30
        fpr_list.append(fpr_temp)
        tpr_list.append(tpr_temp)
        precision_list.append(precision_temp)
        recall_list.append(recall_temp)
        auroc_list.append(auroc_temp)
        aupr_list.append(aupr_temp)

    plot_roc_curve(fpr_list, tpr_list, './result/DanQ_JASPAR/')
    plot_pr_curve(precision_list, recall_list, './result/DanQ_JASPAR/')

    header = np.array([['auroc', 'aupr']])
    content = np.stack((auroc_list, aupr_list), axis=1)
    content = np.concatenate((header, content), axis=0)
    write2csv(content, './result/DanQ_JASPAR/result.csv')
    write2txt(content, './result/DanQ_JASPAR/result.txt')
    avg_auroc = np.nanmean(auroc_list)
    avg_aupr = np.nanmean(aupr_list)
    print('AVG-AUROC:{:.3f}, AVG-AUPR:{:.3f}.\n'.format(avg_auroc, avg_aupr))

if __name__ == '__main__':
    # Parses the command line arguments and returns as a simple namespace.
    parser = argparse.ArgumentParser(description='main.py')
    parser.add_argument('-e', '--exe_mode', default='train', help='The execution mode.')
    args = parser.parse_args()

    # Selecting the execution mode (keras).
    create_dirs(['./result/DanQ_JASPAR/'])
    if args.exe_mode == 'train':
        train()
    elif args.exe_mode == 'test':
        test()
Code Example #31
    def _create_download_dirs(self):
        # Remote archives are retrieved and stored in output_dir.
        # Local compressed archives are left in their original location.
        if self.mailing_list.is_remote():
            create_dirs(self.mailing_list.compressed_dir)
Code Example #32
        auroc_list.append(auroc_temp)
        aupr_list.append(aupr_temp)

    plot_roc_curve(fpr_list, tpr_list, './result/')
    plot_pr_curve(precision_list, recall_list, './result/')

    header = np.array([['auroc', 'aupr']])
    content = np.stack((auroc_list, aupr_list), axis=1)
    content = np.concatenate((header, content), axis=0)
    write2csv(content, './result/result.csv')
    write2txt(content, './result/result.txt')
    avg_auroc = np.nanmean(auroc_list)
    avg_aupr = np.nanmean(aupr_list)
    print('AVG-AUROC:{:.3f}, AVG-AUPR:{:.3f}.\n'.format(avg_auroc, avg_aupr))


if __name__ == '__main__':
    # Parses the command line arguments and returns as a simple namespace.
    parser = argparse.ArgumentParser(description='main.py')
    parser.add_argument('-e',
                        '--exe_mode',
                        default='train',
                        help='The execution mode.')
    args = parser.parse_args()

    # Selecting the execution mode (keras).
    create_dirs(['./result', './result/model'])
    if args.exe_mode == 'train':
        train()
    elif args.exe_mode == 'test':
        test()
Code Example #33
    def __init__(self, *args, **kwargs):
        self.data_type = kwargs['data_type']
        self.pcap_dir = kwargs.pop('pcap_dir')
        create_dirs(self.pcap_dir)
        super(TcpdumpPusher, self).__init__(*args, **kwargs)
Code Example #34
File: main.py Project: pombredanne/MailingListStats
    def __check_mlstats_dirs(self, compressed_dir):
        '''Check if the mlstats directories exist'''
        create_dirs(compressed_dir)
Code Example #35
File: main.py Project: gotoc/MailingListStats
    def __check_mlstats_dirs(self, compressed_dir):
        """Check if the mlstats directories exist"""
        create_dirs(compressed_dir)