def applyCV2Dir(dirname, infdir_name='sample_imgs/test_output',
                procdir_name='sample_imgs/cv_output',
                use_clahe=True, overwrite_file=False,
                data_reconstruct=False):
    """Run the CV feature-extraction pipeline on every log directory under
    ``dirname``.

    For each log, input images are taken from ``infdir_name`` and results are
    written to ``procdir_name``.  Logs whose output directory already exists
    are skipped unless ``overwrite_file`` is set.
    """
    for short_name in os.listdir(dirname):
        print('-' * 20)
        print(short_name)
        log_dir = os.path.join(dirname, short_name)
        inf_path = os.path.join(log_dir, infdir_name)
        proc_path = os.path.join(log_dir, procdir_name)
        if not os.path.isdir(inf_path):
            continue
        if os.path.isdir(proc_path) and not overwrite_file:
            continue
        print('processing %s...' % inf_path)
        makeDir(proc_path)

        # The dataset name is encoded in either the first or the last
        # '_'-separated token of the log directory name; later matches win
        # (a match on the last token overrides one on the first).
        tokens = os.path.basename(log_dir).split('_')
        dset_name = None
        for token in (tokens[0], tokens[-1]):
            for cand in dataset_cands:
                if cand in token:
                    dset_name = cand

        src_dir = os.path.join('data/%s_detailed/original' % dset_name)
        src_names = [os.path.join(src_dir, name.replace('_output', ''))
                     for name in os.listdir(inf_path)]
        for src_name in src_names:
            feature_extractor.main_dirinput(
                src_name, proc_path, inf_path, use_clahe=use_clahe)
def genDataset(dirname, out_dirname, procfn, **kwargs):
    """Build a derived dataset under ``out_dirname`` from ``dirname``.

    Copies the ``original`` directory and ``data_split.json`` verbatim, then
    applies ``procfn`` to every available ground-truth image and writes the
    result to ``<out_dirname>/gt``.

    Args:
        dirname: source dataset root (contains ``original`` and ``gt``).
        out_dirname: destination dataset root.
        procfn: callable ``procfn(macro_img, img_name, kwargs) -> image``;
            note it receives the extra keyword arguments as a single dict,
            matching the existing caller contract.
        **kwargs: forwarded to ``procfn`` as one dict.
    """
    for dname in ['original', 'data_split.json']:
        src = os.path.join(dirname, dname)
        dst = os.path.join(out_dirname, dname)
        # 'original' is a directory, 'data_split.json' a plain file; dispatch
        # explicitly instead of the previous bare try/except, which retried
        # copy() after *any* copytree failure and masked real errors.
        if os.path.isdir(src):
            shutil.copytree(src, dst)
        else:
            shutil.copy(src, dst)

    oridir = os.path.join(dirname, 'original')
    gtdir = os.path.join(dirname, 'gt')
    gtoutdir = os.path.join(out_dirname, 'gt')
    makeDir([gtoutdir])

    for img_name in sorted(os.listdir(oridir)):
        gt_fname = convertFnameDomains('original', 'ground_truth', img_name)
        pred_fname = os.path.join(gtdir, gt_fname)
        out_fname = os.path.join(gtoutdir, gt_fname)

        macro_img = cv2.imread(pred_fname)
        # cv2.imread returns None for missing/unreadable files; skip those.
        if macro_img is None:
            continue

        bin_img = procfn(macro_img, img_name, kwargs)
        cv2.imwrite(out_fname, bin_img)
Ejemplo n.º 3
0
def setupFiles(from_dir, to_dir):
    """Recursively mirror ``from_dir`` into ``to_dir``.

    A plain file is copied with ``shutil.copyfile``; a directory is created
    via ``makeDir`` and each entry is copied by recursing.
    """
    if os.path.isfile(from_dir):
        shutil.copyfile(from_dir, to_dir)
        return

    makeDir(to_dir)
    for entry in os.listdir(from_dir):
        setupFiles(os.path.join(from_dir, entry),
                   os.path.join(to_dir, entry))
Ejemplo n.º 4
0
    def create_images(self, number_images, set_name, computer, config,
                      gen_mode):
        """Generate images from the environment held in ``self.env``.

        Images (plus a ``data_pairs.csv`` index) are written under
        ``<save_images>/<env_name>/<gen_mode>/<set_name>``.

        Args:
            number_images: number of images to produce.
            set_name: dataset split name ('train', 'test', ...).
            computer: key selecting the machine-specific paths in ``config``.
            config: configuration dict providing the save_images root path.
            gen_mode: sampling mode; one of the 'random*' variants,
                'incremental'/'incremental_weird' (grid over state space),
                or 'training' (not implemented).

        Raises:
            ValueError: incremental generation requested on duckietown.
            NotImplementedError: ``gen_mode == 'training'``.
        """
        root_path = config['paths'][computer]['save_images']
        images_path = os.path.join(root_path, self.env_name, gen_mode,
                                   set_name)
        utils.makeDir(images_path)

        # Init csv file with the header row; one_image appends the data rows.
        csv_path = os.path.join(images_path, 'data_pairs.csv')
        with open(csv_path, "w") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['X', 'Theta', 'Reward', 'Image', 'Drone angle'])

        # Create images
        if gen_mode in [
                'random', 'random_weird', 'random_0', 'random_1', 'random_2',
                'random_3', 'random_straight', 'random_0_straight',
                'random_1_straight', 'random_2_straight', 'random_3_straight'
        ]:
            for i in range(number_images):
                if i % 100 == 99:
                    print(i)  # progress marker every 100 images
                image_name = str(i)
                image_path = os.path.join(images_path, image_name + '.png')
                self.one_image(set_name, csv_path, image_path)
            self.env.close()

        elif gen_mode == 'incremental' or gen_mode == 'incremental_weird':
            if self.env_name == 'duckietown':
                raise ValueError(
                    "It is not possible to run an incremental image generation on the Duckietown environment."
                )
            # Sample states on a square grid covering the (x, theta) space;
            # at most square_side**2 <= number_images images are produced.
            square_side = int(np.floor(np.sqrt(number_images)))
            x_range = self.env.x_range
            theta_range = self.env.theta_range
            i = 0
            # Fixed: the original format string contained stray soft-hyphen
            # (U+00AD) characters that leaked into the printed output.
            print('x_range[0]: {}, x_range[1]: {}, num = square_side: {}'.
                  format(x_range[0], x_range[1], square_side))
            for x in np.linspace(x_range[0], x_range[1], num=square_side):
                for theta in np.linspace(theta_range[0],
                                         theta_range[1],
                                         num=square_side):
                    state = (x, theta)
                    image_name = str(i)
                    image_path = os.path.join(images_path, image_name + '.png')
                    self.one_image(set_name, csv_path, image_path, state)
                    i += 1
            self.env.close()

        elif gen_mode == 'training':
            raise NotImplementedError(
                "Generating images during training has not been implemented yet."
            )
Ejemplo n.º 5
0
    def __init__(self, config, computer,
                 test_incremental):  #trained_model_prefix, label_style):
        """Set up device, paths, model and test data for CNN evaluation.

        Args:
            config: experiment configuration dict ('exp', 'paths', 'cnn').
            computer: key selecting the machine-specific paths in ``config``.
            test_incremental: when True, evaluate on the 'incremental'
                variant of the generated test set.
        """
        # GPU Params
        use_gpu = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if use_gpu else "cpu")

        # Paths and configuration parameters
        self.environment = config['exp']['env']
        self.gen_mode = config['exp']['gen_mode']
        if test_incremental:
            if self.gen_mode == 'random':
                test_set_gen_mode = 'incremental'
            elif self.gen_mode == 'random_weird':
                test_set_gen_mode = 'incremental_weird'
            else:
                # Previously any other gen_mode left test_set_gen_mode unbound
                # and crashed below with UnboundLocalError; fail fast instead.
                raise ValueError(
                    "test_incremental requires gen_mode 'random' or "
                    "'random_weird', got {!r}".format(self.gen_mode))
        else:
            test_set_gen_mode = self.gen_mode
        self.label_style = config['exp']['label_type']
        model_name = config['exp']['model_name']
        self.testing_set_path = os.path.join(
            config['paths'][computer]['save_images'], self.environment,
            test_set_gen_mode, 'test')
        self.root_path = config['paths'][computer]['save_cnn']
        self.root_label_path = os.path.join(self.root_path, self.environment,
                                            self.label_style, self.gen_mode,
                                            model_name)
        self.rescale_size = tuple(config['cnn']['rescale_size'])

        self.model_type = config['cnn']['model']

        if test_incremental:
            self.test_results_path = os.path.join(self.root_label_path,
                                                  'test_results',
                                                  'incremental')
        else:
            self.test_results_path = os.path.join(self.root_label_path,
                                                  'test_results', 'test_set')
        ut.makeDir(self.test_results_path)
        # Printed once for both branches (the else branch previously printed
        # the path a second time).
        print(self.test_results_path)
        self.model_path = os.path.join(self.root_label_path,
                                       'latest_model.pth')
        self.cnn_params_path = os.path.join(self.root_label_path,
                                            'cnn_params.yaml')

        # Loading net
        self.net = self.load_model()
        self.criterion = nn.MSELoss()
        # Loading data
        self.test_set = self.load_data()
Ejemplo n.º 6
0
 def __init__(self, root_dir, height=256, width=256, camera_id=0, fps=30):
     """Frame-recorder settings; a ``<root_dir>/video`` save directory is
     created when ``root_dir`` is given, otherwise saving is disabled."""
     self.save_dir = None
     if root_dir:
         self.save_dir = utils.makeDir(root_dir, 'video')
     self.height = height
     self.width = width
     self.camera_id = camera_id
     self.fps = fps
     self.frames = []
Ejemplo n.º 7
0
def copyTarArc(old_tar, new_tar, temp_directory):
    """Re-pack ``old_tar`` into ``new_tar`` with sanitised member paths.

    Members whose path survives ``utils.correctPath`` are extracted into
    ``temp_directory``; the directory is then archived once into ``new_tar``.

    Args:
        old_tar: path of the tar archive to read.
        new_tar: path of the tar archive to create.
        temp_directory: scratch directory used for extraction.
    """
    with tarfile.open(old_tar, mode='r') as src:
        # TarInfo objects define no ordering, so the previous bare
        # members.sort() raised TypeError on Python 3; sort by member name.
        members = sorted(src.getmembers(), key=lambda m: m.name)

        utils.makeDir(temp_directory)
        for member in members:
            fixed_path = utils.correctPath(member.name)
            if fixed_path:
                member.name = fixed_path
                # NOTE(review): extracting archive-controlled names is
                # path-traversal prone; presumably correctPath sanitises
                # them — confirm.
                src.extract(member, path=temp_directory)

    # Build the new archive once, after all members are extracted (it was
    # previously rebuilt inside the loop for every single member).
    createTarArc(temp_directory, new_tar)
Ejemplo n.º 8
0
	def save_data(self, data, date, path_to_save):
		"""Save mean evaluation/exploration returns paired with step counts.

		Writes two timestamped ``.npy`` files under
		``<path_to_save>/training_data/runs/{test,train}``.

		Args:
			data: dict with 'nb_exp_steps', 'mean_ev_returns' and
				'mean_exp_returns' as parallel sequences.
			date: datetime used to timestamp the output file names.
			path_to_save: root directory for the run data.
		"""
		nb_exp_steps = data['nb_exp_steps']
		mean_ev_returns = data['mean_ev_returns']
		mean_exp_returns = data['mean_exp_returns']

		# Pair each return with its exploration-step count: [return, steps].
		mat_ev_ret = [list(a) for a in zip(mean_ev_returns, nb_exp_steps)]
		mat_exp_ret = [list(a) for a in zip(mean_exp_returns, nb_exp_steps)]

		test_path = os.path.join(path_to_save, 'training_data', 'runs', 'test')
		train_path = os.path.join(path_to_save, 'training_data', 'runs', 'train')

		ut.makeDir(test_path)
		ut.makeDir(train_path)

		# Single formatted timestamp reused for both file names.
		stamp = date.strftime("%Y-%m-%d_%H-%M-%S")
		path_ev_ret = os.path.join(test_path, '{}_avg_returns_test.npy'.format(stamp))
		path_exp_ret = os.path.join(train_path, '{}_avg_returns_train.npy'.format(stamp))

		# Removed leftover debug print of the full evaluation matrix.
		np.save(path_ev_ret, mat_ev_ret)
		np.save(path_exp_ret, mat_exp_ret)
Ejemplo n.º 9
0
def extractAllTarArc(tar_arc, dest=""):
    """Extract every member of tar archive ``tar_arc`` into ``dest``.

    A non-empty ``dest`` is created first; an empty ``dest`` extracts into
    the current working directory.

    NOTE(review): ``extractall`` on an untrusted archive is vulnerable to
    path traversal; pass ``filter='data'`` once Python >= 3.12 is assumed.
    """
    if dest:
        utils.makeDir(dest)
    # Context manager guarantees the archive is closed even if extraction
    # raises (the original leaked the handle on error).
    with tarfile.open(tar_arc, 'r') as tf:
        tf.extractall(dest)
Ejemplo n.º 10
0
def extractAll(zip_archive, dest=""):
    """Extract every member of ``zip_archive`` into ``dest``.

    A non-empty ``dest`` is created first; an empty ``dest`` extracts into
    the current working directory.
    """
    if dest:
        utils.makeDir(dest)
    # Context manager guarantees the zip file is closed even if extraction
    # raises (the original leaked the handle on error).
    with zipfile.ZipFile(zip_archive) as zf:
        zf.extractall(path=dest)
Ejemplo n.º 11
0
        # NOTE(review): fragment — the enclosing function and the branch that
        # sets temp_root on non-SLURM machines are outside this excerpt.
        temp_root = os.environ['SLURM_TMPDIR']

    ### CNN saving paths during usage (data training and model)
    # Layout: <temp_root>/<cnn-root>/<environment>/<label_type>/<gen_mode>/<model_name>
    use_cnn_path = os.path.join(temp_root, config['paths'][computer]['cnn'])
    use_cnn_label_path = os.path.join(use_cnn_path, environment, label_type,
                                      gen_mode, model_name)
    cnn_training_data_path = os.path.join(use_cnn_label_path, 'training_data')
    cnn_training_plots_path = os.path.join(cnn_training_data_path, 'plots')
    cnn_latest_model_path = os.path.join(use_cnn_label_path,
                                         'latest_model.pth')
    cnn_inter_model_path = os.path.join(use_cnn_label_path, 'inter_models')
    cnn_params_path = os.path.join(use_cnn_label_path, 'cnn_params.yaml')
    cnn_train_losses_path = os.path.join(use_cnn_label_path,
                                         'train_losses.npy')
    cnn_test_losses_path = os.path.join(use_cnn_label_path, 'test_losses.npy')
    # Only the directories that must exist up-front are created here; the
    # model/loss files are written later by the training code.
    ut.makeDir(cnn_training_data_path)
    ut.makeDir(cnn_training_plots_path)
    ut.makeDir(cnn_inter_model_path)
    # Publish all resolved paths through the shared config_path dict.
    config_path['use_cnn_path'] = use_cnn_path
    config_path['training_data_path'] = cnn_training_data_path
    config_path['training_plots_path'] = cnn_training_plots_path
    config_path['model_path'] = cnn_latest_model_path
    config_path['inter_model_path'] = cnn_inter_model_path
    config_path['cnn_params_path'] = cnn_params_path
    config_path['cnn_train_losses_path'] = cnn_train_losses_path
    config_path['cnn_test_losses_path'] = cnn_test_losses_path

    ### Image dataset path
    use_images_path = os.path.join(temp_root,
                                   config['paths'][computer]['images'])
    # NOTE(review): the statement below is truncated by this excerpt.
    training_set_path = os.path.join(use_images_path, environment, gen_mode,
Ejemplo n.º 12
0
    def __init__(self, env, test_env, config_path, config_rl):
        """Set up a SAC training run: hyper-parameters, agent, result paths,
        TensorBoard writer and replay memory.

        Args:
            env: training environment (gym-style).
            test_env: evaluation environment.
            config_path: dict of output paths ('training_data_path',
                'critic_model_path', 'actor_model_path').
            config_rl: dict of RL hyper-parameters.
        """
        self.env = env
        self.test_env = test_env

        ### RL parameters
        self.env_name = config_rl['env_name']
        self.test_env_name = config_rl['test_env_name']
        self.seed = config_rl['seed']
        self.policy = config_rl['policy']
        self.eval = config_rl['eval_']
        self.gamma = config_rl['gamma']
        self.tau = config_rl['tau']
        self.lr = config_rl['lr']
        self.alpha = config_rl['alpha']
        self.automatic_entropy_tuning = config_rl['automatic_entropy_tuning']
        self.batch_size = config_rl['batch_size']
        self.num_episodes = config_rl['num_episodes']
        self.max_steps_episode = config_rl['max_steps_episode']
        self.hidden_size = config_rl['hidden_size']
        self.updates_per_step = config_rl['updates_per_step']
        self.start_steps = config_rl['start_steps']
        self.target_update_interval = config_rl['target_update_interval']
        self.replay_size = config_rl['replay_size']
        self.cuda = config_rl['cuda']
        self.gen_est_reward_test = config_rl['gen_est_reward_test']

        self.sac_args = SAC_args(self.gamma, self.tau, self.lr, self.alpha,
                                 self.automatic_entropy_tuning, self.policy,
                                 self.target_update_interval, self.cuda,
                                 self.hidden_size)

        #self.env.env.reset()
        torch.manual_seed(self.seed)
        np.random.seed(self.seed)
        self.env.seed(self.seed)
        # SAC_CNN is used only for 'DTC_GT_Reward'; every other env gets the
        # plain SAC agent.
        if self.env_name not in ['DTC_GT_Reward']:
            self.agent = SAC(self.env.observation_space.shape[0],
                             self.env.action_space, self.sac_args)
        else:
            self.agent = SAC_CNN(self.env.observation_space.shape[0],
                                 self.env.action_space, self.sac_args)

        ### Path parameters
        self.training_data_path = config_path['training_data_path']
        ut.makeDir(self.training_data_path + '/runs')
        ut.makeDir(self.training_data_path + '/runs/train')
        ut.makeDir(self.training_data_path + '/runs/test')
        # One shared run identifier: previously each path called
        # datetime.now() separately, so the seven paths could carry different
        # timestamps when the calls straddled a second boundary.
        run_id = '{}_SAC_{}_{}_{}'.format(
            datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
            self.env_name, self.policy,
            "autotune" if self.automatic_entropy_tuning else "")
        self.vis_path = self.training_data_path + '/runs/vis/' + run_id
        self.train_res_path = self.training_data_path + '/runs/train/' + run_id
        self.train_rew_profit_path = (self.training_data_path +
                                      '/runs/train/' + run_id + '_return_profit')
        self.train_rew_error_path = (self.training_data_path +
                                     '/runs/train/' + run_id + '_return_error')
        self.test_res_path = (self.training_data_path +
                              '/runs/test/' + run_id + '_test')
        self.test_rew_profit_path = (self.training_data_path +
                                     '/runs/test/' + run_id + '_return_profit_test')
        self.test_rew_error_path = (self.training_data_path +
                                    '/runs/test/' + run_id + '_return_error_test')
        self.critic_model_path = config_path['critic_model_path']
        self.actor_model_path = config_path['actor_model_path']
        self.writer = SummaryWriter(logdir=self.vis_path)
        self.memory = ReplayMemory(self.replay_size)

        # Per-run bookkeeping for train/test returns and reward components.
        self.reward_mem = []
        self.reward_profit_mem = []
        self.reward_error_mem = []
        self.test_mem = []
        self.test_prof_mem = []
        self.test_err_mem = []
Ejemplo n.º 13
0
    # NOTE(review): fragment — the enclosing function and the temp_root value
    # used on non-'mila' machines are outside this excerpt.
    if computer == 'mila':
        # Getting local disk info
        temp_root = os.environ['SLURM_TMPDIR'] + '/'

    ### RL saving paths during usage (data training and model)
    use_rl_path = os.path.join(temp_root, config['paths'][computer]['rl'])
    # Experiment directory name: "<env>_<model>" when a model name is given.
    if model_name:
        exp_name = env_name + '_' + model_name
    else:
        exp_name = env_name
    use_rl_env_path = os.path.join(use_rl_path, exp_name)

    print('use_rl_env_path: {}'.format(use_rl_env_path))

    rl_training_data_path = os.path.join(use_rl_env_path, 'training_data')
    ut.makeDir(rl_training_data_path)
    # Publish the resolved paths through the shared config_path dict.
    config_path['training_data_path'] = rl_training_data_path
    config_path['critic_model_path'] = os.path.join(use_rl_env_path,
                                                    'critic_model.pth')
    config_path['actor_model_path'] = os.path.join(use_rl_env_path,
                                                   'actor_model.pth')

    ### Training environment

    #### Importing library
    # NOTE(review): function-level star imports are a SyntaxError on
    # Python 3 — this snippet is Python 2 code; prefer explicit module-level
    # imports when porting.
    if test_env_name == 'cartpole':
        from cartpole_mod_env import *
    elif test_env_name == 'duckietown' or test_env_name == 'duckietown_cam':
        from duckietown_mod_env import *

    #### Loading environment
Ejemplo n.º 14
0
 def set_test_results_path(self, new_path):
     """Redirect test-result output to ``new_path`` and ensure it exists."""
     self.test_results_path = new_path
     ut.makeDir(new_path)
Ejemplo n.º 15
0
def extractAll(zip_archive, dest=""):
    """Extract every member of ``zip_archive`` into ``dest``.

    A non-empty ``dest`` is created first; an empty ``dest`` extracts into
    the current working directory.
    """
    if dest:
        utils.makeDir(dest)
    # Context manager guarantees the zip file is closed even if extraction
    # raises (the original leaked the handle on error).
    with zipfile.ZipFile(zip_archive) as zf:
        zf.extractall(path=dest)
Ejemplo n.º 16
0
    # NOTE(review): fragment of a __main__ driver script (Python 2 — uses the
    # print statement); `dryrun`, `main`, `inform` and the use of
    # `change_list` live outside this excerpt.
    change_list = []

    # "dryrun" on the command line switches to report-only mode; remove it so
    # it is not mistaken for the packages directory argument below.
    if "dryrun" in sys.argv:
        dryrun = 1
        sys.argv.remove("dryrun")

    if len(sys.argv) < 2:
        print "Usage: %s  <pisi_packages_directory>" % sys.argv[0]
        sys.exit(1)

    if not os.path.exists(sys.argv[1]):
        print "%s does not exists" % sys.argv[1]
        sys.exit(1)

    pisi_directory = sys.argv[1]
    # Only packages matching *6.pisi in the given directory are processed.
    pisi_list = glob.glob1(pisi_directory, "*6.pisi")

    temp_directory = "/tmp/oatp"

    # Remember the starting directory so it can be restored afterwards.
    olddir = os.getcwd()

    # main() is run from inside the scratch directory for each package.
    for pisi in pisi_list:
        utils.makeDir(temp_directory)
        os.chdir(temp_directory)
        main(os.path.join(pisi_directory, pisi))

    if dryrun:
        inform()

    os.chdir(olddir)
Ejemplo n.º 17
0
def convDeepcrack(img_dir, dataset_name):
    """Convert ``img_dir`` into the DeepCrack layout under a 'deepcrack'
    subdirectory.

    'aigle' images are resized to 462x311; 'cfd' and 'deepcrack' keep their
    original size (target size None).
    """
    sizes = {'aigle': (462, 311), 'cfd': None, 'deepcrack': None}
    out_dir = os.path.join(img_dir, 'deepcrack')
    makeDir(out_dir)
    gen_deepcrack.main(img_dir, out_dir, sizes[dataset_name])
Ejemplo n.º 18
0
def convDeeplab(img_dir, dataset_name):
    """Convert ``img_dir`` into DeepLab image and tfrecord layouts.

    ``dataset_name`` is accepted for signature parity with the other
    converters but is not used here.
    """
    out_dir = os.path.join(img_dir, 'deeplab')
    tf_dir = os.path.join(img_dir, 'deeplab_tfrecords')
    makeDir([out_dir, tf_dir])
    gen_deeplab.main(img_dir, out_dir, tf_dir)
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from enum import Enum

#----script starts here

# NOTE(review): `ambient`, `utils` and `os` come from imports outside this
# excerpt; `ambient.ArgumentParser` is presumably a thin argparse wrapper
# providing --indir/--outdir/--start-date/--stop-date — confirm.
parser = ambient.ArgumentParser (description="""generate plots from ESO data""")

#parse user's command line arguments
args = parser.parse_args ()

#make sure the output directory exists
utils.makeDir (args.outdir)

#get list of all seeing days from the first parameter's ".orig" data file
allSeeingDays = ambient.Days (os.path.join (args.indir, ambient.parameterBasenames [0] + ".orig"), args.start_date, args.stop_date)

#scripts need start/stop dates now
#assert args.start_date

#if start-date/stop-date is specified on command line, restrict the analysis
#to that subset of days; otherwise use every available day
if args.start_date:
  seeingDays = ambient.dayListFromRange (allSeeingDays, args.start_date, args.stop_date)
else:
  seeingDays = allSeeingDays.days

def seeingCorrelations (seeingDays, otherDays):
Ejemplo n.º 20
0
    # NOTE(review): fragment of a __main__ driver script (Python 2 — uses the
    # print statement); `dryrun`, `main`, `inform` and the use of
    # `change_list` live outside this excerpt.
    change_list = []

    # "dryrun" on the command line switches to report-only mode; remove it so
    # it is not mistaken for the packages directory argument below.
    if "dryrun" in sys.argv:
        dryrun = 1
        sys.argv.remove("dryrun")

    if len(sys.argv) < 2:
        print "Usage: %s  <pisi_packages_directory>" % sys.argv[0]
        sys.exit(1)

    if not os.path.exists(sys.argv[1]):
        print "%s does not exists" % sys.argv[1]
        sys.exit(1)

    pisi_directory = sys.argv[1]
    # Only packages matching *6.pisi in the given directory are processed.
    pisi_list = glob.glob1(pisi_directory, "*6.pisi")

    temp_directory = "/tmp/oatp"

    # Remember the starting directory so it can be restored afterwards.
    olddir = os.getcwd()

    # main() is run from inside the scratch directory for each package.
    for pisi in pisi_list:
        utils.makeDir(temp_directory)
        os.chdir(temp_directory)
        main(os.path.join(pisi_directory, pisi))

    if dryrun:
        inform()

    os.chdir(olddir)