def __init__(self, args, out_folder):

        self.folder = os.path.join(training_folder, args.training_folder)
        self.device = utils.choose_device()
        self.debug = args.debug
        self.steps = args.steps
        self.samples = args.samples
        self.batched_days = args.days
        self.k = args.k

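        # Normalize scalar settings to lists so the loops downstream can treat them uniformly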
        if not isinstance(self.samples, list):
            self.samples = [self.samples]
        if not isinstance(self.k, list):
            self.k = [self.k]

        if not os.path.exists(out_folder):
            os.mkdir(out_folder)

        self.output_folder = os.path.join(out_folder, args.output_folder)
        if not os.path.exists(self.output_folder):
            os.mkdir(self.output_folder)

        self.year_folders = self.list_dir(self.folder)

        print(self.year_folders)
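Every snippet on this page obtains its torch device from a choose_device helper, either directly or through a utils module. The helper itself is not shown in any of the examples; a minimal sketch, assuming it simply prefers CUDA when available and using an optional preferred argument (a name assumed here, not taken from the examples) for an explicit device string, could look like this:

import torch

def choose_device(preferred=None):
    # Honor an explicit request such as "cuda", "cuda:1" or "cpu" if one is given;
    # otherwise fall back to CUDA when available, else CPU.
    if preferred is not None:
        return torch.device(preferred)
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")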
Example #2
    def __init__(self, year, training_params, folder, debug):

        model = training_params["model"]
        self.folder = folder
        self.debug = debug

        if self.debug:
            self.max_epochs = 2
        else:
            self.max_epochs = training_params["max epochs"]

        if not os.path.exists(folder):
            os.mkdir(folder)

        # Get data
        self.device = utils.choose_device()
        self.datareader = data_reader.DataReader(year, self.device)

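        # Build the model for this data reader (OUTPUT_LENGTH is a constant defined elsewhere in the module)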
        self.model = loop_utils.generate_model(self.datareader, model,
                                               OUTPUT_LENGTH)

        self.model.datareader = self.datareader

        self.model.to(self.device)

        self.loss_function = utils.choose_loss(model)

        self.optimizer = optim.RMSprop(self.model.parameters(),
                                       lr=training_params["lr"])

        self.training_params = training_params

        self.number_concat_batches = training_params["batch"]
Example #3
    def __init__(self, env, config=None):
        super(DQNAgent, self).__init__(env, config)
        self.value_net = MultitaskNetwork(self.config["model"])
        self.target_net = MultitaskNetwork(self.config["model"])
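        # Start the target network as an exact copy of the value network and keep it in eval mode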
        self.target_net.load_state_dict(self.value_net.state_dict())
        self.target_net.eval()

        logger.debug("Number of trainable parameters: {}".format(
            trainable_parameters(self.value_net)))
        self.device = choose_device(self.config["device"])
        self.value_net.to(self.device)
        self.target_net.to(self.device)

        self.rl_lossFunction = loss_function_factory(
            self.config["rl_lossfunction"])
        self.predict_lossfunction = loss_function_factory(
            self.config["predict_lossfunction"])

        self.rl_optimizer = optimizer_factory(self.config["optimizer"]["type"],
                                              self.value_net.rl_updatePara(),
                                              **self.config["optimizer"])

        self.pre_optimizer = optimizer_factory(
            self.config["optimizer"]["type"], self.value_net.pre_updatePara(),
            **self.config["optimizer"])

        self.steps = 0
Example #4
parser.add_argument('--log-dir',
                    type=str,
                    default=default_log_dir,
                    help='Location to save logs and checkpoints')
parser.add_argument('--vtf',
                    action='store_true',
                    help='validation time flip augmentation')
parser.add_argument('--resize',
                    action='store_true',
                    help='resize to 128x128 instead of reflective padding')
args = parser.parse_args()

if args.resize:
    # if resize is used, loss on center doesn't make sense
    args.loss_on_center = False

device = choose_device(args.device)
use_gpu = device.type == 'cuda'

orig_img_size = 101
img_size = 128
padding = compute_padding(orig_img_size, orig_img_size, img_size)
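# Padding needed to grow the 101x101 images to the 128x128 network input
# (reflective padding is used unless --resize is set)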

# Each geometric transform below is applied independently with probability 0.5 * 0.25 = 0.125

geometric_transform_prob = 0.5 * 0.25
geometric_transform = Compose([
    RandomApply([CropAndRescale(max_scale=0.2)], p=geometric_transform_prob),
    RandomApply([HorizontalShear(max_scale=0.07)], p=geometric_transform_prob),
    RandomApply([Rotation(max_angle=15)], p=geometric_transform_prob),
    RandomApply([ElasticDeformation(max_distort=0.15)],
                p=geometric_transform_prob)
Example #5
    def iteration(self, config, c, batch_size, keys, log_dataframes, data, i,
                  mean_step_return, estimator):

        run_folder = os.path.join(config["training folder"],
                                  config["run folders"][i])
        model = torch.load(os.path.join(run_folder, 'model.pt'),
                           map_location=self.device)

        # Update parameters for model
        model.device = utils.choose_device()
        model.datareader.device = utils.choose_device()
        model.n_samples = self.n_samples

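        # Fixed-epsilon attacks sweep over the perturbation budgets in max_pert;
        # otherwise the attack uses the fixed trade-off constant c passed to this method.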
        if self.args.fixed_epsilon:
            attack_gen = atg.FixedEpsilonAttackGenerator(
                model, self.args, mean_step_return, estimator)

            if self.args.target == "binary":
                mean_percentage = attack_gen.compute_predictions()

                for l in range(len(self.args.max_pert)):
                    values = [
                        mean_percentage["buy"][l], mean_percentage["sell"][l]
                    ]

                    line = [
                        estimator, batch_size, self.lr, self.n_iterations,
                        self.args.max_pert[l]
                    ] + values
                    data.append(line)
            else:
                mean_perturbed = attack_gen.compute_predictions()

                for m in range(len(self.args.k)):
                    for l in range(len(self.args.max_pert)):
                        values = [mean_perturbed[m][l]]

                        line = [
                            estimator, batch_size, self.lr, self.n_iterations,
                            self.args.k[m], self.args.max_pert[l]
                        ] + values
                        data.append(line)

        else:

            if self.args.log_batch:
                batched_days = self.get_batch_days(batch_size)
            else:
                batched_days = self.args.days

            attack_gen = atg.FixedCAttackGenerator(model,
                                                   self.args,
                                                   mean_step_return,
                                                   c,
                                                   batch_size,
                                                   batched_days=batched_days)

            # Second result is certification statistics
            if self.args.log_batch:
                values, logs = attack_gen.compute_predictions()

                df = self.create_log_dataframe(logs)
                key = (c, batch_size)
                keys.append(key)

                # Create dataframe with logs
                log_dataframes.append(df)

            else:
                values = attack_gen.compute_predictions()

            line = [c, batch_size, self.lr, self.n_iterations] + values
            data.append(line)
Example #6
    def __init__(self, mean_return, dev_return):
        super(Model, self).__init__()

        self.mean = mean_return
        self.dev = dev_return
        self.device = utils.choose_device()
Example #7
parser.add_argument("--disable-cutout", action='store_true', help='disable cutout data augmentation')
parser.add_argument('--pretrained', default='imagenet', choices=('imagenet', 'coco', 'oid'),
                    help='dataset name for pretrained model')
parser.add_argument("--basenet", choices=models.BASENET_CHOICES, default='resnet34', help='model of basenet')
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
default_log_dir = os.path.join('runs', current_time + '_' + socket.gethostname())
parser.add_argument('--log-dir', type=str, default=default_log_dir, help='Location to save logs and checkpoints')
parser.add_argument('--vtf', action='store_true', help='validation time flip augmentation')
parser.add_argument('--resize', action='store_true', help='resize to 128x128 instead of reflective padding')
args = parser.parse_args()

if args.resize:
    # if resize is used, loss on center doesn't make sense
    args.loss_on_center = False

device = choose_device(args.device)
use_gpu = device.type == 'cuda'

orig_img_size = 101
img_size = 128
padding = compute_padding(orig_img_size, orig_img_size, img_size)

geometric_transform_prob = 0.5 * 0.25
geometric_transform = Compose([RandomApply([CropAndRescale(max_scale=0.2)], p=geometric_transform_prob),
                               RandomApply([HorizontalShear(max_scale=0.07)], p=geometric_transform_prob),
                               RandomApply([Rotation(max_angle=15)], p=geometric_transform_prob),
                               RandomApply([ElasticDeformation(max_distort=0.15)], p=geometric_transform_prob)])
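# Each brightness transform below is applied independently with probability 0.5 * 0.33 ≈ 0.17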
brightness_transform_prob = 0.5 * 0.33
brightness_transform = Compose([RandomApply([BrightnessShift(max_value=0.1)], p=brightness_transform_prob),
                                RandomApply([BrightnessScaling(max_value=0.08)], p=brightness_transform_prob),
                                RandomApply([GammaChange(max_value=0.08)], p=brightness_transform_prob)])
Example #8
    def run(self):

        for year_folder in self.year_folders:

            start_time = time.time()

            year_output_folder = self.make_output_folder(year_folder)
            config_list = self.get_config_folders(year_folder)

            for config in config_list:

                print("\n")
                print("Model", config["output folder"])

                for i in range(config["n reps"]):

                    print("Repetition", i)

                    # print("Repetition",i)
                    run_folder = os.path.join(config["training folder"],
                                              config["run folders"][i])
                    model = torch.load(os.path.join(run_folder, 'model.pt'),
                                       map_location=self.device)

                    # Update parameters for model
                    model.device = utils.choose_device()
                    model.datareader.device = utils.choose_device()
                    model.n_samples = max(self.samples)
                    model.log_samples = self.samples

                    config_output_full = os.path.join(year_output_folder,
                                                      config["output folder"])
                    if not os.path.exists(config_output_full):
                        os.mkdir(config_output_full)
                    output_folder = os.path.join(config_output_full,
                                                 config["run folders"][i])
                    if not os.path.exists(output_folder):
                        os.mkdir(output_folder)

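                    # With RPS scoring enabled, full and binary prediction results are written
                    # in addition to the trading results; otherwise only trading results are produced.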
                    if self.rps:
                        pr_gen = prg.Prediction_Generator(
                            model,
                            self.steps,
                            self.k,
                            self.batched_days,
                            self.samples,
                            rps=self.rps,
                            predictions=self.predictions,
                            binary_predictions=self.binary_predictions,
                            n_bins=self.n_bins,
                            debug=self.debug)

                        res, binary_res, trading_results = pr_gen.compute_predictions(
                        )

                        dataframe = self.result_to_dataframe(res)
                        dataframe.to_csv(
                            os.path.join(output_folder, "results.csv"))

                        dataframe_binary = self.binary_results_to_dataframe(
                            binary_res)
                        dataframe_binary.to_csv(
                            os.path.join(output_folder, "binary_results.csv"))

                    else:

                        pr_gen = prg.Prediction_Generator(model,
                                                          self.steps,
                                                          self.k,
                                                          self.batched_days,
                                                          self.samples,
                                                          rps=self.rps,
                                                          debug=self.debug)

                        trading_results = pr_gen.compute_predictions()

                    dataframe_trading = self.trading_results_to_dataframe(
                        trading_results)
                    dataframe_trading.to_csv(
                        os.path.join(output_folder, "trading_results.csv"))

            # Compute mean time per config and rep, and round
            end_time = time.time()
            elapsed_time = round((end_time - start_time) /
                                 float(config["n reps"] * len(config_list)), 2)
            self.log_meta(elapsed_time, year_output_folder)
    def __init__(self, c):
        super(CustomAttackLoss, self).__init__()
        self.c = c
        self.device = utils.choose_device()