Exemplo n.º 1
0
class TensorBoard(Callback):
    """Callback that streams epoch metrics and parameter histograms to TensorBoard.

    # TODO: add option to write images; find fix for graph
    """

    def __init__(self, log_dir, update_frequency = 10):
        # BUG FIX: super() must be given the subclass, not the parent.
        # `super(Callback, self)` resolved past Callback and skipped its __init__.
        super(TensorBoard, self).__init__()
        self.log_dir = log_dir                    # root directory for event files
        self.writer = None                        # opened lazily in on_train_begin
        self.update_frequency = update_frequency  # log every N epochs

    def on_train_begin(self, **_):
        """Open a timestamped SummaryWriter and trace the model graph once."""
        self.writer = SummaryWriter(os.path.join(self.log_dir, datetime.datetime.now().__str__()))
        # A dummy input is required so add_graph can trace the model.
        # NOTE(review): assumes self.logger['device'] holds a torch device — confirm.
        rndm_input = torch.autograd.Variable(torch.rand(1, *self.model.input_shape), requires_grad = True).to(self.logger['device'])
        # fwd_pass = self.model(rndm_input)
        self.writer.add_graph(self.model, rndm_input)
        return self

    def on_epoch_end(self, **_):
        """Every `update_frequency` epochs, write scalars and weight histograms."""
        if (self.logger['epoch'] % self.update_frequency) == 0:
            epoch_metrics = self.logger['epoch_metrics'][self.logger['epoch']]
            # BUG FIX: dict.iteritems() is Python 2 only; use items() under Python 3.
            for e_metric, e_metric_dct in epoch_metrics.items():
                for e_metric_split, e_metric_val in e_metric_dct.items():
                    self.writer.add_scalar('{}/{}'.format(e_metric_split, e_metric), e_metric_val, self.logger['epoch'])
            for name, param in self.model.named_parameters():
                self.writer.add_histogram(name.replace('.', '/'), param.clone().cpu().data.numpy(), self.logger['epoch'])
        return self

    def on_train_end(self, **_):
        """Flush and close the event writer."""
        return self.writer.close()
Exemplo n.º 2
0
class TensorBoardReporting(ReportingHook):
    """Log results to tensorboard.

    Writes tensorboard logs to a directory specified in the `mead-settings`
    section for tensorboard. Otherwise it defaults to `runs`.
    """
    def __init__(self, **kwargs):
        super(TensorBoardReporting, self).__init__(**kwargs)
        from tensorboardX import SummaryWriter
        # The base dir is usually the directory the model is saved into.
        root = kwargs.get('base_dir', '.')
        logs = os.path.expanduser(kwargs.get('log_dir', 'runs'))
        if not os.path.isabs(logs):
            logs = os.path.join(root, logs)
        # Each run gets its own subdirectory; the pid suffix keeps
        # concurrent runs from colliding.
        pid = str(os.getpid())
        run = kwargs.get('run_dir')
        if run is None:
            run = pid
        else:
            run = '{}-{}'.format(run, pid)
        self._log = SummaryWriter(os.path.join(logs, run),
                                  flush_secs=int(kwargs.get('flush_secs', 2)))

    def step(self, metrics, tick, phase, tick_type=None, **kwargs):
        """Write every metric as a scalar tagged `phase/tick_type/name`."""
        tick_type = ReportingHook._infer_tick_type(phase, tick_type)
        for key, value in metrics.items():
            self._log.add_scalar('{}/{}/{}'.format(phase, tick_type, key), value, tick)
Exemplo n.º 3
0
class TBVisualizer:
    """Couples a tensorboard SummaryWriter with a plain-text training log."""

    def __init__(self, opt):
        self._opt = opt
        self._save_path = os.path.join(opt.checkpoints_dir, opt.name)

        self._log_path = os.path.join(self._save_path, 'loss_log2.txt')
        self._tb_path = os.path.join(self._save_path, 'summary.json')
        self._writer = SummaryWriter(self._save_path)

        # Mark the start of a new training session in the text log.
        with open(self._log_path, "a") as log_file:
            log_file.write('================ Training Loss (%s) ================\n' % time.strftime("%c"))

    def __del__(self):
        self._writer.close()

    def display_current_results(self, visuals, it, is_train, save_visuals=False):
        """Push each visual to tensorboard; optionally also dump it as a PNG."""
        split = 'Train' if is_train else 'Test'
        for label, image_numpy in visuals.items():
            tag = '{}/{}'.format(split, label)
            self._writer.add_image(tag, image_numpy, it)
            if save_visuals:
                util.save_image(image_numpy,
                                os.path.join(self._opt.checkpoints_dir, self._opt.name,
                                             'event_imgs', tag, '%08d.png' % it))

        self._writer.export_scalars_to_json(self._tb_path)

    def plot_scalars(self, scalars, it, is_train):
        """Write every scalar under the Train/ or Test/ namespace."""
        split = 'Train' if is_train else 'Test'
        for label, value in scalars.items():
            self._writer.add_scalar('{}/{}'.format(split, label), value, it)

    def _append_log(self, message):
        # Echo to stdout and append to the persistent loss log.
        print(message)
        with open(self._log_path, "a") as log_file:
            log_file.write('%s\n' % message)

    def print_current_train_errors(self, epoch, i, iters_per_epoch, errors, t, visuals_were_stored):
        """Log one line of per-iteration training errors."""
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        visuals_info = "v" if visuals_were_stored else ""
        message = '%s (T%s, epoch: %d, it: %d/%d, t/smpl: %.3fs) ' % (log_time, visuals_info, epoch, i, iters_per_epoch, t)
        for k, v in errors.items():
            message += '%s:%.3f ' % (k, v)
        self._append_log(message)

    def print_current_validate_errors(self, epoch, errors, t):
        """Log one line of per-epoch validation errors."""
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        message = '%s (V, epoch: %d, time_to_val: %ds) ' % (log_time, epoch, t)
        for k, v in errors.items():
            message += '%s:%.3f ' % (k, v)
        self._append_log(message)

    def save_images(self, visuals):
        """Dump every visual into the run's samples/ directory."""
        for label, image_numpy in visuals.items():
            util.save_image(image_numpy,
                            os.path.join(self._save_path, "samples", '%s.png' % label))
Exemplo n.º 4
0
    def log_to_tensorboard(self, writer: SummaryWriter, epoch: int) -> None:
        """Emit the recent TD and reward losses as tensorboard scalars.

        None or NaN values are coerced to 0.0 so the curves stay continuous.
        """
        def none_to_zero(x: Optional[float]) -> float:
            return 0.0 if x is None or math.isnan(x) else x

        writer.add_scalar("Training/td_loss",
                          none_to_zero(self.get_recent_td_loss()), epoch)
        writer.add_scalar("Training/reward_loss",
                          none_to_zero(self.get_recent_reward_loss()), epoch)
    def train(self, epoch_to_restore=0):
        """Train the generator with L1 loss, checkpointing every
        `nb_epochs_to_save` epochs and logging loss/sample grids to tensorboard.

        :param epoch_to_restore: if > 0, resume from `epoch_{n}.pth`.
        """
        g = Generator(self.nb_channels_first_layer, self.dim)

        if epoch_to_restore > 0:
            filename_model = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch_to_restore))
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        g.cuda()
        g.train()

        dataset = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader = DataLoader(dataset, self.batch_size, shuffle=True, num_workers=4, pin_memory=True)
        # Fixed batch of 16 embeddings used for the periodic sample grid.
        fixed_dataloader = DataLoader(dataset, 16)
        fixed_batch = next(iter(fixed_dataloader))

        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer = SummaryWriter(self.dir_logs)

        try:
            epoch = epoch_to_restore
            while True:  # runs until interrupted; the writer is closed in `finally`
                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1

                    for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                        g.zero_grad()
                        x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                        z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                        # FIX: call the module, not .forward(), so hooks run.
                        g_z = g(z)

                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                    # BUG FIX: log a plain Python float, not the graph-attached
                    # GPU tensor; .item() detaches and frees the graph.
                    writer.add_scalar('train_loss', loss.item(), epoch)

                z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
                g.eval()
                g_z = g(z)
                images = make_grid(g_z.data[:16], nrow=4, normalize=True)
                writer.add_image('generations', images, epoch)
                filename = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('[*] Closing Writer.')
            writer.close()
Exemplo n.º 6
0
 def test_writing_stack(self):
     """Nested summary_writer_contexts route scalars to the innermost writer."""
     with TemporaryDirectory() as tmp_dir1, TemporaryDirectory() as tmp_dir2:
         outer = SummaryWriter(tmp_dir1)
         inner = SummaryWriter(tmp_dir2)
         outer.add_scalar = MagicMock()
         inner.add_scalar = MagicMock()
         with summary_writer_context(outer):
             with summary_writer_context(inner):
                 SummaryWriterContext.add_scalar("test2", torch.ones(1))
             SummaryWriterContext.add_scalar("test1", torch.zeros(1))
         # Each writer saw exactly the scalar emitted at its nesting level.
         outer.add_scalar.assert_called_once_with(
             "test1", torch.zeros(1), global_step=0
         )
         inner.add_scalar.assert_called_once_with(
             "test2", torch.ones(1), global_step=0
         )
Exemplo n.º 7
0
 def test_swallowing_exception(self):
     """Exceptions listed in writer.exceptions_to_ignore are silently dropped."""
     with TemporaryDirectory() as tmp_dir:
         failing_writer = SummaryWriter(tmp_dir)
         failing_writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
         failing_writer.exceptions_to_ignore = (NotImplementedError, KeyError)
         # Must not raise even though add_scalar blows up underneath.
         with summary_writer_context(failing_writer):
             SummaryWriterContext.add_scalar("test", torch.ones(1))
Exemplo n.º 8
0
 def test_not_swallowing_exception(self):
     """Without exceptions_to_ignore, the writer's error propagates to the caller."""
     with TemporaryDirectory() as tmp_dir:
         writer = SummaryWriter(tmp_dir)
         writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
         # FIX: assertRaisesRegexp is a deprecated alias removed in
         # Python 3.12; use assertRaisesRegex.
         with self.assertRaisesRegex(
             NotImplementedError, "test"
         ), summary_writer_context(writer):
             SummaryWriterContext.add_scalar("test", torch.ones(1))
Exemplo n.º 9
0
 def test_writing(self):
     """A scalar added inside the context lands on the active writer with step 0."""
     with TemporaryDirectory() as tmp_dir:
         mocked_writer = SummaryWriter(tmp_dir)
         mocked_writer.add_scalar = MagicMock()
         with summary_writer_context(mocked_writer):
             SummaryWriterContext.add_scalar("test", torch.ones(1))
         mocked_writer.add_scalar.assert_called_once_with(
             "test", torch.ones(1), global_step=0
         )
Exemplo n.º 10
0
    def log_to_tensorboard(self, writer: SummaryWriter, epoch: int) -> None:
        """Write each CPE estimator's normalized score to tensorboard.

        Missing (None) or NaN scores are logged as 0.0 so curves stay continuous.
        """
        def none_to_zero(x: Optional[float]) -> float:
            return 0.0 if x is None or math.isnan(x) else x

        scalars = (
            ("Reward_CPE/Direct Method Reward", self.direct_method.normalized),
            ("Reward_CPE/IPS Reward", self.inverse_propensity.normalized),
            ("Reward_CPE/Doubly Robust Reward", self.doubly_robust.normalized),
            ("Value_CPE/Sequential Doubly Robust",
             self.sequential_doubly_robust.normalized),
            ("Value_CPE/Weighted Doubly Robust",
             self.weighted_doubly_robust.normalized),
            ("Value_CPE/MAGIC Estimator", self.magic.normalized),
        )
        for tag, score in scalars:
            writer.add_scalar(tag, none_to_zero(score), epoch)
Exemplo n.º 11
0
 def test_global_step(self):
     """increase_global_step() advances the step attached to later scalars."""
     with TemporaryDirectory() as tmp_dir:
         mocked_writer = SummaryWriter(tmp_dir)
         mocked_writer.add_scalar = MagicMock()
         with summary_writer_context(mocked_writer):
             SummaryWriterContext.add_scalar("test", torch.ones(1))
             SummaryWriterContext.increase_global_step()
             SummaryWriterContext.add_scalar("test", torch.zeros(1))
         expected = [
             call("test", torch.ones(1), global_step=0),
             call("test", torch.zeros(1), global_step=1),
         ]
         mocked_writer.add_scalar.assert_has_calls(expected)
         self.assertEqual(2, len(mocked_writer.add_scalar.mock_calls))
Exemplo n.º 12
0
def learn(learning_rate, iterations, x, y, validation=None, stop_early=False, run_comment=''):
    """Train a single-layer logistic model with summed BCE loss.

    :param validation: optional (inputs, targets) pair used to track
        validation loss and drive early stopping
    :param stop_early: if True, stop (and roll back one step) as soon as
        validation loss rises
    :return: the trained model
    """
    import copy

    # Define a neural network using high-level modules.
    writer = SummaryWriter(comment=run_comment)
    model = Sequential(
        Linear(len(x[0]), len(y[0]), bias=True)  # n inputs -> 1 output
    )
    loss_fn = BCEWithLogitsLoss(reduction='sum')  # reduction=mean converges slower.
    # TODO: Add an option to twiddle pos_weight, which lets us trade off precision and recall. Maybe also graph using add_pr_curve(), which can show how that tradeoff is going.
    optimizer = Adam(model.parameters(), lr=learning_rate)

    if validation:
        validation_ins, validation_outs = validation
        previous_validation_loss = None
    with progressbar(range(iterations)) as bar:
        for t in bar:
            y_pred = model(x)  # Make predictions.
            loss = loss_fn(y_pred, y)
            writer.add_scalar('loss', loss, t)
            if validation:
                validation_loss = loss_fn(model(validation_ins), validation_outs)
                if stop_early:
                    if previous_validation_loss is not None and previous_validation_loss < validation_loss:
                        print('Stopping early at iteration {t} because validation error rose.'.format(t=t))
                        model.load_state_dict(previous_model)
                        break
                    else:
                        previous_validation_loss = validation_loss
                        # BUG FIX: state_dict() returns live references to the
                        # model's tensors, so the "snapshot" kept mutating as
                        # training continued and the rollback above was a
                        # no-op. Deep-copy to take a real checkpoint.
                        previous_model = copy.deepcopy(model.state_dict())
                writer.add_scalar('validation_loss', validation_loss, t)
            writer.add_scalar('training_accuracy_per_tag', accuracy_per_tag(model, x, y), t)
            optimizer.zero_grad()  # Zero the gradients.
            loss.backward()  # Compute gradients.
            optimizer.step()

    # Horizontal axis is what confidence. Vertical is how many samples were that confidence.
    writer.add_histogram('confidence', confidences(model, x), t)
    writer.close()
    return model
Exemplo n.º 13
0
class TensorBoard(Callback):
    """Mirrors train/val metrics and per-group learning rates to tensorboard."""

    def __init__(self, logdir):
        super().__init__()
        self.logdir = logdir
        self.writer = None  # opened lazily in on_train_begin

    def on_train_begin(self):
        os.makedirs(self.logdir, exist_ok=True)
        self.writer = SummaryWriter(self.logdir)

    def on_epoch_end(self, epoch):
        # One scalar curve per tracked metric, split by phase.
        for name, value in self.metrics_collection.train_metrics.items():
            self.writer.add_scalar('train/{}'.format(name), float(value), global_step=epoch)

        for name, value in self.metrics_collection.val_metrics.items():
            self.writer.add_scalar('val/{}'.format(name), float(value), global_step=epoch)

        # One lr curve per optimizer parameter group.
        for idx, param_group in enumerate(self.estimator.optimizer.param_groups):
            self.writer.add_scalar('group{}/lr'.format(idx), float(param_group['lr']), global_step=epoch)

    def on_train_end(self):
        self.writer.close()
    # NOTE(review): fragment of an evolution-strategies training loop; the
    # enclosing function header, `writer`, `gen_idx`, and the loop tail are
    # outside this view.
    # Seed population: random networks paired with their evaluated reward.
    nets = [
        Net(env.observation_space.shape[0], env.action_space.n)
        for _ in range(POPULATION_SIZE)
    ]
    population = [
        (net, evaluate(env, net))
        for net in nets
    ]
    while True:
        # Rank by fitness (reward), best first; parents are the top slice.
        population.sort(key=lambda p: p[1], reverse=True)
        rewards = [p[1] for p in population[:PARENTS_COUNT]]
        reward_mean = np.mean(rewards)
        reward_max = np.max(rewards)
        reward_std = np.std(rewards)

        writer.add_scalar("reward_mean", reward_mean, gen_idx)
        writer.add_scalar("reward_std", reward_std, gen_idx)
        writer.add_scalar("reward_max", reward_max, gen_idx)
        print("%d: reward_mean=%.2f, reward_max=%.2f, reward_std=%.2f" % (
            gen_idx, reward_mean, reward_max, reward_std))
        # Success threshold of 199 — presumably a CartPole-like reward scale;
        # TODO confirm against the environment used.
        if reward_mean > 199:
            print("Solved in %d steps" % gen_idx)
            break

        # generate next population
        prev_population = population
        population = [population[0]]  # elitism: the current best survives unchanged
        for _ in range(POPULATION_SIZE-1):
            parent_idx = np.random.randint(0, PARENTS_COUNT)
            parent = prev_population[parent_idx][0]
            net = mutate_parent(parent)
Exemplo n.º 15
0
        # NOTE(review): fragment of a GAN super-resolution training step;
        # `sr`, `hr`, `lr`, `Dhr`, the models (G, D, NMD), criteria, optimizers
        # and `iter_count` are defined outside this view.
        Dsr = D(sr)

        # Real/fake targets for the BCE-style discriminator criterion.
        target_real = torch.ones((sr.shape[0], 1),
                                 dtype=torch.float).to(device)
        target_fake = torch.zeros((sr.shape[0], 1),
                                  dtype=torch.float).to(device)

        # Discriminator: push D(sr) toward fake, D(hr) toward real.
        loss_D = criterion_CE(Dsr, target_fake)
        loss_D += criterion_CE(Dhr, target_real)
        # loss_D = -torch.mean(torch.log(Dsr + eps)) - torch.mean(torch.log(1-Dhr+eps))

        optimizerD.zero_grad()
        loss_D.backward()
        optimizerD.step()

        summary.add_scalar(f'loss D/loss D',
                           loss_D.data.cpu().numpy(), iter_count)

        # Recompute sr and discriminator outputs after the D update so the
        # generator losses below use fresh graphs.
        sr = G(lr)
        nm = NMD(sr)
        Dhr = D(hr)
        Dsr = D(sr)
        loss_recon = criterion_L1(sr, hr)
        # loss_recon = torch.mean(torch.abs(hr - sr))
        loss_natural = criterion_CE(nm, target_real)
        # loss_natural = torch.mean(-torch.log(nm + eps))

        # Generator adversarial loss; NOTE(review): the Dhr/target_fake term
        # is a relativistic-style symmetric pairing (see commented formula) —
        # confirm against the paper this implements.
        loss_G = criterion_CE(Dhr, target_fake)
        loss_G += criterion_CE(Dsr, target_real)
        # loss_G = -torch.mean(torch.log(Dhr + eps)) - torch.mean(torch.log(1-Dsr+eps))

        lambda_1 = 1
Exemplo n.º 16
0
class Processor():
    """
    Processor for skeleton-based action recognition.

    Owns the model, optimizer, data loaders and tensorboard writers for a
    single training/evaluation run described by `arg`.
    """
    def __init__(self, arg):
        # `arg` is the parsed configuration namespace for this run.
        self.arg = arg
        self.save_arg()  # persist the config to <work_dir>/config.yaml first
        if arg.phase == 'train':
            if not arg.train_feeder_args['debug']:
                # Interactively offer to wipe a stale log directory so the
                # tensorboard curves start clean.
                if os.path.isdir(arg.model_saved_name):
                    print('log_dir: ', arg.model_saved_name, 'already exist')
                    answer = input('delete it? y/n:')
                    if answer == 'y':
                        shutil.rmtree(arg.model_saved_name)
                        print('Dir removed: ', arg.model_saved_name)
                        input(
                            'Refresh the website of tensorboard by pressing any keys'
                        )
                    else:
                        print('Dir not removed: ', arg.model_saved_name)
                self.train_writer = SummaryWriter(
                    os.path.join(arg.model_saved_name, 'train'), 'train')
                self.val_writer = SummaryWriter(
                    os.path.join(arg.model_saved_name, 'val'), 'val')
            else:
                # Debug mode: one writer shared by train and val.
                self.train_writer = self.val_writer = SummaryWriter(
                    os.path.join(arg.model_saved_name, 'test'), 'test')
        # NOTE: load_model() may overwrite global_step when resuming from weights.
        self.global_step = 0
        self.load_model()
        self.load_optimizer()
        self.load_data()
        self.lr = self.arg.base_lr
        self.best_acc = 0

    def load_data(self):
        """Build the test DataLoader (always) and the train one (train phase only)."""
        Feeder = import_class(self.arg.feeder)
        loaders = {}
        if self.arg.phase == 'train':
            loaders['train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker,
                drop_last=True,
                worker_init_fn=init_seed)
        loaders['test'] = torch.utils.data.DataLoader(
            dataset=Feeder(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=self.arg.num_worker,
            drop_last=False,
            worker_init_fn=init_seed)
        self.data_loader = loaders

    def load_model(self):
        """Instantiate model and loss on the output device, optionally
        restoring weights, and wrap in DataParallel for multi-GPU configs.

        Also snapshots the model source file into the work dir for
        reproducibility.
        """
        output_device = self.arg.device[0] if type(
            self.arg.device) is list else self.arg.device
        self.output_device = output_device
        Model = import_class(self.arg.model)
        # Copy the model's source next to the run outputs.
        shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
        print(Model)

        self.model = Model(**self.arg.model_args).cuda(output_device)
        #self.Archi = Architect(self.model)

        #for name, param in self.model.named_parameters():
        #print(name)
        print(self.model)
        self.loss = nn.CrossEntropyLoss().cuda(
            output_device)  # Criterion is here

        if self.arg.weights:
            # BUG FIX: was `arg.weights` — `arg` is not in scope here
            # (NameError); the checkpoint name encodes the global step as the
            # last '-'-separated field before the 3-char extension.
            self.global_step = int(self.arg.weights[:-3].split('-')[-1])
            self.print_log('Load weights from {}.'.format(self.arg.weights))
            if '.pkl' in self.arg.weights:
                # BUG FIX: pickle requires a binary-mode file; text mode ('r')
                # fails under Python 3.
                with open(self.arg.weights, 'rb') as f:
                    weights = pickle.load(f)
            else:
                weights = torch.load(self.arg.weights)

            # Strip any DataParallel 'module.' prefix and move tensors to the
            # output device.
            weights = OrderedDict(
                [[k.split('module.')[-1],
                  v.cuda(output_device)] for k, v in weights.items()])

            for w in self.arg.ignore_weights:
                if weights.pop(w, None) is not None:
                    self.print_log('Sucessfully Remove Weights: {}.'.format(w))
                else:
                    self.print_log('Can Not Remove Weights: {}.'.format(w))

            try:
                self.model.load_state_dict(weights)
            except Exception:
                # FIX: narrowed the bare `except:`. Partial restore — keep
                # current values for keys missing from the checkpoint and
                # report them.
                state = self.model.state_dict()
                diff = list(set(state.keys()).difference(set(weights.keys())))
                print('Can not find these weights:')
                for d in diff:
                    print('  ' + d)
                state.update(weights)
                self.model.load_state_dict(state)

        if type(self.arg.device) is list:
            if len(self.arg.device) > 1:
                self.model = nn.DataParallel(self.model,
                                             device_ids=self.arg.device,
                                             output_device=output_device)

    def load_optimizer(self):
        """Create the configured optimizer plus an LR-plateau scheduler.

        NOTE(review): `parameters()[1:]` deliberately skips the first
        parameter tensor — presumably intentional; confirm against the model.
        """
        trainable = list(self.model.parameters())[1:]
        if self.arg.optimizer == 'SGD':
            self.optimizer = optim.SGD(trainable,
                                       lr=self.arg.base_lr,
                                       momentum=0.9,
                                       nesterov=self.arg.nesterov,
                                       weight_decay=self.arg.weight_decay)
        elif self.arg.optimizer == 'Adam':
            self.optimizer = optim.Adam(trainable,
                                        lr=self.arg.base_lr,
                                        weight_decay=self.arg.weight_decay)
        else:
            raise ValueError()

        # Cut the lr by 10x when the monitored loss plateaus for 10 epochs.
        self.lr_scheduler = ReduceLROnPlateau(self.optimizer,
                                              mode='min',
                                              factor=0.1,
                                              patience=10,
                                              verbose=True,
                                              threshold=1e-4,
                                              threshold_mode='rel',
                                              cooldown=0)

    def save_arg(self):
        """Dump the run configuration to <work_dir>/config.yaml."""
        arg_dict = vars(self.arg)
        os.makedirs(self.arg.work_dir, exist_ok=True)
        with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
            yaml.dump(arg_dict, f)

    def adjust_learning_rate(self, epoch):
        """Set the lr for `epoch`: linear warm-up, then 0.1x step decay.

        Applies the lr to every optimizer param group and returns it.
        Raises ValueError for unsupported optimizers.
        """
        if self.arg.optimizer not in ('SGD', 'Adam'):
            raise ValueError()
        if epoch < self.arg.warm_up_epoch:
            # Linear ramp from base_lr/warm_up_epoch up to base_lr.
            lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
        else:
            # Multiply by 0.1 once per milestone in arg.step already passed.
            lr = self.arg.base_lr * (0.1**np.sum(
                epoch >= np.array(self.arg.step)))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        return lr

    def print_time(self):
        """Log the current wall-clock time through print_log."""
        now = time.asctime(time.localtime(time.time()))
        self.print_log("Local current time :  " + now)

    def print_log(self, str, print_time=True):
        """Print a message and, if enabled, append it to <work_dir>/log.txt.

        NOTE(review): the parameter shadows the builtin `str`; kept as-is for
        call-site compatibility.
        """
        if print_time:
            stamp = time.asctime(time.localtime(time.time()))
            str = "[ " + stamp + ' ] ' + str
        print(str)
        if self.arg.print_log:
            with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f:
                print(str, file=f)

    def record_time(self):
        """Reset the split timer; returns the new reference timestamp."""
        now = time.time()
        self.cur_time = now
        return now

    def split_time(self):
        """Return seconds elapsed since the last record_time()/split_time()."""
        elapsed = time.time() - self.cur_time
        self.record_time()  # restart the timer for the next split
        return elapsed

    def train(self,
              epoch,
              sampler,
              es,
              pop_size=50,
              old_es_params=[],
              save_model=False):
        """Run one training epoch and, after epoch 10, one CEM search update.

        :param epoch: zero-based epoch index
        :param sampler: sampler producing candidate architecture weightings,
            possibly reusing members of `old_es_params`
        :param es: CEM search distribution; asked for samples, told scores
        :param pop_size: number of candidate weightings per generation
        :param old_es_params: previous generation's parameters, for reuse
        :param save_model: when True, checkpoint the model after this epoch
        NOTE(review): `old_es_params=[]` is a mutable default argument; it is
        never mutated here but should still be None-guarded.
        """

        # Sample a group of smaples
        es_params, n_reused, idx_reused = sampler.ask(pop_size, old_es_params)
        print('es shape {}'.format(es_params.shape))

        # Uniform 10x8 layer/op weighting until the CEM search kicks in
        # (epoch > 10 below).
        weights = torch.ones(10, 8) * 0.125
        if epoch > 10:
            # Sample one from current Distribution and use it to train GCN
            sample = es.ask(1).reshape(-1)
            es_param = sample.reshape(10, -1)  # For 10 layers
            weights = torch.from_numpy(es_param).float().cuda()

        weights = F.softmax(weights, dim=-1)
        self.model.train()

        self.print_log('Training epoch: {}'.format(epoch + 1))

        loader = self.data_loader['train']
        self.adjust_learning_rate(epoch)
        # for name, param in self.model.named_parameters():
        #     self.train_writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch)
        loss_value = []
        self.train_writer.add_scalar('epoch', epoch, self.global_step)
        self.record_time()
        # Small nonzero seeds avoid division by zero in the proportion report.
        timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
        process = tqdm(loader)
        # Optionally toggle grad only for parameters whose name contains 'PA'
        # once `only_train_epoch` is passed.
        if self.arg.only_train_part:
            if epoch > self.arg.only_train_epoch:
                print('only train part, require grad')
                for key, value in self.model.named_parameters():
                    if 'PA' in key:
                        value.requires_grad = True
                        # print(key + '-require grad')
            else:
                print('only train part, do not require grad')
                for key, value in self.model.named_parameters():
                    if 'PA' in key:
                        value.requires_grad = False
                        # print(key + '-not require grad')
        for batch_idx, (data, label, index) in enumerate(process):
            self.global_step += 1
            # get data
            data = Variable(data.float().cuda(self.output_device),
                            requires_grad=False)
            label = Variable(label.long().cuda(self.output_device),
                             requires_grad=False)
            timer['dataloader'] += self.split_time()

            # forward
            output = self.model(data, weights)
            # if batch_idx == 0 and epoch == 0:
            #     self.train_writer.add_graph(self.model, output)
            # Some model variants also return an auxiliary l1 regularizer.
            if isinstance(output, tuple):
                output, l1 = output
                l1 = l1.mean()
            else:
                l1 = 0
            loss = self.loss(output, label) + l1

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.item())
            timer['model'] += self.split_time()

            # Batch accuracy from the argmax prediction.
            value, predict_label = torch.max(output.data, 1)
            acc = torch.mean((predict_label == label.data).float())
            self.train_writer.add_scalar('acc', acc, self.global_step)
            self.train_writer.add_scalar('loss', loss.item(), self.global_step)
            self.train_writer.add_scalar('loss_l1', l1, self.global_step)
            # self.train_writer.add_scalar('batch_time', process.iterable.last_duration, self.global_step)

            # statistics
            self.lr = self.optimizer.param_groups[0]['lr']
            self.train_writer.add_scalar('lr', self.lr, self.global_step)
            # if self.global_step % self.arg.log_interval == 0:
            #     self.print_log(
            #         '\tBatch({}/{}) done. Loss: {:.4f}  lr:{:.6f}'.format(
            #             batch_idx, len(loader), loss.item(), lr))
            timer['statistics'] += self.split_time()

        # statistics of time consumption and loss
        proportion = {
            k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values()))))
            for k, v in timer.items()
        }
        self.print_log('\tMean training loss: {:.4f}.'.format(
            np.mean(loss_value)))
        self.print_log(
            '\tTime consumption: [Data]{dataloader}, [Network]{model}'.format(
                **proportion))

        if save_model:
            # Checkpoint with any DataParallel 'module.' prefix stripped and
            # tensors moved to CPU so the file loads on any device.
            state_dict = self.model.state_dict()
            weights = OrderedDict([[k.split('module.')[-1],
                                    v.cpu()] for k, v in state_dict.items()])

            torch.save(
                weights, self.arg.model_saved_name + '-' + str(epoch) + '-' +
                str(int(self.global_step)) + '.pt')

        # Update the distribution afternon initial the networks.
        if epoch > 10:
            # Score every candidate weighting on the test loader and feed the
            # scores back into the CEM distribution.
            # NOTE(review): the hard-coded 50 should presumably be pop_size.
            scores = np.zeros(50)
            for j in range(50):
                es_param = es_params[j]
                es_param = es_param.reshape(10, -1)
                weights = torch.from_numpy(es_param).float().cuda()
                weights = F.softmax(weights, dim=-1)

                scores[j] = self.eval(epoch,
                                      weights,
                                      save_score=self.arg.save_score,
                                      loader_name=['test'])
                self.print_log('Current Archi: {}'.format(weights))
                self.print_log('Its Performance: {}'.format(scores[j]))

            es.tell(es_params, scores)
            # NOTE(review): rebinding the local has no effect on the caller's
            # old_es_params — confirm whether the caller expects the update.
            old_es_params = deepcopy(es_params)

    def eval(self,
             epoch,
             weights,
             save_score=False,
             loader_name=['test'],
             wrong_file=None,
             result_file=None):
        """Evaluate the model under the given architecture `weights`.

        :param weights: per-layer op weighting tensor passed to the model
        :param save_score: when True, pickle per-sample scores to the work dir
        :param loader_name: data-loader keys to evaluate
        :param wrong_file: optional path to record misclassified samples
        :param result_file: optional path to record predicted,true pairs
        :return: top-1 accuracy of the evaluated loader
        NOTE(review): `loader_name=['test']` is a mutable default argument;
        f_w/f_r are never closed; `volatile=True` is gone from modern PyTorch
        (use torch.no_grad()); and the `return` inside the for-loop means only
        the FIRST loader in `loader_name` is ever evaluated.
        """
        if wrong_file is not None:
            f_w = open(wrong_file, 'w')
        if result_file is not None:
            f_r = open(result_file, 'w')
        self.model.eval()
        self.print_log('Eval epoch: {}'.format(epoch + 1))
        for ln in loader_name:
            loss_value = []
            score_frag = []
            right_num_total = 0
            total_num = 0
            loss_total = 0
            step = 0
            process = tqdm(self.data_loader[ln])
            for batch_idx, (data, label, index) in enumerate(process):
                data = Variable(data.float().cuda(self.output_device),
                                requires_grad=False,
                                volatile=True)
                label = Variable(label.long().cuda(self.output_device),
                                 requires_grad=False,
                                 volatile=True)
                output = self.model(data, weights)
                # Some model variants also return an auxiliary l1 term.
                if isinstance(output, tuple):
                    output, l1 = output
                    l1 = l1.mean()
                else:
                    l1 = 0
                loss = self.loss(output, label)
                score_frag.append(output.data.cpu().numpy())
                loss_value.append(loss.item())

                _, predict_label = torch.max(output.data, 1)
                step += 1

                # Optionally record per-sample predictions / mistakes.
                if wrong_file is not None or result_file is not None:
                    predict = list(predict_label.cpu().numpy())
                    true = list(label.data.cpu().numpy())
                    for i, x in enumerate(predict):
                        if result_file is not None:
                            f_r.write(str(x) + ',' + str(true[i]) + '\n')
                        if x != true[i] and wrong_file is not None:
                            f_w.write(
                                str(index[i]) + ',' + str(x) + ',' +
                                str(true[i]) + '\n')
            score = np.concatenate(score_frag)
            loss = np.mean(loss_value)
            accuracy = self.data_loader[ln].dataset.top_k(
                score, 1)  # the top1 accuracy
            if accuracy > self.best_acc:
                self.best_acc = accuracy
            # self.lr_scheduler.step(loss)
            print('Accuracy: ', accuracy, ' model: ',
                  self.arg.model_saved_name)
            if self.arg.phase == 'train':
                # NOTE(review): l1 here is from the LAST batch only.
                self.val_writer.add_scalar('loss', loss, self.global_step)
                self.val_writer.add_scalar('loss_l1', l1, self.global_step)
                self.val_writer.add_scalar('acc', accuracy, self.global_step)

            score_dict = dict(
                zip(self.data_loader[ln].dataset.sample_name, score))
            self.print_log('\tMean {} loss of {} batches: {}.'.format(
                ln, len(self.data_loader[ln]), np.mean(loss_value)))
            for k in self.arg.show_topk:
                self.print_log('\tTop{}: {:.2f}%'.format(
                    k, 100 * self.data_loader[ln].dataset.top_k(score, k)))

            if save_score:
                with open(
                        '{}/epoch{}_{}_score.pkl'.format(
                            self.arg.work_dir, epoch + 1, ln), 'wb') as f:
                    pickle.dump(score_dict, f)
            # NOTE(review): returns inside the loop — later loader names are
            # silently skipped.
            return accuracy

    def start(self):
        """Entry point: run CEM-driven training or a one-off evaluation,
        depending on ``self.arg.phase``.
        """
        # --- cross-entropy-method (CEM) search-space setup ---
        num_layers = 10
        num_ops = 8
        search_dim = int(num_layers * num_ops)

        es_params = []
        old_es_params = []
        population = 50
        scores = [0.] * population

        # Uniform initial distribution over ops, flattened to a numpy vector.
        logits = torch.ones(num_layers, num_ops) * 0.125
        mu = F.softmax(logits, dim=-1).detach()
        mu = mu.view(mu.numel()).cpu().numpy()  # to numpy
        old_es_params = mu

        es = sepCEM(search_dim,
                    mu_init=mu,
                    sigma_init=1e-3,
                    damp=1e-3,
                    damp_limit=1e-5,
                    pop_size=population,
                    parents=population // 2)
        sampler = IMSampler(es)

        if self.arg.phase == 'train':
            self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
            self.global_step = (self.arg.start_epoch *
                                len(self.data_loader['train']) /
                                self.arg.batch_size)
            for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
                # Stop once the learning rate has decayed below threshold.
                if self.lr < 1e-3:
                    break
                is_final_epoch = epoch + 1 == self.arg.num_epoch
                save_model = is_final_epoch or (
                    (epoch + 1) % self.arg.save_interval == 0)

                self.train(epoch,
                           sampler,
                           es,
                           population,
                           old_es_params,
                           save_model=save_model)
                # NOTE: per-epoch evaluation is intentionally disabled here.

            print('best accuracy: ', self.best_acc, ' model_name: ',
                  self.arg.model_saved_name)

        elif self.arg.phase == 'test':
            wf = rf = None
            if not self.arg.test_feeder_args['debug']:
                wf = self.arg.model_saved_name + '_wrong.txt'
                rf = self.arg.model_saved_name + '_right.txt'
            if self.arg.weights is None:
                raise ValueError('Please appoint --weights.')
            self.arg.print_log = False
            self.print_log('Model:   {}.'.format(self.arg.model))
            self.print_log('Weights: {}.'.format(self.arg.weights))
            self.eval(epoch=0,
                      save_score=self.arg.save_score,
                      loader_name=['test'],
                      wrong_file=wf,
                      result_file=rf)
            self.print_log('Done.\n')
Exemplo n.º 17
0
            output = upsample(output,
                              size=(450, 450),
                              mode='bilinear',
                              align_corners=True)

            # Compute the losses, side outputs and fuse
            loss = class_balanced_cross_entropy_loss(output,
                                                     gts,
                                                     size_average=False,
                                                     batch_average=True)
            running_loss_tr += loss.item()

            # Print stuff
            if ii % num_img_tr == num_img_tr - 1:
                running_loss_tr = running_loss_tr / num_img_tr
                writer.add_scalar('data/total_loss_epoch', running_loss_tr,
                                  epoch)
                print('[Epoch: %d, numImages: %5d]' %
                      (epoch, ii * p['trainBatch'] + inputs.data.shape[0]))
                print('Loss: %f' % running_loss_tr)
                running_loss_tr = 0
                stop_time = timeit.default_timer()
                print("Execution time: " + str(stop_time - start_time) + "\n")

            # Backward the averaged gradient
            loss /= p['nAveGrad']
            loss.backward()
            aveGrad += 1

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % p['nAveGrad'] == 0:
                writer.add_scalar('data/total_loss_iter', loss.item(),
Exemplo n.º 18
0
def train(config):
    """Train a VAE on MNIST with TensorBoard logging and checkpointing.

    Every 5 epochs an evaluation pass is run and a grid of decoded prior
    samples is logged; every 2 epochs the model state dict is saved to
    ``config.model_path + config.model_name``.
    """
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    train_loader = create_mnist_dataset(config.dataset_path,
                                        config.batch_size, True)
    eval_loader = create_mnist_dataset(config.dataset_path, config.batch_size,
                                       False)

    net = VAE().to(device)
    checkpoint_file = config.model_path + config.model_name
    if config.load_pretrained_model:
        net.load_state_dict(torch.load(checkpoint_file))
        print('Load the pretrained model from %s successfully!' %
              checkpoint_file)
    else:
        weight_init(net)
        if not os.path.exists(config.model_path):
            os.makedirs(config.model_path)
        print('First time training!')
    net.train()

    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=config.learning_rate,
                                 betas=[0.5, 0.999])

    summary = SummaryWriter(config.summary_path)
    total_iter = 1

    for epoch in range(1, config.epoch + 1):
        # ---- one training epoch; images are flattened to 784-d vectors ----
        for batch_idx, (images, _) in enumerate(train_loader):
            flat = images.to(device).view(-1, 784)
            optimizer.zero_grad()
            reconstruction, mu, logvar = net(flat)
            batch_loss = loss_func(reconstruction, flat, mu, logvar)
            batch_loss.backward()
            optimizer.step()

            print('[Epoch %d|Train Batch %d] Loss = %.6f' %
                  (epoch, batch_idx, batch_loss.item()))
            summary.add_scalar('Train/Loss', batch_loss.item(), total_iter)
            total_iter += 1

        if epoch % 5 == 0:
            # ---- evaluation pass over the held-out split ----
            net.eval()
            eval_losses = []
            with torch.no_grad():
                for batch_idx, (images, _) in enumerate(eval_loader):
                    flat = images.to(device).view(-1, 784)
                    reconstruction, mu, logvar = net(flat)
                    batch_loss = loss_func(reconstruction, flat, mu, logvar)

                    print('[Epoch %d|Eval Batch %d] Loss = %.6f' %
                          (epoch, batch_idx, batch_loss.item()))
                    eval_losses.append(batch_loss.item())

                mean_eval_loss = np.mean(eval_losses)
                summary.add_scalar('Eval/Loss', mean_eval_loss, epoch)

            net.train()

            # ---- decode a grid of samples drawn from the prior ----
            with torch.no_grad():
                fake_z = torch.randn((64, net.nz)).to(device)
                fake_imgs = net.decode(fake_z).view(-1, 1, 28, 28).detach()
                fake_imgs = torchvision.utils.make_grid(
                    fake_imgs, padding=2,
                    normalize=True).detach().cpu().numpy()
                summary.add_image('Eval/Fake_imgs_after_%d_epochs' % epoch,
                                  fake_imgs, epoch)

        if epoch % 2 == 0:
            torch.save(net.state_dict(), checkpoint_file)

    summary.close()
def main():
    """Train a dueling DQN agent on ``env_name`` with epsilon-greedy exploration.

    Runs up to 3000 episodes: transitions are pushed into a replay buffer,
    the online network is updated once ``initial_exploration`` steps have
    elapsed, the target network is synced every ``update_target`` steps,
    and running score/loss are logged to TensorBoard. Stops early once the
    running score exceeds ``goal_score``.
    """
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    print('state size:', num_inputs)
    print('action size:', num_actions)

    online_net = DuelDQNet(num_inputs, num_actions)
    target_net = DuelDQNet(num_inputs, num_actions)
    update_target_model(online_net, target_net)

    optimizer = optim.Adam(online_net.parameters(), lr=lr)
    writer = SummaryWriter('logs')

    online_net.to(device)
    target_net.to(device)
    online_net.train()
    target_net.train()
    memory = Memory(replay_memory_capacity)
    running_score = 0
    epsilon = 1.0
    steps = 0
    loss = 0  # logged before the first update, hence pre-initialized

    for e in range(3000):
        done = False

        score = 0
        state = env.reset()
        state = torch.Tensor(state).to(device)
        state = state.unsqueeze(0)

        while not done:
            steps += 1
            action = get_action(state, target_net, epsilon, env)
            next_state, reward, done, _ = env.step(action)

            next_state = torch.Tensor(next_state)
            next_state = next_state.unsqueeze(0)

            mask = 0 if done else 1
            # Penalize early termination (except right at the step cap).
            reward = reward if not done or score == 499 else -1
            # Bug fix: the one-hot vector was hard-coded to size 2 even
            # though num_actions is read from the env; use the real size so
            # the loop works for any discrete action space.
            action_one_hot = np.zeros(num_actions)
            action_one_hot[action] = 1
            memory.push(state, next_state, action_one_hot, reward, mask)

            score += reward
            state = next_state

            if steps > initial_exploration:
                # Linear epsilon decay down to a floor of 0.1.
                epsilon -= 0.00005
                epsilon = max(epsilon, 0.1)

                batch = memory.sample(batch_size)
                loss = DuelDQNet.train_model(online_net, target_net, optimizer,
                                             batch)

                if steps % update_target == 0:
                    update_target_model(online_net, target_net)

        # Undo the -1 terminal penalty unless the episode ran to the cap.
        score = score if score == 500.0 else score + 1
        running_score = 0.99 * running_score + 0.01 * score
        if e % log_interval == 0:
            print('{} episode | score: {:.2f} | epsilon: {:.2f}'.format(
                e, running_score, epsilon))
            writer.add_scalar('log/score', float(running_score), e)
            writer.add_scalar('log/loss', float(loss), e)

        if running_score > goal_score:
            break
Exemplo n.º 20
0
        if (ii + 1) % opt.d_every == 0:
            optimizer_d.zero_grad()
            output = netd(real_img)
            error_d_real = criterion(output, true_labels)
            error_d_real.backward()
            noises.data.copy_(torch.randn(opt.batch_size, opt.nz, 1, 1))

            fake_img = netg(noises).detach()
            fake_output = netd(fake_img)
            error_d_fake = criterion(fake_output, fake_labels)
            error_d_fake.backward()

            error = error_d_real + error_d_fake

            print('error_d:', error.data[0])
            writer.add_scalar('data/error_d', error_d_fake.data[0], ii)

            optimizer_d.step()

        if (ii + 1) % opt.g_every == 0:
            optimizer_g.zero_grad()

            noises.data.copy_(torch.randn(opt.batch_size, opt.nz, 1, 1))
            fake_img = netg(noises)
            fake_output = netd(fake_img)
            error_g = criterion(fake_output, true_labels)

            print('error_g:,', error_g.data[0])
            writer.add_scalar('data/error_g', error_g.data[0], ii)

            error_g.backward()
Exemplo n.º 21
0
def train_spn():
    """Train the SPN (spatial propagation) sub-module of the linear style
    transfer model, configured entirely through a YACS config file.

    Side effects: creates the output directory, writes a text log,
    TensorBoard scalars, sample reconstructions every 1000 iterations, and
    SPN checkpoints every 10000 iterations plus a final one.
    """
    parser = argparse.ArgumentParser(
        description='PyTorch Style Transfer -- LinearStyleTransferWithSPN')
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='path to configuration file')
    cli_args = parser.parse_args()

    cfg.merge_from_file(cli_args.config_file)
    cfg.freeze()

    # Make sure the output directory exists before any logging happens.
    if cfg.OUTPUT_DIR:
        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    logger = setup_logger(cfg.MODEL.NAME,
                          save_dir=cfg.OUTPUT_DIR,
                          filename=cfg.MODEL.NAME + '.txt')

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    logger.info("Using {} GPUs".format(num_gpus))

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + get_pretty_env_info())

    logger.info('Loaded configuration file {}'.format(cli_args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    # Build the model and push it to the configured device.
    model = get_model(cfg.MODEL.NAME, cfg)
    model.to(cfg.DEVICE)
    logger.info(model)

    # Content images only; the iteration-based sampler yields exactly
    # MAX_ITER batches regardless of dataset size.
    train_path_content, train_path_style = get_data(cfg, dtype='train')
    content_dataset = DatasetNoSeg(cfg, train_path_content, train=True)

    random_sampler = torch.utils.data.sampler.RandomSampler(content_dataset)
    batched_sampler = torch.utils.data.sampler.BatchSampler(
        random_sampler, cfg.DATALOADER.BATCH_SIZE, drop_last=False)
    content_loader = DataLoader(content_dataset,
                                batch_sampler=IterationBasedBatchSampler(
                                    batched_sampler,
                                    cfg.OPTIMIZER.MAX_ITER,
                                    start_iter=0),
                                num_workers=cfg.DATALOADER.NUM_WORKERS)
    logger.info('Content Loader Created!')
    content_loader = iter(content_loader)

    # Only the SPN parameters are optimized here.
    optimizer = build_optimizer(cfg, model.SPN)
    lr_scheduler = build_lr_scheduler(cfg, optimizer)
    logger.info("Using Optimizer: ")
    logger.info(optimizer)
    logger.info("Using LR Scheduler: {}".format(
        cfg.OPTIMIZER.LR_SCHEDULER.NAME))

    progress = tqdm(range(cfg.OPTIMIZER.MAX_ITER))
    writer = SummaryWriter(log_dir=cfg.OUTPUT_DIR)

    for step in progress:
        content_img = next(content_loader).to(cfg.DEVICE)

        # Auto-encoder reconstruction, then SPN refinement of distortions.
        reconstructed = model.forward_with_no_trans(content_img)
        propogated = model.forward_spn(reconstructed, content_img)
        loss = model.cal_spn_loss(propogated, content_img)

        progress.set_description(
            desc='Iteration: {} -- Loss: {:.3f}'.format(step + 1, loss.item()))
        writer.add_scalar('loss', loss.item(), step + 1)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        # Periodically dump reconstructions side by side with originals.
        if step % 1000 == 0:
            rows = content_img.shape[0]
            save_image(torch.cat((reconstructed, content_img), dim=0),
                       os.path.join(cfg.OUTPUT_DIR, '{}.jpg'.format(step)),
                       nrow=rows)

        if step % 10000 == 0:
            torch.save(
                model.SPN.state_dict(),
                os.path.join(cfg.OUTPUT_DIR, '{}_lst_spn.pth'.format(step)))

    torch.save(model.SPN.state_dict(),
               os.path.join(cfg.OUTPUT_DIR, 'final_lst_spn.pth'))
    writer.close()
Exemplo n.º 22
0
def train(opt):
    """Train an EfficientDet detector according to a project config file.

    Loads ``projects/{opt.project}.yml``, builds COCO- or KITTI-style
    dataloaders, optionally resumes from the latest checkpoint, optionally
    freezes the backbone (head-only training), then runs the training loop
    with per-step TensorBoard logging, periodic checkpointing, validation
    every ``opt.val_interval`` epochs and patience-based early stopping.

    Args:
        opt: parsed command-line options (project name, paths, batch size,
            optimizer choice, save/val intervals, early-stopping settings).

    Fixes vs. the previous version: the bare ``except:`` around checkpoint
    step parsing is narrowed, the loop variable no longer shadows the
    builtin ``iter``, and ``writer.close()`` is guaranteed to run exactly
    once via ``finally``.
    """
    params = Params(f'projects/{opt.project}.yml')

    if params.num_gpus == 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # Fixed seed for reproducibility.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(42)
    else:
        torch.manual_seed(42)

    opt.saved_path = opt.saved_path + f'/{params.project_name}/'
    opt.log_path = opt.log_path + f'/{params.project_name}/tensorboard/'
    os.makedirs(opt.log_path, exist_ok=True)
    os.makedirs(opt.saved_path, exist_ok=True)

    training_params = {'batch_size': opt.batch_size,
                       'shuffle': True,
                       'drop_last': True,
                       'collate_fn': collater,
                       'num_workers': opt.num_workers}

    val_params = {'batch_size': opt.batch_size,
                  'shuffle': False,
                  'drop_last': True,
                  'collate_fn': collater,
                  'num_workers': opt.num_workers}

    # Input resolution for compound coefficients d0..d7.
    input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]

    if params.project_name == 'coco' or params.project_name == 'shape':
        training_set = CocoDataset(root_dir=os.path.join(opt.data_path, params.project_name), set=params.train_set,
            transform=transforms.Compose([Normalizer(mean=params.mean, std=params.std), Augmenter(), Resizer(input_sizes[opt.compound_coef])]))

        val_set = CocoDataset(root_dir=os.path.join(opt.data_path, params.project_name), set=params.val_set,
            transform=transforms.Compose([Normalizer(mean=params.mean, std=params.std), Resizer(input_sizes[opt.compound_coef])]))
    else:
        training_set = KITTIDataset(data_path=params.train_data_path, class_list=params.obj_list,
            transform=transforms.Compose([Normalizer(mean=params.mean, std=params.std), Augmenter(), Resizer(input_sizes[opt.compound_coef])]))

        val_set = KITTIDataset(data_path=params.val_data_path, class_list=params.obj_list,
            transform=transforms.Compose([Normalizer(mean=params.mean, std=params.std), Resizer(input_sizes[opt.compound_coef])]))

    training_generator = DataLoader(training_set, **training_params)
    val_generator = DataLoader(val_set, **val_params)

    model = EfficientDetBackbone(num_classes=len(params.obj_list), compound_coef=opt.compound_coef,
        ratios=eval(params.anchors_ratios), scales=eval(params.anchors_scales))

    # Resume from the requested (or most recent) checkpoint, if any.
    if opt.load_weights is not None:
        if opt.load_weights.endswith('.pth'):
            weights_path = opt.load_weights
        else:
            weights_path = get_last_weights(opt.saved_path)
        # Checkpoints are named ..._{epoch}_{step}.pth; recover the step.
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; only filename-parsing failures belong here.
        try:
            last_step = int(os.path.basename(weights_path).split('_')[-1].split('.')[0])
        except (ValueError, IndexError):
            last_step = 0

        try:
            ret = model.load_state_dict(torch.load(weights_path), strict=False)
        except RuntimeError as e:
            print(f'[Warning] Ignoring {e}')
            print(
                '[Warning] Don\'t panic if you see this, this might be because you load a pretrained weights with different number of classes. The rest of the weights should be loaded already.')

        print(f'[Info] loaded weights: {os.path.basename(weights_path)}, resuming checkpoint from step: {last_step}')
    else:
        last_step = 0
        print('[Info] initializing weights...')
        init_weights(model)

    # Freeze EfficientNet + BiFPN modules when training the head only.
    if opt.head_only:
        def freeze_backbone(m):
            # Disable gradients for any module whose class name marks it
            # as part of the backbone.
            classname = m.__class__.__name__
            for ntl in ['EfficientNet', 'BiFPN']:
                if ntl in classname:
                    for param in m.parameters():
                        param.requires_grad = False

        model.apply(freeze_backbone)
        print('[Info] freezed backbone')

    # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
    # apply sync_bn when using multiple gpu and batch_size per gpu is lower than 4
    #  useful when gpu memory is limited.
    # because when bn is disable, the training will be very unstable or slow to converge,
    # apply sync_bn can solve it,
    # by packing all mini-batch across all gpus as one batch and normalize, then send it back to all gpus.
    # but it would also slow down the training by a little bit.
    if params.num_gpus > 1 and opt.batch_size // params.num_gpus < 4:
        model.apply(replace_w_sync_bn)
        use_sync_bn = True
    else:
        use_sync_bn = False

    writer = SummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}/')

    # Wrap the model with the loss function to reduce memory usage on
    # gpu0 and speed up multi-GPU training.
    model = ModelWithLoss(model, debug=opt.debug)

    if params.num_gpus > 0:
        model = model.cuda()
        if params.num_gpus > 1:
            model = CustomDataParallel(model, params.num_gpus)
            if use_sync_bn:
                patch_replication_callback(model)

    if opt.optim == 'adamw':
        optimizer = torch.optim.AdamW(model.parameters(), opt.lr)
    else:
        optimizer = torch.optim.SGD(model.parameters(), opt.lr, momentum=0.9, nesterov=True)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)

    epoch = 0
    best_loss = 1e5
    best_epoch = 0
    step = max(0, last_step)
    model.train()

    num_iter_per_epoch = len(training_generator)

    try:
        for epoch in range(opt.num_epochs):
            # Skip epochs/batches already covered by the resumed step count.
            last_epoch = step // num_iter_per_epoch
            if epoch < last_epoch:
                continue

            epoch_loss = []
            progress_bar = tqdm(training_generator)
            # Renamed from `iter`, which shadowed the builtin.
            for batch_idx, data in enumerate(progress_bar):
                if batch_idx < step - last_epoch * num_iter_per_epoch:
                    progress_bar.update()
                    continue
                try:
                    imgs = data['img']
                    annot = data['annot']

                    if params.num_gpus == 1:
                        # if only one gpu, just send it to cuda:0
                        # elif multiple gpus, send it to multiple gpus in CustomDataParallel, not here
                        imgs = imgs.cuda()
                        annot = annot.cuda()

                    optimizer.zero_grad()
                    cls_loss, reg_loss = model(imgs, annot, obj_list=params.obj_list)
                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss = cls_loss + reg_loss
                    # Skip degenerate batches (no annotations / numeric blowup).
                    if loss == 0 or not torch.isfinite(loss):
                        continue

                    loss.backward()
                    optimizer.step()

                    epoch_loss.append(float(loss))

                    progress_bar.set_description(
                        'Step: {}. Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Total loss: {:.5f}'.format(
                            step, epoch, opt.num_epochs, batch_idx + 1, num_iter_per_epoch, cls_loss.item(),
                            reg_loss.item(), loss.item()))
                    writer.add_scalars('Loss', {'train': loss}, step)
                    writer.add_scalars('Regression_loss', {'train': reg_loss}, step)
                    writer.add_scalars('Classfication_loss', {'train': cls_loss}, step)

                    # log learning_rate
                    current_lr = optimizer.param_groups[0]['lr']
                    writer.add_scalar('learning_rate', current_lr, step)

                    step += 1

                    if step % opt.save_interval == 0 and step > 0:
                        save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')
                        print('checkpoint...')

                except Exception as e:
                    # Deliberate best-effort: log the failure and keep training.
                    print('[Error]', traceback.format_exc())
                    print(e)
                    continue
            scheduler.step(np.mean(epoch_loss))

            if epoch % opt.val_interval == 0:
                model.eval()
                loss_regression_ls = []
                loss_classification_ls = []
                for data in val_generator:
                    with torch.no_grad():
                        imgs = data['img']
                        annot = data['annot']

                        if params.num_gpus == 1:
                            imgs = imgs.cuda()
                            annot = annot.cuda()

                        cls_loss, reg_loss = model(imgs, annot, obj_list=params.obj_list)
                        cls_loss = cls_loss.mean()
                        reg_loss = reg_loss.mean()

                        loss = cls_loss + reg_loss
                        if loss == 0 or not torch.isfinite(loss):
                            continue

                        loss_classification_ls.append(cls_loss.item())
                        loss_regression_ls.append(reg_loss.item())

                cls_loss = np.mean(loss_classification_ls)
                reg_loss = np.mean(loss_regression_ls)
                loss = cls_loss + reg_loss

                print(
                    'Val. Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'.format(
                        epoch, opt.num_epochs, cls_loss, reg_loss, loss))
                writer.add_scalars('Loss', {'val': loss}, step)
                writer.add_scalars('Regression_loss', {'val': reg_loss}, step)
                writer.add_scalars('Classfication_loss', {'val': cls_loss}, step)

                # Checkpoint on meaningful improvement only.
                if loss + opt.es_min_delta < best_loss:
                    best_loss = loss
                    best_epoch = epoch

                    save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')

                model.train()

                # Early stopping (es_patience <= 0 disables it).
                if epoch - best_epoch > opt.es_patience > 0:
                    print('[Info] Stop training at epoch {}. The lowest loss achieved is {}'.format(epoch, best_loss))
                    break
    except KeyboardInterrupt:
        # Save a last checkpoint on manual interruption.
        save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')
    finally:
        # Fix: previously the writer was closed twice on interrupt; close once.
        writer.close()
Exemplo n.º 23
0
        exp_average = 0.0
        #taking values from all seeds
        #UPDATE - now it takes into account cases when n_seeds differ for different keys
        key_seeds = len(all_variations[key])
        for seed in range(key_seeds):

            #summing experiment rewards from all seeds
            exp_average += all_variations[key][seed][n_exp]

        #taking the average for experiment
        exp_average /= key_seeds

        variations_averages[key].append(exp_average)
        if args.tensorboard:
            writer.add_scalar('avg reward', exp_average, n_exp)

#   variations_averages is a  dict where:
#key - type of noise(const,adaptive,decreasing etc)
#value - list of average rewards for n seeds

##plotting

key1 = list(variations_averages.keys())[0]
t = np.arange(len(variations_averages[key1]))
# print(t)

plt.rcParams["figure.figsize"] = (10, 5)

for key in variations_averages.keys():
    plot_name = key
Exemplo n.º 24
0
class TBXLogger(Logger):
    """TensorBoardX Logger.

    Note that hparams will be written only after a trial has terminated.
    This logger automatically flattens nested dicts to show on TensorBoard:

        {"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
    """

    # Python types accepted as hparam values by TensorBoardX.
    VALID_HPARAMS = (str, bool, int, float, list, type(None))
    # NumPy scalar types accepted as hparam values (converted via .tolist()).
    VALID_NP_HPARAMS = (np.bool8, np.float32, np.float64, np.int32, np.int64)

    def _init(self):
        """Create the underlying ``SummaryWriter`` in ``self.logdir``.

        Raises:
            ImportError: re-raised if tensorboardX is not installed; an
                install hint is logged once first.
        """
        try:
            from tensorboardX import SummaryWriter
        except ImportError:
            if log_once("tbx-install"):
                logger.info('pip install "ray[tune]" to see TensorBoard files.')
            raise
        self._file_writer = SummaryWriter(self.logdir, flush_secs=30)
        self.last_result = None

    def on_result(self, result: Dict):
        """Flatten one result dict and write its loggable values.

        Scalars become ``add_scalar`` entries; 5-D numpy arrays are
        logged as video; other non-empty lists/arrays are attempted as
        histograms (invalid values are warned about once and skipped).
        """
        # Prefer total timesteps for the x-axis; fall back to iteration.
        step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]

        tmp = result.copy()
        for k in ["config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION]:
            if k in tmp:
                del tmp[k]  # not useful to log these

        flat_result = flatten_dict(tmp, delimiter="/")
        # All tags are namespaced under "ray/tune/".
        path = ["ray", "tune"]
        valid_result = {}

        for attr, value in flat_result.items():
            full_attr = "/".join(path + [attr])
            if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not np.isnan(value):
                valid_result[full_attr] = value
                self._file_writer.add_scalar(full_attr, value, global_step=step)
            elif (isinstance(value, list) and len(value) > 0) or (
                isinstance(value, np.ndarray) and value.size > 0
            ):
                valid_result[full_attr] = value

                # Must be video
                if isinstance(value, np.ndarray) and value.ndim == 5:
                    self._file_writer.add_video(
                        full_attr, value, global_step=step, fps=20
                    )
                    continue

                try:
                    self._file_writer.add_histogram(full_attr, value, global_step=step)
                # In case TensorboardX still doesn't think it's a valid value
                # (e.g. `[[]]`), warn and move on.
                except (ValueError, TypeError):
                    if log_once("invalid_tbx_value"):
                        logger.warning(
                            "You are trying to log an invalid value ({}={}) "
                            "via {}!".format(full_attr, value, type(self).__name__)
                        )

        # Kept so close() can log hparams against the final metrics.
        self.last_result = valid_result
        self._file_writer.flush()

    def flush(self):
        """Flush pending events to disk, if the writer exists."""
        if self._file_writer is not None:
            self._file_writer.flush()

    def close(self):
        """Log hparams (if a result was seen) and close the writer."""
        if self._file_writer is not None:
            if self.trial and self.trial.evaluated_params and self.last_result:
                flat_result = flatten_dict(self.last_result, delimiter="/")
                # Only scalar metrics can accompany the hparams summary.
                scrubbed_result = {
                    k: value
                    for k, value in flat_result.items()
                    if isinstance(value, tuple(VALID_SUMMARY_TYPES))
                }
                self._try_log_hparams(scrubbed_result)
            self._file_writer.close()

    def _try_log_hparams(self, result):
        """Best-effort hparams logging; failures are logged, not raised."""
        # TBX currently errors if the hparams value is None.
        flat_params = flatten_dict(self.trial.evaluated_params)
        scrubbed_params = {
            k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)
        }

        # NumPy scalars are converted to native Python values.
        np_params = {
            k: v.tolist()
            for k, v in flat_params.items()
            if isinstance(v, self.VALID_NP_HPARAMS)
        }

        scrubbed_params.update(np_params)

        removed = {
            k: v
            for k, v in flat_params.items()
            if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)
        }
        if removed:
            logger.info(
                "Removed the following hyperparameter values when "
                "logging to tensorboard: %s",
                str(removed),
            )

        from tensorboardX.summary import hparams

        try:
            experiment_tag, session_start_tag, session_end_tag = hparams(
                hparam_dict=scrubbed_params, metric_dict=result
            )
            self._file_writer.file_writer.add_summary(experiment_tag)
            self._file_writer.file_writer.add_summary(session_start_tag)
            self._file_writer.file_writer.add_summary(session_end_tag)
        except Exception:
            logger.exception(
                "TensorboardX failed to log hparams. "
                "This may be due to an unsupported type "
                "in the hyperparameter values."
            )
Exemplo n.º 25
0
        #print(action_cuda)

        env_info = env.step(action_cuda)[default_brain]
        reward = torch.cuda.FloatTensor([env_info.rewards[0]])
        total_reward += env_info.rewards[0]

        mask = 0 if env_info.local_done[0] else 1
        mask = torch.cuda.FloatTensor([mask])
        rollouts.insert(step, obs.data, action.data, action_log_prob.data, value.data, reward, mask)
        step += 1
        obs = env_info.observations[0]
        obs = img_to_tensor(obs)

        if env_info.local_done[0]:
            if episode % 5 == 0:
                writer.add_scalar('episode_reward', total_reward, episode)
            episode += 1
            total_reward = 0

        if step == args.num_steps:
            step = 0
            break

        if env_info.local_done[0]:
            break

    if step == 0:
        print('ppo update')
        # do ppo update
        next_value = agent(obs)[0].data
Exemplo n.º 26
0
        }

        # summarize the performance
        accu = result.pop('accu')
        accu_dict = {key: val for key, val in zip(ROIs, accu)}
        mean_accu = np.nanmean(accu)
        accu_dict.update({'mean': mean_accu})
        print(', '.join('%s: %.5f' % (key, val)
                        for key, val in result.items()))
        print('Accu: ' + ', '.join('%s: %.5f' % (key, val)
                                   for key, val in accu_dict.items()))

        # record the performance
        if logger is not None:
            for key, val in result.items():
                logger.add_scalar('%s/epoch/%s' % (stage_info[stage], key),
                                  val, epoch)
            logger.add_scalar('%s/epoch/mean_accu' % stage_info[stage],
                              mean_accu, epoch)
            logger.add_scalars('%s/epoch/accu' % stage_info[stage], accu_dict,
                               epoch)

        # do some stuffs depending on validation
        if stage == 'valid':

            # revert the matching dice score to the whole one from batches
            scores = dict()
            progress_bar = tqdm(reverter.on_batches(
                result_list, config['output_threshold']),
                                total=len(reverter.data_list),
                                dynamic_ncols=True,
                                ncols=get_tty_columns(),
Exemplo n.º 27
0
        for state in range(self.env.observation_space.n):
            state_values = [
                self.calc_action_value(state, action)
                for action in range(self.env.action_space.n)
            ]
            self.values[state] = max(state_values)


if __name__ == '__main__':
    # Driver script: alternate random exploration with value iteration until
    # the average test reward crosses the solve threshold.
    eval_env = gym.make(ENV_NAME)
    agent = Agent()
    writer = SummaryWriter(comment='-v-learning')
    iteration = 0
    best_so_far = 0.0
    solved = False
    while not solved:
        iteration += 1
        # Gather fresh transitions, then sweep the value table.
        agent.play_n_random_steps(100)
        agent.value_iteration()

        # Average greedy-policy reward over the test episodes.
        avg_reward = sum(
            agent.play_episode(eval_env) for _ in range(TEST_EPISODES)
        ) / TEST_EPISODES
        writer.add_scalar('reward', avg_reward, iteration)
        if avg_reward > best_so_far:
            print('best reward updated %.3f -> %.3f' % (best_so_far, avg_reward))
            best_so_far = avg_reward
        if avg_reward > 0.80:
            print('solved in %d iterations' % iteration)
            solved = True
    writer.close()
            obs_v = common.train_a2c(net_i2a, mb_obs, mb_rewards, mb_actions, mb_values,
                                     optimizer, tb_tracker, step_idx, device=device)
            # policy distillation
            probs_v = torch.FloatTensor(mb_probs).to(device)
            policy_opt.zero_grad()
            logits_v, _ = net_policy(obs_v)
            policy_loss_v = -F.log_softmax(logits_v, dim=1) * probs_v.view_as(logits_v)
            policy_loss_v = policy_loss_v.sum(dim=1).mean()
            policy_loss_v.backward()
            policy_opt.step()
            tb_tracker.track("loss_distill", policy_loss_v, step_idx)

            step_idx += 1

            if step_idx % TEST_EVERY_BATCH == 0:
                test_reward, test_steps = common.test_model(test_env, net_i2a, device=device)
                writer.add_scalar("test_reward", test_reward, step_idx)
                writer.add_scalar("test_steps", test_steps, step_idx)
                if best_test_reward is None or best_test_reward < test_reward:
                    if best_test_reward is not None:
                        fname = os.path.join(saves_path, "best_%08.3f_%d.dat" % (test_reward, step_idx))
                        torch.save(net_i2a.state_dict(), fname)
                        torch.save(net_policy.state_dict(), fname + ".policy")
                    else:
                        fname = os.path.join(saves_path, "em.dat")
                        torch.save(net_em.state_dict(), fname)
                    best_test_reward = test_reward
                print("%d: test reward=%.2f, steps=%.2f, best_reward=%.2f" % (
                    step_idx, test_reward, test_steps, best_test_reward))
Exemplo n.º 29
0
            for step_idx, exp in enumerate(exp_source):

                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], step_idx)
                    tracker.reward(rewards[0], step_idx)

                if step_idx % TEST_ITERS == 0:
                    ts = time.time()
                    reward, step = common.test_net(act_net,
                                                   test_env,
                                                   device=device)
                    print("Test done in %.2f sec, reward %.3f, steps %d" %
                          (time.time() - ts, reward, step))
                    writer.add_scalar("test_reward", reward, step_idx)
                    writer.add_scalar("test_step", step, step_idx)

                    if best_reward is None or best_reward < reward:
                        if best_reward is not None:
                            print("Best reward updated: %.3f -> %.3f" %
                                  (best_reward, reward))
                            name = "best_%+.3f_%d.dat" % (reward, step_idx)
                            val_name = "val_" + name
                            fname = os.path.join(save_path, name)
                            val_fname = os.path.join(save_path, val_name)
                            torch.save(act_net.state_dict(), fname)
                            torch.save(crt_net.state_dict(), val_fname)
                        best_reward = reward

                trajectory.append(exp)
Exemplo n.º 30
0
    def train(
        self,
        train_dataloader,
        output_dir,
        show_running_loss=True,
        eval_dataloader=None,
        verbose=True,
        **kwargs,
    ):
        """
        Trains the model on train_dataset.

        Utility function to be used by the train_model() method. Not intended to be used directly.

        Args:
            train_dataloader: DataLoader yielding batches of
                (input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids).
            output_dir: Directory where step/epoch checkpoints are written.
            show_running_loss: If True, print the running loss on every step.
            eval_dataloader: DataLoader used when args["evaluate_during_training"]
                is set; required in that case.
            verbose: Controls evaluation and early-stopping log output.
            **kwargs: Extra metric callables forwarded to eval_model() and to
                _create_training_progress_scores().

        Returns:
            Tuple of (global_step, tr_loss / global_step): the number of
            optimizer steps taken and the mean training loss per step.
        """

        device = self.device
        model = self.model
        args = self.args

        tb_writer = SummaryWriter(logdir=args["tensorboard_dir"])

        # Total optimizer steps over the whole run (accumulation-adjusted).
        t_total = len(train_dataloader) // args[
            "gradient_accumulation_steps"] * args["num_train_epochs"]

        # Standard transformer recipe: no weight decay on biases / LayerNorm.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                "weight_decay":
                args["weight_decay"],
            },
            {
                "params": [
                    p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay":
                0.0,
            },
        ]

        # warmup_steps == 0 means "derive from warmup_ratio"; an explicit
        # non-zero warmup_steps wins.
        warmup_steps = math.ceil(t_total * args["warmup_ratio"])
        args["warmup_steps"] = warmup_steps if args[
            "warmup_steps"] == 0 else args["warmup_steps"]

        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args["learning_rate"],
                          eps=args["adam_epsilon"])
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=args["warmup_steps"],
            num_training_steps=t_total)

        # Optional mixed-precision training via NVIDIA apex.
        if args["fp16"]:
            try:
                from apex import amp
            except ImportError:
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
                )

            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level=args["fp16_opt_level"])

        if args["n_gpu"] > 1:
            model = torch.nn.DataParallel(model)

        global_step = 0
        tr_loss, logging_loss = 0.0, 0.0
        model.zero_grad()
        train_iterator = trange(int(args["num_train_epochs"]),
                                desc="Epoch",
                                disable=args["silent"])
        epoch_number = 0
        best_eval_metric = None
        early_stopping_counter = 0

        # NOTE(review): training_progress_scores is only defined when
        # evaluate_during_training is set; all later uses are behind the
        # same flag, so this is safe but fragile to refactors.
        if args["evaluate_during_training"]:
            training_progress_scores = self._create_training_progress_scores(
                **kwargs)

        if args["wandb_project"]:
            wandb.init(project=args["wandb_project"],
                       config={**args},
                       **args["wandb_kwargs"])
            wandb.watch(self.model)

        model.train()
        for _ in train_iterator:
            # epoch_iterator = tqdm(train_dataloader, desc="Iteration")
            for step, batch in enumerate(
                    tqdm(train_dataloader,
                         desc="Current iteration",
                         disable=args["silent"])):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch

                # Double-headed GPT forward: language-modeling loss plus
                # multiple-choice classification loss.
                (lm_loss), (mc_loss), *_ = model(
                    input_ids,
                    token_type_ids=token_type_ids,
                    mc_token_ids=mc_token_ids,
                    mc_labels=mc_labels,
                    lm_labels=lm_labels,
                )
                # model outputs are always tuple in pytorch-transformers (see doc)
                loss = lm_loss * args["lm_coef"] + mc_loss * args["mc_coef"]

                if args["n_gpu"] > 1:
                    loss = loss.mean(
                    )  # mean() to average on multi-gpu parallel training

                # Unscaled loss for display/reporting (before accumulation division).
                current_loss = loss.item()

                if show_running_loss:
                    print("\rRunning loss: %f" % current_loss, end="")

                if args["gradient_accumulation_steps"] > 1:
                    loss = loss / args["gradient_accumulation_steps"]

                if args["fp16"]:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    # torch.nn.utils.clip_grad_norm_(
                    #     amp.master_params(optimizer), args["max_grad_norm"]
                    # )
                else:
                    loss.backward()
                    # torch.nn.utils.clip_grad_norm_(
                    #     model.parameters(), args["max_grad_norm"]
                    # )

                tr_loss += loss.item()
                # Only step the optimizer once every accumulation cycle.
                if (step + 1) % args["gradient_accumulation_steps"] == 0:
                    if args["fp16"]:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer),
                            args["max_grad_norm"])
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                       args["max_grad_norm"])

                    optimizer.step()
                    scheduler.step()  # Update learning rate schedule
                    model.zero_grad()
                    global_step += 1

                    if args["logging_steps"] > 0 and global_step % args[
                            "logging_steps"] == 0:
                        # Log metrics
                        # NOTE(review): scheduler.get_lr() is deprecated in
                        # newer torch in favor of get_last_lr(); left as-is.
                        tb_writer.add_scalar("lr",
                                             scheduler.get_lr()[0],
                                             global_step)
                        tb_writer.add_scalar("loss", (tr_loss - logging_loss) /
                                             args["logging_steps"],
                                             global_step)
                        logging_loss = tr_loss
                        if args["wandb_project"]:
                            wandb.log({
                                "Training loss": current_loss,
                                "lr": scheduler.get_lr()[0],
                                "global_step": global_step,
                            })

                    if args["save_steps"] > 0 and global_step % args[
                            "save_steps"] == 0:
                        # Save model checkpoint
                        output_dir_current = os.path.join(
                            output_dir, "checkpoint-{}".format(global_step))

                        self._save_model(output_dir_current, model=model)

                    if args["evaluate_during_training"] and (
                            args["evaluate_during_training_steps"] > 0
                            and global_step %
                            args["evaluate_during_training_steps"] == 0):
                        # Only evaluate when single GPU otherwise metrics may not average well
                        results, _, _ = self.eval_model(
                            eval_dataloader,
                            verbose=verbose
                            and args["evaluate_during_training_verbose"],
                            silent=True,
                            **kwargs,
                        )
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value,
                                                 global_step)

                        output_dir_current = os.path.join(
                            output_dir, "checkpoint-{}".format(global_step))

                        if args["save_eval_checkpoints"]:
                            self._save_model(output_dir_current,
                                             model=model,
                                             results=results)

                        training_progress_scores["global_step"].append(
                            global_step)
                        training_progress_scores["train_loss"].append(
                            current_loss)
                        for key in results:
                            training_progress_scores[key].append(results[key])
                        report = pd.DataFrame(training_progress_scores)
                        report.to_csv(
                            os.path.join(args["output_dir"],
                                         "training_progress_scores.csv"),
                            index=False,
                        )

                        if args["wandb_project"]:
                            wandb.log(
                                self._get_last_metrics(
                                    training_progress_scores))

                        # NOTE(review): truthiness check — a best metric of
                        # exactly 0.0 re-enters this branch on every eval.
                        if not best_eval_metric:
                            best_eval_metric = results[
                                args["early_stopping_metric"]]
                            self._save_model(args["best_model_dir"],
                                             model=model,
                                             results=results)
                        # Early-stopping ladder: first branch minimizes the
                        # metric, the mirror `else` below maximizes it.
                        if best_eval_metric and args[
                                "early_stopping_metric_minimize"]:
                            if (results[args["early_stopping_metric"]] -
                                    best_eval_metric <
                                    args["early_stopping_delta"]):
                                best_eval_metric = results[
                                    args["early_stopping_metric"]]
                                self._save_model(args["best_model_dir"],
                                                 model=model,
                                                 results=results)
                                early_stopping_counter = 0
                            else:
                                if args["use_early_stopping"]:
                                    if early_stopping_counter < args[
                                            "early_stopping_patience"]:
                                        early_stopping_counter += 1
                                        if verbose:
                                            logger.info(
                                                f" No improvement in {args['early_stopping_metric']}"
                                            )
                                            logger.info(
                                                f" Current step: {early_stopping_counter}"
                                            )
                                            logger.info(
                                                f" Early stopping patience: {args['early_stopping_patience']}"
                                            )
                                    else:
                                        if verbose:
                                            logger.info(
                                                f" Patience of {args['early_stopping_patience']} steps reached"
                                            )
                                            logger.info(
                                                " Training terminated.")
                                            train_iterator.close()
                                        return global_step, tr_loss / global_step
                        else:
                            # Maximize-metric mirror of the branch above.
                            if (results[args["early_stopping_metric"]] -
                                    best_eval_metric >
                                    args["early_stopping_delta"]):
                                best_eval_metric = results[
                                    args["early_stopping_metric"]]
                                self._save_model(args["best_model_dir"],
                                                 model=model,
                                                 results=results)
                                early_stopping_counter = 0
                            else:
                                if args["use_early_stopping"]:
                                    if early_stopping_counter < args[
                                            "early_stopping_patience"]:
                                        early_stopping_counter += 1
                                        if verbose:
                                            logger.info(
                                                f" No improvement in {args['early_stopping_metric']}"
                                            )
                                            logger.info(
                                                f" Current step: {early_stopping_counter}"
                                            )
                                            logger.info(
                                                f" Early stopping patience: {args['early_stopping_patience']}"
                                            )
                                    else:
                                        if verbose:
                                            logger.info(
                                                f" Patience of {args['early_stopping_patience']} steps reached"
                                            )
                                            logger.info(
                                                " Training terminated.")
                                            train_iterator.close()
                                        return global_step, tr_loss / global_step

            # --- end-of-epoch checkpointing / evaluation ---
            epoch_number += 1
            output_dir_current = os.path.join(
                output_dir,
                "checkpoint-{}-epoch-{}".format(global_step, epoch_number))

            if args["save_model_every_epoch"] or args[
                    "evaluate_during_training"]:
                os.makedirs(output_dir_current, exist_ok=True)

            if args["save_model_every_epoch"]:
                self._save_model(output_dir_current, model=model)

            if args["evaluate_during_training"]:
                results, _, _ = self.eval_model(
                    eval_dataloader,
                    verbose=verbose
                    and args["evaluate_during_training_verbose"],
                    silent=True,
                    **kwargs,
                )

                self._save_model(output_dir_current, results=results)

                training_progress_scores["global_step"].append(global_step)
                training_progress_scores["train_loss"].append(current_loss)
                for key in results:
                    training_progress_scores[key].append(results[key])
                report = pd.DataFrame(training_progress_scores)
                report.to_csv(os.path.join(args["output_dir"],
                                           "training_progress_scores.csv"),
                              index=False)

                if args["wandb_project"]:
                    wandb.log(self._get_last_metrics(training_progress_scores))

                if not best_eval_metric:
                    best_eval_metric = results[args["early_stopping_metric"]]
                    self._save_model(args["best_model_dir"],
                                     model=model,
                                     results=results)
                # NOTE(review): unlike the in-epoch ladder, the epoch-end
                # version only tracks the best model and never early-stops.
                # Also, when best_eval_metric is falsy (e.g. exactly 0.0) the
                # maximize branch below runs even if minimize was configured.
                if best_eval_metric and args["early_stopping_metric_minimize"]:
                    if results[args[
                            "early_stopping_metric"]] - best_eval_metric < args[
                                "early_stopping_delta"]:
                        best_eval_metric = results[
                            args["early_stopping_metric"]]
                        self._save_model(args["best_model_dir"],
                                         model=model,
                                         results=results)
                        early_stopping_counter = 0
                else:
                    if results[args[
                            "early_stopping_metric"]] - best_eval_metric > args[
                                "early_stopping_delta"]:
                        best_eval_metric = results[
                            args["early_stopping_metric"]]
                        self._save_model(args["best_model_dir"],
                                         model=model,
                                         results=results)
                        early_stopping_counter = 0

        return global_step, tr_loss / global_step
Exemplo n.º 31
0
# Demo fixtures for the tensorboardX logging examples below.
resnet18 = models.resnet18(False)  # untrained ResNet-18 (pretrained=False)
writer = SummaryWriter()  # logs to the default ./runs/<timestamp> directory
sample_rate = 44100  # audio sample rate in Hz
# presumably musical-note frequencies in Hz for an audio demo — TODO confirm
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]

# Hand-made per-threshold confusion-matrix counts for a PR-curve demo:
# 5 threshold bins over 75 positives (tp + fn) and 150 negatives (fp + tn);
# `precision` / `recall` are the values derived from these counts.
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]

for n_iter in range(100):
    s1 = torch.rand(1)  # value to keep
    s2 = torch.rand(1)
    writer.add_scalar('data/scalar1', s1[0],
                      n_iter)  # data grouping by `slash`
    writer.add_scalars(
        'data/scalar_group', {
            "xsinx": n_iter * np.sin(n_iter),
            "xcosx": n_iter * np.cos(n_iter),
            "arctanx": np.arctan(n_iter)
        }, n_iter)
    x = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = vutils.make_grid(x, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)  # Tensor
        #writer.add_image('astronaut', skimage.data.astronaut(), n_iter) # numpy
        #writer.add_image('imread', skimage.io.imread('screenshots/audio.png'), n_iter) # numpy
        x = torch.zeros(sample_rate * 2)
        for i in range(x.size(0)):
            x[i] = np.cos(
Exemplo n.º 32
0
def main():
    """
    YOLOv3 trainer. See README for details.

    Parses CLI arguments and a YAML config, builds the YOLOv3 model, the
    COCO dataset/evaluator and an SGD optimizer with burn-in LR scheduling,
    then runs the training loop with periodic COCO evaluation, TensorBoard
    logging, random multi-scale resizing and checkpointing.
    """
    args = parse_args()
    print("Setting Arguments.. : ", args)

    cuda = torch.cuda.is_available() and args.use_cuda
    os.makedirs(args.checkpoint_dir, exist_ok=True)

    # Parse config settings
    with open(args.cfg, 'r') as f:
        # FIX: yaml.load() without an explicit Loader is unsafe on old PyYAML
        # and a TypeError on PyYAML >= 6.0; safe_load is equivalent for plain
        # configuration mappings.
        cfg = yaml.safe_load(f)

    print("successfully loaded config file: ", cfg)

    momentum = cfg['TRAIN']['MOMENTUM']
    decay = cfg['TRAIN']['DECAY']
    burn_in = cfg['TRAIN']['BURN_IN']
    iter_size = cfg['TRAIN']['MAXITER']
    # SECURITY NOTE: eval() executes arbitrary code from the config file;
    # acceptable only because the YAML is trusted local input.
    steps = eval(cfg['TRAIN']['STEPS'])
    batch_size = cfg['TRAIN']['BATCHSIZE']
    subdivision = cfg['TRAIN']['SUBDIVISION']
    ignore_thre = cfg['TRAIN']['IGNORETHRE']
    random_resize = cfg['AUGMENTATION']['RANDRESIZE']
    # LR is specified per effective batch; normalize to per-sample.
    base_lr = cfg['TRAIN']['LR'] / batch_size / subdivision
    gradient_clip = cfg['TRAIN']['GRADIENT_CLIP']

    print('effective_batch_size = batch_size * iter_size = %d * %d' %
          (batch_size, subdivision))

    # Make trainer behavior deterministic
    set_seed(seed=0)
    setup_cudnn(deterministic=True)

    # Learning rate setup: quartic burn-in ramp, then step decay at `steps`.
    def burnin_schedule(i):
        if i < burn_in:
            factor = pow(i / burn_in, 4)
        elif i < steps[0]:
            factor = 1.0
        elif i < steps[1]:
            factor = 0.1
        else:
            factor = 0.01
        return factor

    # Initiate model
    model = YOLOv3(cfg['MODEL'], ignore_thre=ignore_thre)

    # Initial weights: darknet-format file takes precedence over a pytorch
    # checkpoint; the checkpoint may be a raw state_dict or a wrapped dict.
    if args.weights_path:
        print("loading darknet weights....", args.weights_path)
        parse_yolo_weights(model, args.weights_path)
    elif args.checkpoint:
        print("loading pytorch ckpt...", args.checkpoint)
        state = torch.load(args.checkpoint)
        if 'model_state_dict' in state.keys():
            model.load_state_dict(state['model_state_dict'])
        else:
            model.load_state_dict(state)

    if cuda:
        print("using cuda")
        model = model.cuda()

    if args.tfboard_dir:
        print("using tfboard")
        from tensorboardX import SummaryWriter
        tblogger = SummaryWriter(args.tfboard_dir)

    model.train()

    imgsize = cfg['TRAIN']['IMGSIZE']
    dataset = COCODataset(model_type=cfg['MODEL']['TYPE'],
                          data_dir='COCO/',
                          img_size=imgsize,
                          augmentation=cfg['AUGMENTATION'],
                          debug=args.debug)

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=args.n_cpu)
    dataiterator = iter(dataloader)

    evaluator = COCOAPIEvaluator(model_type=cfg['MODEL']['TYPE'],
                                 data_dir='COCO/',
                                 img_size=cfg['TEST']['IMGSIZE'],
                                 confthre=cfg['TEST']['CONFTHRE'],
                                 nmsthre=cfg['TEST']['NMSTHRE'])

    dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    # optimizer setup
    # set weight decay only on conv.weight
    params_dict = dict(model.named_parameters())
    params = []
    for key, value in params_dict.items():
        if 'conv.weight' in key:
            params += [{
                'params': value,
                'weight_decay': decay * batch_size * subdivision
            }]
        else:
            params += [{'params': value, 'weight_decay': 0.0}]
    optimizer = optim.SGD(params,
                          lr=base_lr,
                          momentum=momentum,
                          dampening=0,
                          weight_decay=decay * batch_size * subdivision)

    iter_state = 0

    # Resume optimizer state and iteration counter from the checkpoint.
    if args.checkpoint:
        if 'optimizer_state_dict' in state.keys():
            optimizer.load_state_dict(state['optimizer_state_dict'])
            iter_state = state['iter'] + 1

    scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)

    # start training loop
    for iter_i in range(iter_state, iter_size + 1):

        # COCO evaluation
        if iter_i % args.eval_interval == 0:
            print('evaluating...')
            ap = evaluator.evaluate(model)
            model.train()  # evaluate() switches the model to eval mode
            if args.tfboard_dir:
                # val/aP
                tblogger.add_scalar('val/aP50', ap['aP50'], iter_i)
                tblogger.add_scalar('val/aP75', ap['aP75'], iter_i)
                tblogger.add_scalar('val/aP5095', ap['aP5095'], iter_i)
                tblogger.add_scalar('val/aP5095_S', ap['aP5095_S'], iter_i)
                tblogger.add_scalar('val/aP5095_M', ap['aP5095_M'], iter_i)
                tblogger.add_scalar('val/aP5095_L', ap['aP5095_L'], iter_i)

        # subdivision loop: accumulate gradients over `subdivision` mini-batches
        optimizer.zero_grad()
        for inner_iter_i in range(subdivision):
            try:
                imgs, targets, _, _ = next(dataiterator)  # load a batch
            except StopIteration:
                dataiterator = iter(dataloader)
                imgs, targets, _, _ = next(dataiterator)  # load a batch
            imgs = Variable(imgs.type(dtype))
            targets = Variable(targets.type(dtype), requires_grad=False)
            loss = model(imgs, targets)
            loss.backward()

        if gradient_clip >= 0:
            # FIX: clip_grad_norm is the deprecated alias (removed in recent
            # PyTorch); clip_grad_norm_ is the supported in-place API.
            torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)

        optimizer.step()
        scheduler.step()

        if iter_i % 10 == 0:
            # logging
            current_lr = scheduler.get_lr()[0] * batch_size * subdivision
            print(
                '[Iter %d/%d] [lr %f] '
                '[Losses: xy %f, wh %f, conf %f, cls %f, total %f, imgsize %d]'
                % (iter_i, iter_size, current_lr, model.loss_dict['xy'],
                   model.loss_dict['wh'], model.loss_dict['conf'],
                   model.loss_dict['cls'], loss, imgsize),
                flush=True)

            if args.tfboard_dir:
                # lr
                tblogger.add_scalar('lr', current_lr, iter_i)
                # train/loss
                tblogger.add_scalar('train/loss_xy', model.loss_dict['xy'],
                                    iter_i)
                tblogger.add_scalar('train/loss_wh', model.loss_dict['wh'],
                                    iter_i)
                tblogger.add_scalar('train/loss_conf', model.loss_dict['conf'],
                                    iter_i)
                tblogger.add_scalar('train/loss_cls', model.loss_dict['cls'],
                                    iter_i)
                tblogger.add_scalar('train/loss', loss, iter_i)

            # random resizing: pick a new size in {320, ..., 608} every 10
            # iterations (shares the logging cadence) and rebuild the loader.
            if random_resize:
                # FIX: the original `randint(0, 9) % 10` applied a no-op
                # modulo to a value already in [0, 9].
                imgsize = (random.randint(0, 9) + 10) * 32
                dataset.img_shape = (imgsize, imgsize)
                dataset.img_size = imgsize
                dataloader = torch.utils.data.DataLoader(
                    dataset,
                    batch_size=batch_size,
                    shuffle=True,
                    num_workers=args.n_cpu)
                dataiterator = iter(dataloader)

        # save checkpoint
        if args.checkpoint_dir and iter_i > 0 and (
                iter_i % args.checkpoint_interval == 0):
            torch.save(
                {
                    'iter': iter_i,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                },
                os.path.join(args.checkpoint_dir,
                             "snapshot" + str(iter_i) + ".ckpt"))

    if args.tfboard_dir:
        tblogger.close()
Exemplo n.º 33
0
def main():
    """Retrain and evaluate a DARTS-style genotype on CIFAR-10.

    Relies on module-level ``args`` plus project helpers (``Network``,
    ``utils``, ``genotypes``, ``train``, ``infer``).  Logs per-epoch
    metrics to TensorBoard and saves the weights of the best
    validation epoch to ``args.save``.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Seed every RNG source for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # NOTE: eval on a config-supplied name; acceptable only for trusted input.
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)

    model = Network(CIFAR_CLASSES, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    train_transform, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data,
                             train=False,
                             download=True,
                             transform=test_transform)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=8)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    train_data = dset.CIFAR10(root=args.data,
                              train=True,
                              download=True,
                              transform=train_transform)

    # Fix: the training loader must shuffle each epoch (the test loader
    # explicitly sets shuffle=False; this one previously used the default,
    # i.e. no shuffling of the training data).
    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=8)

    best_acc = 0.0
    writer = SummaryWriter(args.save)
    try:
        for epoch in range(args.epochs):
            scheduler.step()
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

            train_acc, train_obj = train(train_queue, model, criterion,
                                         optimizer)
            logging.info('train_acc %f', train_acc)

            valid_acc, valid_obj = infer(test_queue, model, criterion)
            # Fixed tag typos ('trai_loss' / 'trian_acc') and supply the
            # epoch as global_step so TensorBoard renders proper curves
            # instead of stacking every value at step 0.
            writer.add_scalar('train_loss', train_obj, epoch)
            writer.add_scalar('train_acc', train_acc, epoch)
            writer.add_scalar('val_loss', valid_obj, epoch)
            writer.add_scalar('val_acc', valid_acc, epoch)

            if valid_acc > best_acc:
                best_acc = valid_acc
                logging.info('epoch %d, valid_acc %f, best_acc %f', epoch,
                             valid_acc, best_acc)
                utils.save(model,
                           os.path.join(args.save, 'weights_retrain.pt'))
    finally:
        # Flush TensorBoard event files even if training aborts.
        writer.close()
Exemplo n.º 34
0
class Session:
    """Orchestrates n-step DQN training with a prioritized replay buffer.

    Wires together the environment, online/target networks, epsilon
    schedule and optimizer, and logs progress to TensorBoard.
    """

    def __init__(self, env, buffer, net, target_net, epsilon_tracker, device,
                 batch_size, sync_every, discount_factor, learning_rate,
                 discount_steps):
        self.env = env
        self.buffer = buffer                  # prioritized replay buffer
        self.net = net                        # online Q-network
        self.target_net = target_net          # periodically-synced copy
        self.epsilon_greedy = epsilon_tracker
        self.device = device
        self.batch_size = batch_size
        self.sync_steps = sync_every          # target-net sync period (steps)
        self.discount_steps = discount_steps  # n of the n-step return
        self.discount_factor = discount_factor
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          lr=learning_rate)
        self.writer = SummaryWriter(
            comment='-dqn-n-step-' +
            datetime.now().isoformat(timespec='seconds'))
        self._reset()
        self.episode_steps = EpisodeSteps(self.discount_steps)

    def _reset(self):
        """Start a fresh episode: reset the env and the reward accumulator."""
        self.state = self.env.reset()
        self.total_episode_reward = 0

    def train(self, target_reward):
        """Run the training loop until the 100-episode mean reward exceeds
        ``target_reward``."""
        step = 0
        episode_rewards = []
        while True:
            self.optimizer.zero_grad()

            epsilon = self.epsilon_greedy.decay(step)
            episode_reward = self._play_single_step(epsilon)

            # Wait until the buffer holds at least one batch before learning.
            if len(self.buffer) < self.batch_size:
                print('\rFilling up the replay buffer...', end='')
                continue

            states, actions, rewards, dones, next_states, sample_indexes, sample_weights = self.buffer.sample(
                self.batch_size)
            loss, sample_priorities = self._calculate_loss(
                states, actions, next_states, dones, rewards, sample_weights)
            self.buffer.update_priorities(sample_indexes, sample_priorities)
            loss.backward()
            self.optimizer.step()
            self._periodic_sync_target_network(step)

            if episode_reward is not None:
                episode_rewards.append(episode_reward)
                mean_reward = np.array(episode_rewards)[-100:].mean()
                self._report_progress(step, loss.item(), episode_rewards,
                                      mean_reward, epsilon)
                if mean_reward > target_reward:
                    print('\nEnvironment Solved!')
                    self.writer.close()
                    break

            step += 1

    @torch.no_grad()
    def _play_single_step(self, epsilon):
        """Take one epsilon-greedy env step; return the total episode reward
        when the episode just finished, else ``None``."""
        episode_reward = None
        state_t = torch.FloatTensor(np.array([self.state],
                                             copy=False)).to(self.device)
        q_actions = self.net(state_t)
        action = torch.argmax(q_actions, dim=1).item()
        if np.random.random() < epsilon:
            action = np.random.choice(self.env.action_space.n)
        next_state, reward, done, _ = self.env.step(action)
        self.total_episode_reward += reward

        self.episode_steps.append(self.state, action, reward, done, next_state)
        if self.episode_steps.completed():
            # Roll the n-step transition into the replay buffer.
            self.episode_steps.roll_out(discount_factor=self.discount_factor)
            self.buffer.append(self.episode_steps)
            self.episode_steps = EpisodeSteps(self.discount_steps)

        if done:
            episode_reward = self.total_episode_reward
            self._reset()
        else:
            self.state = next_state
        return episode_reward

    def _calculate_loss(self, states, actions, next_states, dones, rewards,
                        sample_weights):
        """Return (weighted MSE loss, per-sample priorities) for a batch."""
        state_q_all = self.net(states)
        state_q_taken_action = state_q_all.gather(
            1, actions.unsqueeze(-1)).squeeze(-1)

        with torch.no_grad():
            next_state_q_all = self.target_net(next_states)
            next_state_q_max = torch.max(next_state_q_all, dim=1)[0]
            next_state_q_max[dones] = 0  # no bootstrap past terminal states
            state_q_expected = rewards + self.discount_factor * next_state_q_max
            state_q_expected = state_q_expected.detach()
        # PyTorch doesn't support weights for  MSELoss class
        loss = (state_q_expected - state_q_taken_action)**2
        weighted_loss = sample_weights * loss
        return weighted_loss.mean(), (weighted_loss + 1e-6).data.cpu().numpy()

    def _periodic_sync_target_network(self, step):
        """Copy online-net weights into the target net every ``sync_steps``.

        Bug fix: the original condition ``if step % self.sync_steps:`` was
        inverted — it synced on every step EXCEPT multiples of
        ``sync_steps``, defeating the purpose of a lagged target network.
        """
        if step % self.sync_steps == 0:
            self.target_net.load_state_dict(self.net.state_dict())

    def _report_progress(self, step, loss, episode_rewards, mean_reward,
                         epsilon):
        """Log scalars to TensorBoard and print a one-line status update."""
        self.writer.add_scalar('Reward', mean_reward, step)
        self.writer.add_scalar('loss', loss, step)
        self.writer.add_scalar('epsilon', epsilon, step)
        print(
            f'\rsteps:{step} , episodes:{len(episode_rewards)}, loss: {loss:.6f} , '
            f'eps: {epsilon:.2f}, reward: {mean_reward:.2f}',
            end='')

    def demonstrate(self, net_state_file_path=None):
        """Demonstrate the performance of the trained net in a video"""
        env = gym.wrappers.Monitor(self.env,
                                   'videos',
                                   video_callable=lambda episode_id: True,
                                   force=True)
        if net_state_file_path:
            state_dict = torch.load(net_state_file_path,
                                    map_location=lambda stg, _: stg)
            self.net.load_state_dict(state_dict)
        state = env.reset()
        total_reward = 0
        while True:
            env.render()
            action = self.net(torch.FloatTensor([state])).max(dim=1)[1]
            new_state, reward, done, _ = env.step(action.item())
            total_reward += reward
            if done:
                break
            state = new_state
        print("Total reward: %.2f" % total_reward)
Exemplo n.º 35
0
    SVGD.InitMomentumUpdaters()
    for i in pbar:

        args.step_size = GetInnerStepSize(i)

        #print('i, len', i, len(M))
        M = SVGD.step(M, retain_graph=False, step_size=args.step_size)

        for paramsvec in M:
            for param in paramsvec:
                param.detach_()
                param.requires_grad = True
        with torch.no_grad():
            logp = 0
            for paramsvec in M:
                logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)

        logp.detach_()
        logp = logp.item() / len(M)

        writer.add_scalar('SVGDLogP', logp, i + iii*args.iters)
        #print(logp)
        #torch.cuda.empty_cache()
        pbar.set_description("SVGD fitting")
        pbar.set_postfix({'inner_lr':args.step_size, 'logp':logp})
    torch.cuda.empty_cache()
    logps.update(logp)
    M = raw_M

print(logps.mean)
Exemplo n.º 36
0
class InitiateTraining(object):
    """Fine-tunes a ResNet-18 bounding-box regressor on the CUB dataset.

    Reads hyperparameters from a config file, trains with SmoothL1 loss,
    logs metrics to TensorBoard and checkpoints the best model.
    """

    def __init__(self, args):
        self.train_path = args.train_images_path
        self.label_path = args.train_labels_path
        self.split_rate = args.split_rate
        self.pre_trained = args.pre_trained
        self.config = args.config
        self.epoch = args.epoch
        self.experiment = args.experiment
        self.save_model = args.save_model
        self.criterion = nn.SmoothL1Loss()
        self.hyperparameters = read_file(self.config)
        # Lower is better for these trackers: they start at a large
        # sentinel and are driven down by the validation metric
        # (compute_acc appears to return an error-style measure —
        # the checkpoint test below uses `<`).
        self.best_epoch = 1e+10
        self.best_accuracy = 1e+10
        self.batch_size = self.hyperparameters['batch_size']
        self.transform = transforms.Compose([
            transforms.Resize((self.hyperparameters['image_size'],
                               self.hyperparameters['image_size'])),
            transforms.ToTensor(),
            # ImageNet channel statistics (matches the pretrained backbone).
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.writer = SummaryWriter()

        if not os.path.exists(self.save_model):
            os.makedirs(self.save_model)

    def set_bn_eval(self, m):
        """
        Method to freeze batch normalization layers in the pre-trained network
        """
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            for parameter in m.parameters():
                parameter.requires_grad = False

    def train(self):
        """
        Method to initiate training
        """
        model = torchvision.models.resnet18(pretrained=True)
        optimizer = optim.Adam(model.parameters(), lr=1e-3)

        # logic to check for pre-trained weights from earlier checkpoint
        if self.pre_trained is not None:
            pre_trained_model = torch.load(self.pre_trained)
            model.load_state_dict(pre_trained_model)
        else:
            # Replace the 1000-way classifier with a 4-value bbox regressor.
            fc_features = model.fc.in_features
            model.fc = nn.Linear(fc_features, 4)

        model.to(device)
        self.criterion.to(device)
        # NOTE(review): scheduler.step() is called before optimizer.step()
        # each epoch below; PyTorch >= 1.1 recommends stepping the scheduler
        # after the optimizer — confirm the intended schedule.
        self.lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                      step_size=5,
                                                      gamma=0.1)

        train_set = CUBDataset(self.train_path,
                               label_path=self.label_path,
                               mode='train',
                               split_rate=self.split_rate,
                               transform=self.transform)
        train_loader = DataLoader(train_set,
                                  batch_size=self.batch_size,
                                  shuffle=True)
        validation_set = CUBDataset(self.train_path,
                                    label_path=self.label_path,
                                    mode='validation',
                                    split_rate=self.split_rate,
                                    transform=self.transform)
        validation_loader = DataLoader(validation_set,
                                       batch_size=self.batch_size,
                                       shuffle=True)

        epochs = self.hyperparameters['epochs']
        print("==> Starting Training")
        for idx, train_epoch in enumerate(range(epochs)):

            total_accuracy, total_loss = 0.0, 0.0
            self.lr_scheduler.step()

            # training the localization network
            for batch_idx, data in enumerate(train_loader):
                model.train()

                # freezing batch normalization layers
                # model.apply(self.set_bn_eval)

                img, label, img_size = data
                label = box_transform(label, img_size)

                _input = Variable(img.to(device))  # use cuda(device)
                _target = Variable(label.to(device))  # use cuda

                # resetting optimizer to not remove old gradients
                optimizer.zero_grad()

                # forward pass
                output = model(_input)

                # backward pass
                loss = self.criterion(output, _target)
                loss.backward()
                optimizer.step()

                # compute accuracy for the prediction
                accuracy = compute_acc(output.data.cpu(), _target.data.cpu(),
                                       img_size)

                print(
                    "Epoch: {}/{}, Batch: {}/{}, Training Accuracy: {:3f}, Training Loss: {:3f}"
                    .format(idx + 1, epochs, batch_idx + 1, len(train_loader),
                            accuracy, loss))

                total_accuracy += accuracy
                # Bug fix: accumulate the scalar value, not the tensor —
                # summing tensors kept the autograd graph of every batch
                # alive for the whole epoch.
                total_loss += loss.item()

            total_loss = float(total_loss) / len(train_loader)
            total_accuracy = float(total_accuracy) / len(train_loader)

            val_accuracy, val_loss = 0.0, 0.0
            for batch_idx, data in enumerate(validation_loader):
                model.train(False)
                img, label, img_size = data
                label = box_transform(label, img_size)

                _input = Variable(img.to(device))
                _target = Variable(label.to(device))

                with torch.no_grad():
                    output = model(_input)

                loss = self.criterion(output, _target)
                accuracy = compute_acc(output.data.cpu(), _target.data.cpu(),
                                       img_size)

                val_accuracy += accuracy
                val_loss += loss.item()  # scalar, not tensor (see above)

            val_loss = float(val_loss) / len(validation_loader)
            val_accuracy = float(val_accuracy) / len(validation_loader)

            print(
                "Epoch: {}/{}, Training Accuracy: {:3f}, Training Loss: {:3f}, Validation Accuracy: {:3f}, Validation Loss: {:3f}"
                .format(idx + 1, epochs, total_accuracy, total_loss,
                        val_accuracy, val_loss))

            self.writer.add_scalar('training_loss', total_loss,
                                   train_epoch + 1)
            self.writer.add_scalar('training_accuracy', total_accuracy,
                                   train_epoch + 1)
            self.writer.add_scalar('validation_loss', val_loss,
                                   train_epoch + 1)
            self.writer.add_scalar('validation_accuracy', val_accuracy,
                                   train_epoch + 1)

            if val_accuracy < self.best_accuracy:
                self.best_epoch = train_epoch
                # Bug fix: the best metric was never updated, so the
                # "best" checkpoint was overwritten on every epoch.
                self.best_accuracy = val_accuracy

                torch.save(
                    model.state_dict(),
                    os.path.join(self.save_model,
                                 str(self.experiment) + '_model.pt'))

                print("=> Best Epoch: {}, Accuracy: {:3f}".format(
                    self.best_epoch, val_accuracy))

        self.writer.close()
Exemplo n.º 37
0
def main(args):
    """LR search + final evaluation for continual-learning fine-tuning.

    Phase 1: for each class count in ``args.schedule``, grid-search the
    fine-tuning learning rate over 5 random Omniglot class subsets.
    Phase 2: repeat ``args.runs`` times with the modal best LR and record
    final accuracies.  Results go to a JSON experiment log and TensorBoard.

    Depends on project helpers (``experiment``, ``utils``, ``df``, ``mf``,
    ``learner``) and a meta-trained model at ``args.model``.
    """
    # Seed every RNG source for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    my_experiment = experiment(args.name, args, "results/", args.commit)
    writer = SummaryWriter(my_experiment.path + "tensorboard")

    logger = logging.getLogger('experiment')
    logger.setLevel(logging.INFO)
    total_clases = 10

    # Layers "vars.0" .. "vars.{2*rln-1}" form the frozen representation
    # network; only the remaining layers are fine-tuned.
    frozen_layers = []
    for temp in range(args.rln * 2):
        frozen_layers.append("vars." + str(temp))
    logger.info("Frozen layers = %s", " ".join(frozen_layers))


    final_results_all = []
    temp_result = []
    # NOTE: rebinds the int above to the list of class counts to evaluate.
    total_clases = args.schedule
    for tot_class in total_clases:
        # ---- Phase 1: LR grid search over 5 random class subsets. ----
        lr_list = [0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001, 0.00003, 0.00001, 0.000003, 0.000001, 0.0000003, 0.0000001]
        lr_all = []
        for lr_search in range(0, 5):

            # Sample `tot_class` of the 650 evaluation classes.
            keep = np.random.choice(list(range(650)), tot_class, replace=False)
            # np.random.shuffle(keep)

            dataset = utils.remove_classes_omni(
                df.DatasetFactory.get_dataset("omniglot", train=True, background=False, path=args.dataset_path), keep)
            # Class-incremental (sorted) stream for fine-tuning, one image
            # per batch; shuffled only in the i.i.d. ablation (args.iid).
            iterator_sorted = torch.utils.data.DataLoader(
                utils.iterator_sorter_omni(dataset, False, classes=total_clases),
                batch_size=1,
                shuffle=args.iid, num_workers=2)
            dataset = utils.remove_classes_omni(
                df.DatasetFactory.get_dataset("omniglot", train=not args.test, background=False, path=args.dataset_path),
                keep)
            iterator = torch.utils.data.DataLoader(dataset, batch_size=32,
                                                   shuffle=False, num_workers=1)

            print(args)

            if torch.cuda.is_available():
                device = torch.device('cuda')
            else:
                device = torch.device('cpu')

            results_mem_size = {}

            for mem_size in [args.memory]:
                max_acc = -10
                max_lr = -10
                for lr in lr_list:

                    print(lr)
                    # for lr in [0.001, 0.0003, 0.0001, 0.00003, 0.00001]:
                    # Reload the meta-trained model fresh for every LR trial.
                    maml = torch.load(args.model, map_location='cpu')

                    if args.scratch:
                        config = mf.ModelFactory.get_model("na", args.dataset)
                        maml = learner.Learner(config)
                        # maml = MetaLearingClassification(args, config).to(device).net

                    maml = maml.to(device)

                    # Mark every parameter trainable, then freeze the
                    # representation layers listed in `frozen_layers`.
                    # (`learn` is a project-specific flag, not a torch attr.)
                    for name, param in maml.named_parameters():
                        param.learn = True

                    for name, param in maml.named_parameters():
                        # logger.info(name)
                        if name in frozen_layers:
                            param.learn = False

                        else:
                            if args.reset:
                                # Optionally re-initialize unfrozen params:
                                # kaiming for weights, zeros for biases.
                                w = nn.Parameter(torch.ones_like(param))
                                # logger.info("W shape = %s", str(len(w.shape)))
                                if len(w.shape) > 1:
                                    torch.nn.init.kaiming_normal_(w)
                                else:
                                    w = nn.Parameter(torch.zeros_like(param))
                                param.data = w
                                param.learn = True

                    # Rebuild the frozen-layer list (it is cleared again in
                    # Phase 2 below; kept in sync with args.rln).
                    frozen_layers = []
                    for temp in range(args.rln * 2):
                        frozen_layers.append("vars." + str(temp))

                    # Re-initialize the classifier head (last weight/bias).
                    # NOTE(review): indexing parameters() assumes the
                    # project's Learner returns a list, not a generator —
                    # confirm against learner.Learner.
                    torch.nn.init.kaiming_normal_(maml.parameters()[-2])
                    w = nn.Parameter(torch.zeros_like(maml.parameters()[-1]))
                    maml.parameters()[-1].data = w

                    # Hard-coded head reset for the "vars_14"/"vars_15"
                    # parameters (presumably the final layer — TODO confirm).
                    for n, a in maml.named_parameters():
                        n = n.replace(".", "_")
                        # logger.info("Name = %s", n)
                        if n == "vars_14":
                            w = nn.Parameter(torch.ones_like(a))
                            # logger.info("W shape = %s", str(w.shape))
                            torch.nn.init.kaiming_normal_(w)
                            a.data = w
                        if n == "vars_15":
                            w = nn.Parameter(torch.zeros_like(a))
                            a.data = w

                    filter_list = ["vars.0", "vars.1", "vars.2", "vars.3", "vars.4", "vars.5"]

                    logger.info("Filter list = %s", ",".join(filter_list))
                    # NOTE(review): this assignment is immediately overwritten
                    # two statements below; it appears to be dead code.
                    list_of_names = list(
                        map(lambda x: x[1], list(filter(lambda x: x[0] not in filter_list, maml.named_parameters()))))

                    # Optimize only the params flagged learnable above.
                    list_of_params = list(filter(lambda x: x.learn, maml.parameters()))
                    list_of_names = list(filter(lambda x: x[1].learn, maml.named_parameters()))
                    if args.scratch or args.no_freeze:
                        print("Empty filter list")
                        list_of_params = maml.parameters()
                    #
                    for x in list_of_names:
                        logger.info("Unfrozen layer = %s", str(x[0]))
                    opt = torch.optim.Adam(list_of_params, lr=lr)

                    # Fine-tune on the class-incremental stream.
                    for _ in range(0, args.epoch):
                        for img, y in iterator_sorted:
                            img = img.to(device)
                            y = y.to(device)

                            pred = maml(img)
                            opt.zero_grad()
                            loss = F.cross_entropy(pred, y)
                            loss.backward()
                            opt.step()

                    logger.info("Result after one epoch for LR = %f", lr)
                    # Evaluate mean per-batch accuracy on the held-out set.
                    correct = 0
                    for img, target in iterator:
                        img = img.to(device)
                        target = target.to(device)
                        logits_q = maml(img, vars=None, bn_training=False, feature=False)

                        pred_q = (logits_q).argmax(dim=1)

                        correct += torch.eq(pred_q, target).sum().item() / len(img)

                    logger.info(str(correct / len(iterator)))
                    if (correct / len(iterator) > max_acc):
                        max_acc = correct / len(iterator)
                        max_lr = lr

                lr_all.append(max_lr)
                results_mem_size[mem_size] = (max_acc, max_lr)
                logger.info("Final Max Result = %s", str(max_acc))
                writer.add_scalar('/finetune/best_' + str(lr_search), max_acc, tot_class)
            temp_result.append((tot_class, results_mem_size))
            print("A=  ", results_mem_size)
            logger.info("Temp Results = %s", str(results_mem_size))

            # Persist intermediate results after every search iteration.
            my_experiment.results["Temp Results"] = temp_result
            my_experiment.store_json()
            print("LR RESULTS = ", temp_result)



        # Pick the modal best LR across the 5 searches.
        from scipy import stats
        best_lr = float(stats.mode(lr_all)[0][0])

        logger.info("BEST LR %s= ", str(best_lr))


        # ---- Phase 2: evaluate `args.runs` times with the selected LR.
        # (Largely duplicates Phase 1 with lr fixed to best_lr.)
        for aoo in range(0, args.runs):

            keep = np.random.choice(list(range(650)), tot_class, replace=False)
            #
            if args.dataset == "omniglot":

                dataset = utils.remove_classes_omni(
                    df.DatasetFactory.get_dataset("omniglot", train=True, background=False), keep)
                iterator_sorted = torch.utils.data.DataLoader(
                    utils.iterator_sorter_omni(dataset, False, classes=total_clases),
                    batch_size=1,
                    shuffle=args.iid, num_workers=2)
                dataset = utils.remove_classes_omni(
                    df.DatasetFactory.get_dataset("omniglot", train=not args.test, background=False), keep)
                iterator = torch.utils.data.DataLoader(dataset, batch_size=32,
                                                       shuffle=False, num_workers=1)
            elif args.dataset == "CIFAR100":
                # CIFAR100 uses the second half of the label space (50-99).
                keep = np.random.choice(list(range(50, 100)), tot_class)
                dataset = utils.remove_classes(df.DatasetFactory.get_dataset(args.dataset, train=True), keep)
                iterator_sorted = torch.utils.data.DataLoader(
                    utils.iterator_sorter(dataset, False, classes=tot_class),
                    batch_size=16,
                    shuffle=args.iid, num_workers=2)
                dataset = utils.remove_classes(df.DatasetFactory.get_dataset(args.dataset, train=False), keep)
                iterator = torch.utils.data.DataLoader(dataset, batch_size=128,
                                                       shuffle=False, num_workers=1)
            # sampler = ts.MNISTSampler(list(range(0, total_clases)), dataset)
            #
            print(args)

            if torch.cuda.is_available():
                device = torch.device('cuda')
            else:
                device = torch.device('cpu')

            results_mem_size = {}

            for mem_size in [args.memory]:
                max_acc = -10
                max_lr = -10

                lr = best_lr

                # for lr in [0.001, 0.0003, 0.0001, 0.00003, 0.00001]:
                maml = torch.load(args.model, map_location='cpu')

                if args.scratch:
                    config = mf.ModelFactory.get_model("na", args.dataset)
                    maml = learner.Learner(config)
                    # maml = MetaLearingClassification(args, config).to(device).net

                maml = maml.to(device)

                # Same freeze/reset logic as Phase 1.
                for name, param in maml.named_parameters():
                    param.learn = True

                for name, param in maml.named_parameters():
                    # logger.info(name)
                    if name in frozen_layers:
                        # logger.info("Freeezing name %s", str(name))
                        param.learn = False
                        # logger.info(str(param.requires_grad))
                    else:
                        if args.reset:
                            w = nn.Parameter(torch.ones_like(param))
                            # logger.info("W shape = %s", str(len(w.shape)))
                            if len(w.shape) > 1:
                                torch.nn.init.kaiming_normal_(w)
                            else:
                                w = nn.Parameter(torch.zeros_like(param))
                            param.data = w
                            param.learn = True

                frozen_layers = []
                for temp in range(args.rln * 2):
                    frozen_layers.append("vars." + str(temp))

                torch.nn.init.kaiming_normal_(maml.parameters()[-2])
                w = nn.Parameter(torch.zeros_like(maml.parameters()[-1]))
                maml.parameters()[-1].data = w

                for n, a in maml.named_parameters():
                    n = n.replace(".", "_")
                    # logger.info("Name = %s", n)
                    if n == "vars_14":
                        w = nn.Parameter(torch.ones_like(a))
                        # logger.info("W shape = %s", str(w.shape))
                        torch.nn.init.kaiming_normal_(w)
                        a.data = w
                    if n == "vars_15":
                        w = nn.Parameter(torch.zeros_like(a))
                        a.data = w

                # Accuracy before any fine-tuning (sanity baseline).
                correct = 0

                for img, target in iterator:
                    with torch.no_grad():
                        img = img.to(device)
                        target = target.to(device)
                        logits_q = maml(img, vars=None, bn_training=False, feature=False)
                        pred_q = (logits_q).argmax(dim=1)
                        correct += torch.eq(pred_q, target).sum().item() / len(img)

                logger.info("Pre-epoch accuracy %s", str(correct / len(iterator)))

                filter_list = ["vars.0", "vars.1", "vars.2", "vars.3", "vars.4", "vars.5"]

                logger.info("Filter list = %s", ",".join(filter_list))
                list_of_names = list(
                    map(lambda x: x[1], list(filter(lambda x: x[0] not in filter_list, maml.named_parameters()))))

                list_of_params = list(filter(lambda x: x.learn, maml.parameters()))
                list_of_names = list(filter(lambda x: x[1].learn, maml.named_parameters()))
                if args.scratch or args.no_freeze:
                    print("Empty filter list")
                    list_of_params = maml.parameters()
                #
                for x in list_of_names:
                    logger.info("Unfrozen layer = %s", str(x[0]))
                opt = torch.optim.Adam(list_of_params, lr=lr)

                for _ in range(0, args.epoch):
                    for img, y in iterator_sorted:
                        img = img.to(device)
                        y = y.to(device)

                        pred = maml(img)
                        opt.zero_grad()
                        loss = F.cross_entropy(pred, y)
                        loss.backward()
                        opt.step()

                logger.info("Result after one epoch for LR = %f", lr)
                correct = 0
                for img, target in iterator:
                    img = img.to(device)
                    target = target.to(device)
                    logits_q = maml(img, vars=None, bn_training=False, feature=False)

                    pred_q = (logits_q).argmax(dim=1)

                    correct += torch.eq(pred_q, target).sum().item() / len(img)

                logger.info(str(correct / len(iterator)))
                if (correct / len(iterator) > max_acc):
                    max_acc = correct / len(iterator)
                    max_lr = lr

                lr_list = [max_lr]
                results_mem_size[mem_size] = (max_acc, max_lr)
                logger.info("Final Max Result = %s", str(max_acc))
                writer.add_scalar('/finetune/best_' + str(aoo), max_acc, tot_class)
            final_results_all.append((tot_class, results_mem_size))
            print("A=  ", results_mem_size)
            logger.info("Final results = %s", str(results_mem_size))

            my_experiment.results["Final Results"] = final_results_all
            my_experiment.store_json()
            print("FINAL RESULTS = ", final_results_all)
    writer.close()
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA, steps_count=REWARD_STEPS)

    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)

    total_rewards = []
    step_rewards = []
    step_idx = 0
    done_episodes = 0
    reward_sum = 0.0

    batch_states, batch_actions, batch_scales = [], [], []

    for step_idx, exp in enumerate(exp_source):
        reward_sum += exp.reward
        baseline = reward_sum / (step_idx + 1)
        writer.add_scalar("baseline", baseline, step_idx)
        batch_states.append(exp.state)
        batch_actions.append(int(exp.action))
        batch_scales.append(exp.reward - baseline)

        # handle new rewards
        new_rewards = exp_source.pop_total_rewards()
        if new_rewards:
            done_episodes += 1
            reward = new_rewards[0]
            total_rewards.append(reward)
            mean_rewards = float(np.mean(total_rewards[-100:]))
            print("%d: reward: %6.2f, mean_100: %6.2f, episodes: %d" % (
                step_idx, reward, mean_rewards, done_episodes))
            writer.add_scalar("reward", reward, step_idx)
            writer.add_scalar("reward_100", mean_rewards, step_idx)
Exemplo n.º 39
0
class Trainer(object):
    """Train/validation driver for a joint descriptor + detector network.

    All collaborators (model, optimizer, scheduler, data loaders, loss
    callables) are injected through ``args``; this class only orchestrates
    the epoch loop, TensorBoard logging and checkpointing.
    """

    def __init__(self, args):
        self.config = args
        # loop-control parameters
        self.start_epoch = 0
        self.max_epoch = args.max_epoch
        self.training_max_iter = args.training_max_iter
        self.val_max_iter = args.val_max_iter
        self.save_dir = args.save_dir
        self.device = args.device
        self.verbose = args.verbose
        # "best so far" trackers consulted by train() for snapshotting
        self.best_acc = 0
        self.best_loss = 10000000

        self.model = args.model.to(self.device)
        self.optimizer = args.optimizer
        self.scheduler = args.scheduler
        self.scheduler_interval = args.scheduler_interval
        self.snapshot_interval = args.snapshot_interval
        # dict of loss callables, keyed 'desc_loss' / 'det_loss'
        self.evaluation_metric = args.evaluation_metric
        self.metric_weight = args.metric_weight
        self.writer = SummaryWriter(log_dir=args.tboard_dir)

        # optionally resume from a checkpoint (may advance start_epoch)
        if args.pretrain != '':
            self._load_pretrain(args.pretrain)

        self.train_loader = args.train_loader
        self.val_loader = args.val_loader

    def train(self):
        """Run the full schedule: train, evaluate and snapshot each epoch."""
        self.model.train()
        for epoch in range(self.start_epoch, self.max_epoch):
            self.train_epoch(epoch + 1)

            if (epoch + 1) % 1 == 0:  # evaluate every epoch
                print("start evaluation...")
                res = self.evaluate(epoch + 1)
                if res['desc_loss'] < self.best_loss:
                    self.best_loss = res['desc_loss']
                    self._snapshot(epoch + 1, 'best_loss')
                if res['accuracy'] > self.best_acc:
                    self.best_acc = res['accuracy']
                    self._snapshot(epoch + 1, 'best_acc')
                # Log inside this block so `res` is always defined even if
                # the evaluation interval above is ever changed.
                for k, v in res.items():
                    self.writer.add_scalar(f'val/{k}', v, epoch + 1)

            if (epoch + 1) % self.scheduler_interval == 0:
                self.scheduler.step()

            if (epoch + 1) % self.snapshot_interval == 0:
                self._snapshot(epoch + 1)

        # finished all epochs
        print("Training finish!... save training results")

    def train_epoch(self, epoch):
        """Train for one epoch.

        ``epoch`` is 1-based (as passed by :meth:`train`).
        """
        data_timer, model_timer = Timer(), Timer()
        desc_loss_meter, det_loss_meter = AverageMeter(), AverageMeter()
        acc_meter = AverageMeter()
        d_pos_meter, d_neg_meter = AverageMeter(), AverageMeter()
        num_iter = int(
            len(self.train_loader.dataset) // self.train_loader.batch_size)
        num_iter = min(self.training_max_iter, num_iter)
        train_loader_iter = self.train_loader.__iter__()
        for batch_idx in range(num_iter):
            data_timer.tic()
            # next(it) — iterators have no .next() method in Python 3
            inputs = next(train_loader_iter)
            for k, v in inputs.items():  # load inputs to device.
                if isinstance(v, list):
                    inputs[k] = [item.to(self.device) for item in v]
                else:
                    inputs[k] = v.to(self.device)
            data_timer.toc()

            model_timer.tic()

            # forward
            self.optimizer.zero_grad()

            features, scores = self.model(inputs)
            # Anchor/positive rows are selected by correspondence indices;
            # positives live in the second stacked cloud, hence the offset
            # by the first cloud's length.
            anc_features = features[inputs["corr"][:, 0].long()]
            pos_features = features[inputs["corr"][:, 1].long() +
                                    inputs['stack_lengths'][0][0]]
            anc_scores = scores[inputs["corr"][:, 0].long()]
            pos_scores = scores[inputs["corr"][:, 1].long() +
                                inputs['stack_lengths'][0][0]]

            desc_loss, acc, d_pos, d_neg, _, dist = self.evaluation_metric[
                "desc_loss"](anc_features, pos_features, inputs['dist_keypts'])
            det_loss = self.evaluation_metric['det_loss'](dist, anc_scores,
                                                          pos_scores)
            # weighted sum of the two loss terms
            loss = desc_loss * self.metric_weight['desc_loss'] + \
                det_loss * self.metric_weight['det_loss']
            d_pos = np.mean(d_pos)
            d_neg = np.mean(d_neg)

            # backward
            loss.backward()
            # Skip the optimizer step if any gradient is non-finite
            # (NaN/Inf) to keep the weights from being poisoned.
            do_step = True
            for param in self.model.parameters():
                if param.grad is not None:
                    if (1 - torch.isfinite(param.grad).long()).sum() > 0:
                        do_step = False
                        break
            if do_step is True:
                self.optimizer.step()
            model_timer.toc()
            desc_loss_meter.update(float(desc_loss))
            det_loss_meter.update(float(det_loss))
            d_pos_meter.update(float(d_pos))
            d_neg_meter.update(float(d_neg))
            acc_meter.update(float(acc))

            if (batch_idx + 1) % 100 == 0 and self.verbose:
                curr_iter = num_iter * (epoch - 1) + batch_idx
                self.writer.add_scalar('train/Desc_Loss',
                                       float(desc_loss_meter.avg), curr_iter)
                self.writer.add_scalar('train/Det_Loss',
                                       float(det_loss_meter.avg), curr_iter)
                self.writer.add_scalar('train/D_pos', float(d_pos_meter.avg),
                                       curr_iter)
                self.writer.add_scalar('train/D_neg', float(d_neg_meter.avg),
                                       curr_iter)
                self.writer.add_scalar('train/Accuracy', float(acc_meter.avg),
                                       curr_iter)
                print(f"Epoch: {epoch} [{batch_idx+1:4d}/{num_iter}] "
                      f"desc loss: {desc_loss_meter.avg:.2f} "
                      f"det loss: {det_loss_meter.avg:.2f} "
                      f"acc:  {acc_meter.avg:.2f} "
                      f"d_pos: {d_pos_meter.avg:.2f} "
                      f"d_neg: {d_neg_meter.avg:.2f} "
                      f"data time: {data_timer.avg:.2f}s "
                      f"model time: {model_timer.avg:.2f}s")
        # finish one epoch
        epoch_time = model_timer.total_time + data_timer.total_time
        print(
            f'Epoch {epoch}: Desc Loss: {desc_loss_meter.avg:.2f}, Det Loss : {det_loss_meter.avg:.2f}, Accuracy: {acc_meter.avg:.2f}, D_pos: {d_pos_meter.avg:.2f}, D_neg: {d_neg_meter.avg:.2f}, time {epoch_time:.2f}s'
        )

    def evaluate(self, epoch):
        """Run one validation pass and return averaged metrics.

        Returns a dict with keys 'desc_loss', 'det_loss', 'accuracy',
        'd_pos' and 'd_neg'. Restores the model to train mode on exit.
        """
        self.model.eval()
        data_timer, model_timer = Timer(), Timer()
        desc_loss_meter, det_loss_meter = AverageMeter(), AverageMeter()
        acc_meter = AverageMeter()
        d_pos_meter, d_neg_meter = AverageMeter(), AverageMeter()
        num_iter = int(
            len(self.val_loader.dataset) // self.val_loader.batch_size)
        num_iter = min(self.val_max_iter, num_iter)
        val_loader_iter = self.val_loader.__iter__()
        for batch_idx in range(num_iter):
            data_timer.tic()
            # next(it) — iterators have no .next() method in Python 3
            inputs = next(val_loader_iter)
            for k, v in inputs.items():  # load inputs to device.
                if isinstance(v, list):
                    inputs[k] = [item.to(self.device) for item in v]
                else:
                    inputs[k] = v.to(self.device)
            data_timer.toc()

            model_timer.tic()
            features, scores = self.model(inputs)
            # Same anchor/positive selection as in train_epoch.
            anc_features = features[inputs["corr"][:, 0].long()]
            pos_features = features[inputs["corr"][:, 1].long() +
                                    inputs['stack_lengths'][0][0]]
            anc_scores = scores[inputs["corr"][:, 0].long()]
            pos_scores = scores[inputs["corr"][:, 1].long() +
                                inputs['stack_lengths'][0][0]]

            desc_loss, acc, d_pos, d_neg, _, dist = self.evaluation_metric[
                'desc_loss'](anc_features, pos_features, inputs['dist_keypts'])
            det_loss = self.evaluation_metric['det_loss'](dist, anc_scores,
                                                          pos_scores)
            loss = desc_loss * self.metric_weight['desc_loss'] + \
                det_loss * self.metric_weight['det_loss']
            d_pos = np.mean(d_pos)
            d_neg = np.mean(d_neg)

            model_timer.toc()
            desc_loss_meter.update(float(desc_loss))
            det_loss_meter.update(float(det_loss))
            d_pos_meter.update(float(d_pos))
            d_neg_meter.update(float(d_neg))
            acc_meter.update(float(acc))

            if (batch_idx + 1) % 100 == 0 and self.verbose:
                # `epoch` is already 1-based here, so print it as-is
                print(f"Eval epoch: {epoch} [{batch_idx+1:4d}/{num_iter}] "
                      f"desc loss: {desc_loss_meter.avg:.2f} "
                      f"det loss: {det_loss_meter.avg:.2f} "
                      f"acc:  {acc_meter.avg:.2f} "
                      f"d_pos: {d_pos_meter.avg:.2f} "
                      f"d_neg: {d_neg_meter.avg:.2f} "
                      f"data time: {data_timer.avg:.2f}s "
                      f"model time: {model_timer.avg:.2f}s")
        self.model.train()
        res = {
            'desc_loss': desc_loss_meter.avg,
            'det_loss': det_loss_meter.avg,
            'accuracy': acc_meter.avg,
            'd_pos': d_pos_meter.avg,
            'd_neg': d_neg_meter.avg,
        }
        print(
            f'Evaluation: Epoch {epoch}: Desc Loss {res["desc_loss"]}, Det Loss {res["det_loss"]}, Accuracy {res["accuracy"]}'
        )
        return res

    def _snapshot(self, epoch, name=None):
        """Save model/optimizer/scheduler state under ``save_dir``.

        The file is named ``model_{epoch}.pth`` or ``model_{name}.pth``
        when an explicit ``name`` (e.g. 'best_loss') is given.
        """
        state = {
            'epoch': epoch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'best_loss': self.best_loss,
        }
        if name is None:
            filename = os.path.join(self.save_dir, f'model_{epoch}.pth')
        else:
            filename = os.path.join(self.save_dir, f'model_{name}.pth')
        # report the actual destination path (was a garbled placeholder)
        print(f"Save model to {filename}")
        torch.save(state, filename)

    def _load_pretrain(self, resume):
        """Restore a checkpoint written by :meth:`_snapshot`.

        Raises ValueError when ``resume`` is not an existing file.
        """
        if os.path.isfile(resume):
            print(f"=> loading checkpoint {resume}")
            state = torch.load(resume)
            self.start_epoch = state['epoch']
            self.model.load_state_dict(state['state_dict'])
            self.scheduler.load_state_dict(state['scheduler'])
            self.optimizer.load_state_dict(state['optimizer'])
            self.best_loss = state['best_loss']
        else:
            raise ValueError(f"=> no checkpoint found at '{resume}'")

    def _get_lr(self, group=0):
        """Return the current learning rate of optimizer param group `group`."""
        return self.optimizer.param_groups[group]['lr']
        while True:
            frame_idx += 1
            buffer.populate(1)
            epsilon_tracker.frame(frame_idx)

            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break

            if len(buffer) < params['replay_initial']:
                continue
            if eval_states is None:
                eval_states = buffer.sample(STATES_TO_EVALUATE)
                eval_states = [np.array(transition.state, copy=False) for transition in eval_states]
                eval_states = np.array(eval_states, copy=False)

            optimizer.zero_grad()
            batch = buffer.sample(params['batch_size'])
            loss_v = calc_loss(batch, net, tgt_net.target_model, gamma=params['gamma'], device=device,
                               double=args.double)
            loss_v.backward()
            optimizer.step()

            if frame_idx % params['target_net_sync'] == 0:
                tgt_net.sync()
            if frame_idx % EVAL_EVERY_FRAME == 0:
                mean_val = calc_values_of_states(eval_states, net, device=device)
                writer.add_scalar("values_mean", mean_val, frame_idx)

                                    if p.grad is not None])

            prob_v = F.softmax(logits_v)
            entropy_v = -(prob_v * log_prob_v).sum(dim=1).mean()
            entropy_loss_v = -ENTROPY_BETA * entropy_v
            loss_v = loss_policy_v + entropy_loss_v
            loss_v.backward()
            nn_utils.clip_grad_norm(net.parameters(), GRAD_L2_CLIP)
            optimizer.step()
            loss_v += loss_policy_v

            # calc KL-div
            new_logits_v = net(states_v)
            new_prob_v = F.softmax(new_logits_v)
            kl_div_v = -((new_prob_v / prob_v).log() * prob_v).sum(dim=1).mean()
            writer.add_scalar("kl", kl_div_v.data.cpu().numpy()[0], step_idx)

            writer.add_scalar("baseline", baseline, step_idx)
            writer.add_scalar("entropy", entropy_v.data.cpu().numpy()[0], step_idx)
            writer.add_scalar("batch_scales", np.mean(batch_scales), step_idx)
            writer.add_scalar("batch_scales_std", scale_std, step_idx)
            writer.add_scalar("loss_entropy", entropy_loss_v.data.cpu().numpy()[0], step_idx)
            writer.add_scalar("loss_policy", loss_policy_v.data.cpu().numpy()[0], step_idx)
            writer.add_scalar("loss_total", loss_v.data.cpu().numpy()[0], step_idx)

            writer.add_scalar("grad_l2", np.sqrt(np.mean(np.square(grads))), step_idx)
            writer.add_scalar("grad_max", np.max(np.abs(grads)), step_idx)
            writer.add_scalar("grad_var", np.var(grads), step_idx)

            batch_states.clear()
            batch_actions.clear()
Exemplo n.º 42
0
class SummaryWorker(multiprocessing.Process):
    """Background process that writes TensorBoard summaries.

    The training process calls the instance (``__call__``) with tensors;
    ``copy_*`` detaches them into plain numpy data so the queue never
    pickles live autograd graphs, and the heavy drawing/serialisation
    happens in this separate process (``run``), fed through the queue.
    """

    def __init__(self, env):
        super(SummaryWorker, self).__init__()
        self.env = env
        self.config = env.config
        self.queue = multiprocessing.Queue()
        # Each summary family has its own rate-limiting timer; a missing
        # config option disables that family (the lambda reports "not due").
        try:
            self.timer_scalar = utils.train.Timer(env.config.getfloat('summary', 'scalar'))
        except configparser.NoOptionError:
            self.timer_scalar = lambda: False
        try:
            self.timer_image = utils.train.Timer(env.config.getfloat('summary', 'image'))
        except configparser.NoOptionError:
            self.timer_image = lambda: False
        try:
            self.timer_histogram = utils.train.Timer(env.config.getfloat('summary', 'histogram'))
        except configparser.NoOptionError:
            self.timer_histogram = lambda: False
        # Regex whitelist of parameter names eligible for histograms.
        with open(os.path.expanduser(os.path.expandvars(env.config.get('summary_histogram', 'parameters'))), 'r') as f:
            self.histogram_parameters = utils.RegexList([line.rstrip() for line in f])
        self.draw_bbox = utils.visualize.DrawBBox(env.config, env.category)
        self.draw_iou = utils.visualize.DrawIou(env.config)

    def __call__(self, name, **kwargs):
        """Enqueue summary `name` ('scalar'/'image'/'histogram') if due."""
        if getattr(self, 'timer_' + name)():
            # copy_<name> converts tensors to numpy before pickling
            kwargs = getattr(self, 'copy_' + name)(**kwargs)
            self.queue.put((name, kwargs))

    def stop(self):
        """Ask run() to exit by sending the (None, {}) sentinel."""
        self.queue.put((None, {}))

    def run(self):
        """Consume queued summaries until the stop sentinel arrives."""
        self.writer = SummaryWriter(os.path.join(self.env.model_dir, self.env.args.run))
        while True:
            name, kwargs = self.queue.get()
            if name is None:
                break
            func = getattr(self, 'summary_' + name)
            try:
                func(**kwargs)
            except Exception:
                # Best effort: a failed summary must not kill the worker,
                # but never swallow SystemExit/KeyboardInterrupt.
                traceback.print_exc()

    def copy_scalar(self, **kwargs):
        """Detach scalar losses to numpy for safe cross-process transfer."""
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        loss_total = loss_total.data.clone().cpu().numpy()
        loss = {key: loss[key].data.clone().cpu().numpy() for key in loss}
        loss_hparam = {key: loss_hparam[key].data.clone().cpu().numpy() for key in loss_hparam}
        return dict(
            step=step,
            loss_total=loss_total,
            loss=loss, loss_hparam=loss_hparam,
        )

    def summary_scalar(self, **kwargs):
        """Write per-term losses (and optionally hyper-param losses) as scalars."""
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        for key in loss:
            self.writer.add_scalar('loss/' + key, loss[key][0], step)
        if self.config.getboolean('summary_scalar', 'loss_hparam'):
            self.writer.add_scalars('loss_hparam', {key: loss_hparam[key][0] for key in loss_hparam}, step)
        self.writer.add_scalar('loss_total', loss_total[0], step)

    def copy_image(self, **kwargs):
        """Detach image batch, predictions and matching mask to numpy."""
        step, height, width, rows, cols, data, pred, debug = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, debug'.split(', '))
        data = {key: data[key].clone().cpu().numpy() for key in 'image, yx_min, yx_max, cls'.split(', ')}
        pred = {key: pred[key].data.clone().cpu().numpy() for key in 'yx_min, yx_max, iou, logits'.split(', ') if key in pred}
        # encode positive/negative anchors as 1/0 with 0.5 for "neither"
        matching = (debug['positive'].float() - debug['negative'].float() + 1) / 2
        matching = matching.data.clone().cpu().numpy()
        return dict(
            step=step, height=height, width=width, rows=rows, cols=cols,
            data=data, pred=pred,
            matching=matching,
        )

    def summary_image(self, **kwargs):
        """Render ground-truth/predicted bboxes (and IoU maps) as image grids."""
        step, height, width, rows, cols, data, pred, matching = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, matching'.split(', '))
        image = data['image']
        limit = min(self.config.getint('summary_image', 'limit'), image.shape[0])
        image = image[:limit, :, :, :]
        yx_min, yx_max, iou = (pred[key] for key in 'yx_min, yx_max, iou'.split(', '))
        # predictions are in cell coordinates; scale back to pixels
        scale = [height / rows, width / cols]
        yx_min, yx_max = (a * scale for a in (yx_min, yx_max))
        if 'logits' in pred:
            cls = np.argmax(F.softmax(torch.autograd.Variable(torch.from_numpy(pred['logits'])), -1).data.cpu().numpy(), -1)
        else:
            cls = np.zeros(iou.shape, int)
        if self.config.getboolean('summary_image', 'bbox'):
            # data
            canvas = np.copy(image)
            canvas = pybenchmark.profile('bbox/data')(self.draw_bbox_data)(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')))
            self.writer.add_image('bbox/data', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
            # pred
            canvas = np.copy(image)
            canvas = pybenchmark.profile('bbox/pred')(self.draw_bbox_pred)(canvas, yx_min, yx_max, cls, iou, nms=True)
            self.writer.add_image('bbox/pred', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
        if self.config.getboolean('summary_image', 'iou'):
            # bbox
            canvas = np.copy(image)
            canvas_data = self.draw_bbox_data(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')), colors=['g'])
            # data
            for i, canvas in enumerate(pybenchmark.profile('iou/data')(self.draw_bbox_iou)(list(map(np.copy, canvas_data)), yx_min, yx_max, cls, matching, rows, cols, colors=['w'])):
                canvas = np.stack(canvas)
                canvas = torch.from_numpy(canvas).permute(0, 3, 1, 2)
                canvas = torchvision.utils.make_grid(canvas.float(), normalize=True, scale_each=True)
                self.writer.add_image('iou/data%d' % i, canvas, step)
            # pred
            for i, canvas in enumerate(pybenchmark.profile('iou/pred')(self.draw_bbox_iou)(list(map(np.copy, canvas_data)), yx_min, yx_max, cls, iou, rows, cols, colors=['w'])):
                canvas = np.stack(canvas)
                canvas = torch.from_numpy(canvas).permute(0, 3, 1, 2)
                canvas = torchvision.utils.make_grid(canvas.float(), normalize=True, scale_each=True)
                self.writer.add_image('iou/pred%d' % i, canvas, step)

    def draw_bbox_data(self, canvas, yx_min, yx_max, cls, colors=None):
        """Draw ground-truth boxes on each image in the batch."""
        batch_size = len(canvas)
        if len(cls.shape) == len(yx_min.shape):
            cls = np.argmax(cls, -1)  # one-hot -> index
        yx_min, yx_max, cls = ([a[b] for b in range(batch_size)] for a in (yx_min, yx_max, cls))
        return [self.draw_bbox(canvas, yx_min.astype(int), yx_max.astype(int), cls, colors=colors) for canvas, yx_min, yx_max, cls in zip(canvas, yx_min, yx_max, cls)]

    def draw_bbox_pred(self, canvas, yx_min, yx_max, cls, iou, colors=None, nms=False):
        """Draw predicted boxes above the IoU threshold, optionally after NMS."""
        batch_size = len(canvas)
        mask = iou > self.config.getfloat('detect', 'threshold')
        yx_min, yx_max = (np.reshape(a, [a.shape[0], -1, 2]) for a in (yx_min, yx_max))
        cls, iou, mask = (np.reshape(a, [a.shape[0], -1]) for a in (cls, iou, mask))
        yx_min, yx_max, cls, iou, mask = ([a[b] for b in range(batch_size)] for a in (yx_min, yx_max, cls, iou, mask))
        yx_min, yx_max, cls, iou = ([a[m] for a, m in zip(l, mask)] for l in (yx_min, yx_max, cls, iou))
        if nms:
            overlap = self.config.getfloat('detect', 'overlap')
            keep = [pybenchmark.profile('nms')(utils.postprocess.nms)(torch.Tensor(iou), torch.Tensor(yx_min), torch.Tensor(yx_max), overlap) if iou.shape[0] > 0 else [] for yx_min, yx_max, iou in zip(yx_min, yx_max, iou)]
            keep = [np.array(k, int) for k in keep]
            yx_min, yx_max, cls = ([a[k] for a, k in zip(l, keep)] for l in (yx_min, yx_max, cls))
        return [self.draw_bbox(canvas, yx_min.astype(int), yx_max.astype(int), cls, colors=colors) for canvas, yx_min, yx_max, cls in zip(canvas, yx_min, yx_max, cls)]

    def draw_bbox_iou(self, canvas_share, yx_min, yx_max, cls, iou, rows, cols, colors=None):
        """Per anchor, draw thresholded boxes plus the IoU heat map.

        NOTE: loop variables intentionally shadow the arguments — each
        iteration works on one anchor's slice of the inputs.
        """
        batch_size = len(canvas_share)
        yx_min, yx_max = ([np.squeeze(a, -2) for a in np.split(a, a.shape[-2], -2)] for a in (yx_min, yx_max))
        cls, iou = ([np.squeeze(a, -1) for a in np.split(a, a.shape[-1], -1)] for a in (cls, iou))
        results = []
        for i, (yx_min, yx_max, cls, iou) in enumerate(zip(yx_min, yx_max, cls, iou)):
            mask = iou > self.config.getfloat('detect', 'threshold')
            yx_min, yx_max = (np.reshape(a, [a.shape[0], -1, 2]) for a in (yx_min, yx_max))
            cls, iou, mask = (np.reshape(a, [a.shape[0], -1]) for a in (cls, iou, mask))
            yx_min, yx_max, cls, iou, mask = ([a[b] for b in range(batch_size)] for a in (yx_min, yx_max, cls, iou, mask))
            yx_min, yx_max, cls = ([a[m] for a, m in zip(l, mask)] for l in (yx_min, yx_max, cls))
            canvas = [self.draw_bbox(canvas, yx_min.astype(int), yx_max.astype(int), cls, colors=colors) for canvas, yx_min, yx_max, cls in zip(np.copy(canvas_share), yx_min, yx_max, cls)]
            iou = [np.reshape(a, [rows, cols]) for a in iou]
            canvas = [self.draw_iou(_canvas, iou) for _canvas, iou in zip(canvas, iou)]
            results.append(canvas)
        return results

    def copy_histogram(self, **kwargs):
        """Detach tensors among the histogram kwargs; pass the rest through."""
        return {key: kwargs[key].data.clone().cpu().numpy() if torch.is_tensor(kwargs[key]) else kwargs[key] for key in 'step, dnn'.split(', ')}

    def summary_histogram(self, **kwargs):
        """Write histograms for parameters matching the configured regex list."""
        step, dnn = (kwargs[key] for key in 'step, dnn'.split(', '))
        for name, param in dnn.named_parameters():
            if self.histogram_parameters(name):
                self.writer.add_histogram(name, param, step)
    buffer = PrioReplayBuffer(exp_source, params['replay_size'], PRIO_REPLAY_ALPHA)
    optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])

    frame_idx = 0
    beta = BETA_START

    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        while True:
            frame_idx += 1
            buffer.populate(1)
            epsilon_tracker.frame(frame_idx)
            beta = min(1.0, BETA_START + frame_idx * (1.0 - BETA_START) / BETA_FRAMES)

            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                writer.add_scalar("beta", beta, frame_idx)
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break

            if len(buffer) < params['replay_initial']:
                continue

            optimizer.zero_grad()
            batch, batch_indices, batch_weights = buffer.sample(params['batch_size'], beta)
            loss_v, sample_prios_v = calc_loss(batch, batch_weights, net, tgt_net.target_model,
                                               params['gamma'], device=device)
            loss_v.backward()
            optimizer.step()
            buffer.update_priorities(batch_indices, sample_prios_v.data.cpu().numpy())

            if frame_idx % params['target_net_sync'] == 0:
                values_v = torch.FloatTensor(batch_values).to(device)
                out_logits_v, out_values_v = net(states_v)

                loss_value_v = F.mse_loss(out_values_v.squeeze(-1), values_v)
                loss_policy_v = -F.log_softmax(out_logits_v, dim=1) * probs_v
                loss_policy_v = loss_policy_v.sum(dim=1).mean()

                loss_v = loss_policy_v + loss_value_v
                loss_v.backward()
                optimizer.step()
                sum_loss += loss_v.item()
                sum_value_loss += loss_value_v.item()
                sum_policy_loss += loss_policy_v.item()

            tb_tracker.track("loss_total", sum_loss / TRAIN_ROUNDS, step_idx)
            tb_tracker.track("loss_value", sum_value_loss / TRAIN_ROUNDS, step_idx)
            tb_tracker.track("loss_policy", sum_policy_loss / TRAIN_ROUNDS, step_idx)

            # evaluate net
            if step_idx % EVALUATE_EVERY_STEP == 0:
                win_ratio = evaluate(net, best_net.target_model, rounds=EVALUATION_ROUNDS, device=device)
                print("Net evaluated, win ratio = %.2f" % win_ratio)
                writer.add_scalar("eval_win_ratio", win_ratio, step_idx)
                if win_ratio > BEST_NET_WIN_RATIO:
                    print("Net is better than cur best, sync")
                    best_net.sync()
                    best_idx += 1
                    file_name = os.path.join(saves_path, "best_%03d_%05d.dat" % (best_idx, step_idx))
                    torch.save(net.state_dict(), file_name)
                    mcts_store.clear()
        batch_targets = [calc_target(net, exp.reward, exp.last_state)
                         for exp in batch]
        # train
        optimizer.zero_grad()
        states_v = torch.FloatTensor(batch_states)
        net_q_v = net(states_v)
        target_q = net_q_v.data.numpy().copy()
        target_q[range(BATCH_SIZE), batch_actions] = batch_targets
        target_q_v = torch.tensor(target_q)
        loss_v = mse_loss(net_q_v, target_q_v)
        loss_v.backward()
        optimizer.step()

        # handle new rewards
        new_rewards = exp_source.pop_total_rewards()
        if new_rewards:
            done_episodes += 1
            reward = new_rewards[0]
            total_rewards.append(reward)
            mean_rewards = float(np.mean(total_rewards[-100:]))
            print("%d: reward: %6.2f, mean_100: %6.2f, epsilon: %.2f, episodes: %d" % (
                step_idx, reward, mean_rewards, selector.epsilon, done_episodes))
            writer.add_scalar("reward", reward, step_idx)
            writer.add_scalar("reward_100", mean_rewards, step_idx)
            writer.add_scalar("epsilon", selector.epsilon, step_idx)
            writer.add_scalar("episodes", done_episodes, step_idx)
            if mean_rewards > 195:
                print("Solved in %d steps and %d episodes!" % (step_idx, done_episodes))
                break
    writer.close()
        if exp.last_state is None:
            batch_qvals.extend(calc_qvals(cur_rewards))
            cur_rewards.clear()
            batch_episodes += 1

        # handle new rewards
        new_rewards = exp_source.pop_total_rewards()
        if new_rewards:
            done_episodes += 1
            reward = new_rewards[0]
            total_rewards.append(reward)
            mean_rewards = float(np.mean(total_rewards[-100:]))
            print("%d: reward: %6.2f, mean_100: %6.2f, episodes: %d" % (
                step_idx, reward, mean_rewards, done_episodes))
            writer.add_scalar("reward", reward, step_idx)
            writer.add_scalar("reward_100", mean_rewards, step_idx)
            writer.add_scalar("episodes", done_episodes, step_idx)
            if mean_rewards > 195:
                print("Solved in %d steps and %d episodes!" % (step_idx, done_episodes))
                break

        if batch_episodes < EPISODES_TO_TRAIN:
            continue

        optimizer.zero_grad()
        states_v = torch.FloatTensor(batch_states)
        batch_actions_t = torch.LongTensor(batch_actions)
        batch_qvals_v = torch.FloatTensor(batch_qvals)

        logits_v = net(states_v)
Exemplo n.º 47
0
def main():
    """Train the XLSor segmentation network on chest X-ray data.

    NOTE(review): relies on module-level globals not visible in this chunk
    (``args``, ``IMG_MEAN``, ``torch_ver``, ``start``, and the imported
    model/data helpers) — confirm their definitions at the top of the file.
    """
    # TensorBoard event files go next to the model snapshots
    writer = SummaryWriter(args.snapshot_dir)
    
    # restrict visible GPUs unless the user passed the literal string 'None'
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    xlsor = XLSor(num_classes=args.num_classes)
    print(xlsor)

    # Warm-start from a backbone checkpoint, copying every layer except
    # the classifier head ('fc.*').
    saved_state_dict = torch.load(args.restore_from)
    new_params = xlsor.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0]=='fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i] 
    
    xlsor.load_state_dict(new_params)


    model = DataParallelModel(xlsor)
    model.train()
    model.float()
    model.cuda()    

    criterion = Criterion()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()
    
    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)


    # max_iters sizes the dataset so one pass over the loader covers the
    # whole training schedule (num_steps batches).
    trainloader = data.DataLoader(XRAYDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN), 
                    batch_size=args.batch_size, shuffle=True, num_workers=16, pin_memory=True)

    optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, xlsor.parameters()), 'lr': args.learning_rate }],
                lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)
    optimizer.zero_grad()

    # upsample predictions back to the input resolution for visualisation
    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)


    for i_iter, batch in enumerate(trainloader):
        i_iter += args.start_iters  # continue counting when resuming
        images, labels, _, _ = batch
        images = images.cuda()
        labels = labels.float().cuda()
        # legacy path for PyTorch 0.3 where tensors had to be wrapped
        if torch_ver == "0.3":
            images = Variable(images)
            labels = Variable(labels)

        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter)
        preds = model(images, args.recurrence)

        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        # scalar logging every 100 iterations
        if i_iter % 100 == 0:
            writer.add_scalar('learning_rate', lr, i_iter)
            writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

        # image logging every 100 iterations
        if i_iter % 100 == 0:
            images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
            # DataParallel may return nested lists of outputs; unwrap twice
            if isinstance(preds, list):
                preds = preds[0]
            if isinstance(preds, list):
                preds = preds[0]
            preds = interp(preds)
            for index, img in enumerate(images_inv):
                writer.add_image('Images/'+str(index), torch.from_numpy(img/255.).permute(2,0,1), i_iter)
                writer.add_image('Labels/'+str(index), labels[index], i_iter)
                # binarize the sigmoid output at 0.5 for display
                writer.add_image('preds/'+str(index), (preds[index]>0.5).float(), i_iter)

        print('iter = {} of {} completed, loss = {}'.format(i_iter, args.num_steps, loss.data.cpu().numpy()))

        # final snapshot, then stop
        if i_iter >= args.num_steps-1:
            print('save model ...')
            torch.save(xlsor.state_dict(),osp.join(args.snapshot_dir, 'XLSor_'+str(args.num_steps)+'.pth'))
            break

        # periodic snapshot
        if i_iter % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(xlsor.state_dict(),osp.join(args.snapshot_dir, 'XLSor_'+str(i_iter)+'.pth'))

    end = timeit.default_timer()
    print(end-start,'seconds')
Exemplo n.º 48
0
def train(args):
    """Set up and train the TCN on the seismic data.

    Trains on mini-batches from the train split, evaluates on the validation
    split every 20 epochs, logs losses and an inverted-impedance figure to
    tensorboard, and finally writes the true and inverted acoustic-impedance
    sections to a ``results/`` directory.

    Args:
        args: Namespace with at least ``batch_size``, ``tcn_layer_channels``,
            ``kernel_size``, ``dropout``, ``weight_decay``, ``lr`` and
            ``n_epoch``.
    """
    # Create a writer object to log events during training
    writer = SummaryWriter(pjoin('runs', 'exp_1'))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load splits
    x_train, y_train, x_val, y_val, seismic = train_val_split(args)

    # Convert to torch tensors in the form (N, C, L)
    x_train = torch.from_numpy(np.expand_dims(x_train, 1)).float().to(device)
    y_train = torch.from_numpy(np.expand_dims(y_train, 1)).float().to(device)
    x_val = torch.from_numpy(np.expand_dims(x_val, 1)).float().to(device)
    y_val = torch.from_numpy(np.expand_dims(y_val, 1)).float().to(device)
    seismic = torch.from_numpy(np.expand_dims(seismic, 1)).float().to(device)

    # Set up the dataloader for training dataset
    dataset = SeismicLoader(x_train, y_train)
    train_loader = DataLoader(dataset=dataset,
                              batch_size=args.batch_size,
                              shuffle=False)

    # Temporal convolutional network: 1 input channel, 1 output channel
    model = TCN(1, 1, args.tcn_layer_channels, args.kernel_size,
                args.dropout).to(device)

    # Set up loss
    criterion = torch.nn.MSELoss()

    # Define Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 weight_decay=args.weight_decay,
                                 lr=args.lr)

    # Loss histories, seeded with inf so the first progress print has a value
    train_loss = [np.inf]
    val_loss = [np.inf]
    # FIX: renamed from `iter`, which shadowed the builtin `iter()`.
    global_step = 0
    # Start training
    for epoch in range(args.n_epoch):
        for x, y in train_loader:
            model.train()
            optimizer.zero_grad()
            y_pred = model(x)
            loss = criterion(y_pred, y)
            loss.backward()
            optimizer.step()
            train_loss.append(loss.item())
            writer.add_scalar(tag='Training Loss',
                              scalar_value=loss.item(),
                              global_step=global_step)
            if epoch % 20 == 0:
                with torch.no_grad():
                    model.eval()
                    y_pred = model(x_val)
                    loss = criterion(y_pred, y_val)
                    val_loss.append(loss.item())
                    writer.add_scalar(tag='Validation Loss',
                                      scalar_value=loss.item(),
                                      global_step=global_step)
            print(
                'epoch:{} - Training loss: {:0.4f} | Validation loss: {:0.4f}'.
                format(epoch, train_loss[-1], val_loss[-1]))

            if epoch % 100 == 0:
                # Visualize the full inverted section every 100 epochs
                with torch.no_grad():
                    model.eval()
                    AI_inv = model(seismic)
                fig, ax = plt.subplots()
                ax.imshow(AI_inv[:, 0].detach().cpu().numpy().squeeze().T,
                          cmap="rainbow")
                ax.set_aspect(4)
                writer.add_figure('Inverted Acoustic Impedance', fig,
                                  global_step)
        # NOTE: the step counter advances once per epoch (original behavior),
        # so all tensorboard points within an epoch share the same step.
        global_step += 1

    writer.close()

    # Set up directory to save results
    results_directory = 'results'
    # Standardize the held-out seismic section before the final inversion
    seismic_offsets = np.expand_dims(marmousi_seismic().squeeze()[:, 100:600],
                                     1)
    seismic_offsets = torch.from_numpy(
        (seismic_offsets - seismic_offsets.mean()) /
        seismic_offsets.std()).float()
    with torch.no_grad():
        model.cpu()
        model.eval()
        AI_inv = model(seismic_offsets)

    # FIX: collapsed the redundant if/else that printed the same message in
    # both branches; make the results directory only if it does not exist.
    if not os.path.exists(results_directory):
        os.mkdir(results_directory)
    print('Saving results...')

    np.save(pjoin(results_directory, 'AI.npy'), marmousi_model().T[:, 100:600])
    np.save(pjoin(results_directory, 'AI_inv.npy'),
            AI_inv.detach().numpy().squeeze())
    print('Results successfully saved.')
Exemplo n.º 49
0
class Experiment:
    """A single stochastic-optimization experiment.

    Wraps a loss (quadratic or logistic regression), the iteration-dependent
    schedules for the step size (gamma), smoothing parameter (tau) and
    mini-batch size, plus optional tensorboard logging and result pickling.
    """

    def __init__(self,
                 lossfunc,
                 task_params,
                 n: int,
                 scale: float,
                 alpha: float,
                 beta: float,
                 tau: float = None,
                 gamma: float = None,
                 batch_size: float = None,
                 save_folder: str = None,
                 ):
        # Schedule hyper-parameters; a value of None means "use the default
        # iteration-dependent schedule" (see get_tau/get_gamma/get_batch_size).
        self.alpha = alpha
        self.beta = beta
        self.tau = tau
        self.gamma = gamma
        self.batch_size = batch_size
        self.save_folder = save_folder

        # FIX: always define self.writer so run() cannot raise an
        # AttributeError when tensorboard is unavailable or no save folder
        # was requested (previously the attribute was simply never set).
        self.writer = None
        if SummaryWriter is not None and self.save_folder is not None:
            self.writer = SummaryWriter(save_folder)

        if lossfunc == LossType.QUADRATIC:
            self.loss = Quadratic(n, scale, task_params)
        elif lossfunc == LossType.LOGREG:
            self.loss = Logreg(n, scale, task_params)
        else:
            # FIX: fail fast instead of leaving self.loss undefined and
            # crashing later with an unrelated AttributeError.
            raise ValueError('Unsupported loss type: {}'.format(lossfunc))

        self.mu = self.loss.get_mu()
        self.starting_point = np.ones(n) * 10

    def find_optimal_solution(self):
        """Solve the deterministic problem to obtain a reference optimum."""
        z = minimize(self.loss.phi, self.starting_point)
        return z

    def get_gamma(self, iteration):
        """Step size at `iteration`: fixed if given, else 1/(mu*(k+1))."""
        if self.gamma is not None:
            return self.gamma

        return 1 / (self.mu * (iteration + 1))

    def get_tau(self, iteration):
        """Smoothing parameter at `iteration`: fixed if given, else (k+1)**-beta."""
        if self.tau is not None:
            return self.tau
        return 1 / ((iteration + 1) ** self.beta)

    def get_batch_size(self, iteration):
        """Mini-batch size at `iteration`: fixed if given, else int((k+1)**alpha)."""
        if self.batch_size is not None:
            return self.batch_size

        return int((iteration + 1) ** self.alpha)

    def run(self, num_iters):
        """Run the optimization loop up to `num_iters` iterations.

        Tracks the distance to the reference optimum, logs it to
        tensorboard (when available) and pickles the trajectory to
        `save_folder` (when given).
        """
        errors = []
        xs = []

        opt = self.find_optimal_solution()
        err = 0

        # NOTE(review): the loop intentionally starts at k=1000 in the
        # original code — confirm this offset is meant to skip the schedules'
        # warm-up region.
        loop = trange(1000, num_iters)
        x_prev = self.starting_point
        x_next = x_prev
        for k in loop:

            tau = self.get_tau(k)
            gamma = self.get_gamma(k)
            batch_size = self.get_batch_size(k)

            x_next = self.loss.step(x_prev, gamma, tau, batch_size)
            err = la.norm(x_next - opt.x, 2)
            loop.set_description("error: %.3e; bsz: %d" % (err, batch_size))
            errors.append(err)
            # FIX: writer is optional; only log when it exists.
            if self.writer is not None:
                self.writer.add_scalar('error', err / errors[0], k)
            xs.append(x_next)
            x_prev = x_next

        data = {"x": xs, "errors": errors}
        # FIX: only persist results when a save folder was configured;
        # os.path.join(None, ...) would raise otherwise.
        if self.save_folder is not None:
            with open(os.path.join(self.save_folder, "data.pkl"), "wb") as file:
                pickle.dump(data, file)

        print("Optimizing finished. Final error: %.3e, f(x) = %.3e, optimal = %.3e"
              % (err, self.loss.phi(x_next), opt.fun))
Exemplo n.º 50
0
    def train(self):
        """The function for the pre-train phase.

        Trains the backbone with a plain cross-entropy classification
        objective, runs a meta-style (shot/query) validation after every
        epoch, logs step/epoch metrics to tensorboardX, and checkpoints both
        the best-validation model and periodic snapshots.
        """

        # Set the pretrain log (serialized with torch.save each epoch)
        trlog = {}
        trlog['args'] = vars(self.args)
        trlog['train_loss'] = []
        trlog['val_loss'] = []
        trlog['train_acc'] = []
        trlog['val_acc'] = []
        trlog['max_acc'] = 0.0
        trlog['max_acc_epoch'] = 0

        # Set the timer
        timer = Timer()
        # Set global count to zero
        global_count = 0
        # Set tensorboardX
        writer = SummaryWriter(comment=self.args.save_path)

        # Start pretrain
        for epoch in range(1, self.args.pre_max_epoch + 1):
            # Set the model to train mode
            self.model.train()
            self.model.mode = 'pre'
            # Set averager classes to record training losses and accuracies
            train_loss_averager = Averager()
            train_acc_averager = Averager()

            # Using tqdm to read samples from train loader
            tqdm_gen = tqdm.tqdm(self.train_loader)
            for i, batch in enumerate(tqdm_gen, 1):
                # Update global count number
                global_count = global_count + 1
                if torch.cuda.is_available():
                    data, _ = [_.cuda() for _ in batch]
                else:
                    data = batch[0]
                label = batch[1]
                if torch.cuda.is_available():
                    label = label.type(torch.cuda.LongTensor)
                else:
                    label = label.type(torch.LongTensor)
                # Output logits for model
                logits = self.model(data)
                # Calculate train loss
                loss = F.cross_entropy(logits, label)
                # Calculate train accuracy
                acc = count_acc(logits, label)
                # Write the tensorboardX records
                writer.add_scalar('data/loss', float(loss), global_count)
                writer.add_scalar('data/acc', float(acc), global_count)
                # Print loss and accuracy for this step
                tqdm_gen.set_description(
                    'Epoch {}, Loss={:.4f} Acc={:.4f}'.format(
                        epoch, loss.item(), acc))

                # Add loss and accuracy for the averagers
                train_loss_averager.add(loss.item())
                train_acc_averager.add(acc)

                # Loss backwards and optimizer updates
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            # Update learning rate
            self.lr_scheduler.step()

            # Update the averagers
            train_loss_averager = train_loss_averager.item()
            train_acc_averager = train_acc_averager.item()

            # Start validation for this epoch, set model to eval mode
            self.model.eval()
            self.model.mode = 'preval'

            # Set averager classes to record validation losses and accuracies
            val_loss_averager = Averager()
            val_acc_averager = Averager()

            # Generate the labels for test (one label per query sample)
            label = torch.arange(self.args.way).repeat(self.args.val_query)
            if torch.cuda.is_available():
                label = label.type(torch.cuda.LongTensor)
            else:
                label = label.type(torch.LongTensor)
            label_shot = torch.arange(self.args.way).repeat(self.args.shot)
            if torch.cuda.is_available():
                label_shot = label_shot.type(torch.cuda.LongTensor)
            else:
                label_shot = label_shot.type(torch.LongTensor)

            # Print previous information
            if epoch % 10 == 0:
                print('Best Epoch {}, Best Val acc={:.4f}'.format(
                    trlog['max_acc_epoch'], trlog['max_acc']))
            # Run meta-validation: split each batch into support (shot) and
            # query samples
            for i, batch in enumerate(self.val_loader, 1):
                if torch.cuda.is_available():
                    data, _ = [_.cuda() for _ in batch]
                else:
                    data = batch[0]
                p = self.args.shot * self.args.way
                data_shot, data_query = data[:p], data[p:]
                logits = self.model((data_shot, label_shot, data_query))
                loss = F.cross_entropy(logits, label)
                acc = count_acc(logits, label)
                val_loss_averager.add(loss.item())
                val_acc_averager.add(acc)

            # Update validation averagers
            val_loss_averager = val_loss_averager.item()
            val_acc_averager = val_acc_averager.item()
            # Write the tensorboardX records
            writer.add_scalar('data/val_loss', float(val_loss_averager), epoch)
            writer.add_scalar('data/val_acc', float(val_acc_averager), epoch)
            # Print loss and accuracy for this epoch
            print('Epoch {}, Val, Loss={:.4f} Acc={:.4f}'.format(
                epoch, val_loss_averager, val_acc_averager))

            # Update best saved model
            if val_acc_averager > trlog['max_acc']:
                trlog['max_acc'] = val_acc_averager
                trlog['max_acc_epoch'] = epoch
                self.save_model('max_acc')
            # Save model every 10 epochs
            if epoch % 10 == 0:
                self.save_model('epoch' + str(epoch))

            # Update the logs
            trlog['train_loss'].append(train_loss_averager)
            trlog['train_acc'].append(train_acc_averager)
            trlog['val_loss'].append(val_loss_averager)
            trlog['val_acc'].append(val_acc_averager)

            # Save log
            torch.save(trlog, osp.join(self.args.save_path, 'trlog'))

            if epoch % 10 == 0:
                print('Running Time: {}, Estimated Time: {}'.format(
                    timer.measure(),
                    # BUG FIX: this loop runs to pre_max_epoch, so the ETA
                    # fraction must be epoch / pre_max_epoch (it previously
                    # divided by args.max_epoch, skewing the estimate).
                    timer.measure(epoch / self.args.pre_max_epoch)))
        writer.close()
Exemplo n.º 51
0
def main(_run, _log,
         seed,
         dataset,
         filter_class_ids,
         input_image_size,
         patch_size,
         batch_size,
         num_epochs,
         loss_lambda):
    """Train the prototype-based semantic autoencoder.

    Sets the RNG seed, loads the dataset splits, then trains for
    `num_epochs` epochs, logging step/epoch losses to tensorboard.
    Prototypes are reprojected to their nearest training patches every 5
    epochs; visualizations, reconstructions and intermediate weights are
    saved every 1000 epochs and at the final epoch.

    Returns:
        The loss accumulated over the final epoch (None when the user
        aborts the all-classes confirmation prompt).
    """

    # Set the RNG seed for torch
    torch.manual_seed(seed)

    # Check input parameters are in expected format
    assert filter_class_ids is None or type(filter_class_ids) is list
    if type(filter_class_ids) is list:
        assert all(type(class_id) is int for class_id in filter_class_ids)
    else:
        # No class filter: ask for explicit confirmation before a full run
        _log.warning('Training on all classes!!')
        confirm = input('Continue? [y/n] ')
        if confirm.lower() != 'y':
            return None

    # Provision the `sacred` run directory for this experiment
    RUN_DIR = _run.observers[0].dir
    LOGS_DIR, IMAGES_DIR, PROTOTYPES_DIR = _provision_run_dir(RUN_DIR)

    # Initialize log writer for tensorboard
    writer = SummaryWriter(LOGS_DIR)

    # Load datasets for training and testing
    Dataset = DATASET_MAP[dataset]
    data_dir = DATA_DIR_MAP[dataset]
    train_dataset, train_dataset_with_non_random_transformation, \
        test_dataset = Dataset.load_dataset_splits(
        data_dir, input_image_size, filter_class_ids)

    # Initialize the data loader
    train_dataloader = DataLoader(
        train_dataset, collate_fn=Dataset.custom_collate_fn,
        batch_size=batch_size, shuffle=True)

    # Define variables for attributes (labels are 1-based; 0 is padding)
    num_attributes = train_dataset.num_attributes
    all_attribute_labels = range(1, num_attributes + 1)
    attribute_names = [train_dataset.get_attribute(al).name
                       for al in all_attribute_labels]

    # Initialize the model
    model = _make_cuda(SemanticAutoencoder(
        input_image_size, patch_size, num_attributes))

    # Initialize the loss function and optimizer
    epoch_loss = None
    criterion = _make_cuda(CustomLoss2(lambda_val=loss_lambda))
    # FIX: `ifilter` is Python 2-only (itertools.ifilter was removed in
    # Python 3); the builtin `filter` is the drop-in equivalent here.
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()))

    # Initiate training
    pbar, steps = tqdm(range(1, num_epochs + 1)), 0
    for epoch in pbar:
        epoch_loss = 0.

        model.train()  # Setting the model in training mode for training
        for image, label, attribute_labels, padding_idx in train_dataloader:
            steps += 1  # Incrementing the global step
            model.zero_grad()  # Clearing the gradients for each mini-batch

            # Create the input variable and get the output from the model
            x = _make_cuda(torch.autograd.Variable(image))
            z, z_patches, reconstructed_x = model(x)

            # Get the associated prototypes for each image in the batch
            prototype_labels = _make_cuda(attribute_labels)
            positive_prototypes = model.prototypes(prototype_labels)

            # Get the *non-associated* prototypes for each image in the batch
            negative_prototypes = list()
            for img_al in attribute_labels:
                negative_al = _make_cuda(torch.LongTensor(list(filter(
                    lambda al: al not in img_al,
                    all_attribute_labels))))
                negative_prototypes.append(model.prototypes(negative_al))

            # Compute the loss
            loss = criterion(reconstructed_x, z_patches,
                             positive_prototypes, padding_idx, x,
                             negative_prototypes=negative_prototypes)

            # Do backprop and update the weights
            loss.backward()
            optimizer.step()

            # Update the epoch loss and add the step loss to tensorboard
            epoch_loss += loss.item()
            writer.add_scalar('loss/step_loss', loss, steps)

        # Add the epoch loss to tensorboard and update the progressbar
        writer.add_scalar('loss/epoch_loss', epoch_loss, steps)
        pbar.set_postfix(epoch_loss=epoch_loss)

        model.eval()  # Setting the model in evaluation mode for testing
        if (epoch % 5 == 0) or (epoch == num_epochs):
            # Compute the nearest patch for each prototype
            nearest_patches_for_prototypes = \
                model.get_nearest_patches_for_prototypes(
                    train_dataset_with_non_random_transformation)

            # Update each prototype to be equal to the nearest patch
            model.reproject_prototypes(nearest_patches_for_prototypes)

            if (epoch % 1000 == 0) or (epoch == num_epochs):
                # Save the prototype visualization
                save_prototype_patch_visualization(
                    model, train_dataset_with_non_random_transformation,
                    nearest_patches_for_prototypes, PROTOTYPES_DIR)

                # Save the reconstructed images for the test dataset
                # for every 1000 epochs
                for i_, (image, image_label, attribute_labels, _) \
                        in enumerate(test_dataset):
                    x = image.view((1,) + image.size())
                    x = _make_cuda(torch.autograd.Variable(x))
                    z, z_patches, reconstructed_x = model(x)

                    reconstructed_image = \
                        get_image_from_tensor(reconstructed_x)
                    reconstructed_image.save(
                        os.path.join(IMAGES_DIR, '%d-%d.png' % (epoch, i_)))

                # Save the intermediate model
                model.save_weights(os.path.join(RUN_DIR, MODEL_FILE_NAME))

        # Add the prototype embeddings to tensorboard at the end
        # (row 0 of the embedding matrix is the padding prototype, hence [1:])
        if epoch == num_epochs:
            writer.add_embedding(
                model.prototypes.weight[1:],
                metadata=attribute_names,
                global_step=steps)

    # Save the final model and commit the tensorboard logs
    model.save_weights(os.path.join(RUN_DIR, MODEL_FILE_NAME))
    writer.close()

    return epoch_loss
Exemplo n.º 52
0
                loss_aux_classifier=loss_aux_classifier_mean.measure,
                epoch=i + 1)
        lr_encoder.step()
        lr_decoder.step()
        lr_discriminator.step()
        margin *= decay_margin
        equilibrium *= decay_equilibrium
        # margin non puo essere piu alto di equilibrium
        if margin > equilibrium:
            equilibrium = margin
        lambda_mse *= decay_mse
        if lambda_mse > 1:
            lambda_mse = 1
        progress.finish()

        writer.add_scalar('loss_encoder', loss_encoder_mean.measure,
                          step_index)
        writer.add_scalar('loss_decoder', loss_decoder_mean.measure,
                          step_index)
        writer.add_scalar('loss_discriminator',
                          loss_discriminator_mean.measure, step_index)
        writer.add_scalar('loss_reconstruction', loss_nle_mean.measure,
                          step_index)
        writer.add_scalar('loss_kld', loss_kld_mean.measure, step_index)
        writer.add_scalar('loss_aux_classifier',
                          loss_aux_classifier_mean.measure, step_index)
        writer.add_scalar('gan_gen', gan_gen_eq_mean.measure, step_index)
        writer.add_scalar('gan_dis', gan_dis_eq_mean.measure, step_index)
        step_index += 1

    exit(0)
        for state in range(self.env.observation_space.n):
            state_values = [self.calc_action_value(state, action)
                            for action in range(self.env.action_space.n)]
            self.values[state] = max(state_values)


if __name__ == "__main__":
    # Run value iteration: alternate random exploration with greedy test
    # episodes until the average reward clears the solve threshold.
    test_env = gym.make(ENV_NAME)
    agent = Agent()
    writer = SummaryWriter(comment="-v-iteration")

    iter_no = 0
    best_reward = 0.0
    solved = False
    while not solved:
        iter_no += 1
        agent.play_n_random_steps(100)
        agent.value_iteration()

        # Average the greedy-policy return over TEST_EPISODES rollouts
        reward = sum(agent.play_episode(test_env)
                     for _ in range(TEST_EPISODES)) / TEST_EPISODES
        writer.add_scalar("reward", reward, iter_no)
        if reward > best_reward:
            print("Best reward updated %.3f -> %.3f" % (best_reward, reward))
            best_reward = reward
        if reward > 0.80:
            print("Solved in %d iterations!" % iter_no)
            solved = True
    writer.close()
Exemplo n.º 54
0
def train(args, train_dataset, model, tokenizer):
    """Train the model.

    Runs the full fine-tuning loop: optional apex fp16 and distributed
    setup, gradient accumulation, linear warmup scheduling, periodic
    tensorboard logging, and checkpointing both every ``args.save_steps``
    optimization steps and at the end of every epoch.

    Args:
        args: parsed command-line arguments (batch sizes, lr, fp16 flags,
            local_rank, max_steps, logging/save intervals, ...).
        train_dataset: torch Dataset of training features.
        tokenizer: passed through to ``evaluate`` during in-training eval.

    Returns:
        Tuple of (global_step, average training loss per optimization step).
    """

    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,num_workers=16,pin_memory=False)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    # (translated) It seems the pre-trained model weights are read in /
    # frozen here; the 'bert2.bert' parameters are excluded from the
    # optimizer groups below.
    # for name, param in model.named_parameters():
	#     print(name,param.requires_grad)
	#     if('bert2.bert' in name):
    #         param.requires_grad = False
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) and 'bert2.bert'not in n], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)and 'bert2.bert'not in n], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # optimizer = torch.optim.SGD(optimizer_grouped_parameters,lr= args.learning_rate,momentum=0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
        model.to(args.device)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0            # ---- (translated) from here differs from the reference implementation
    tr_loss, logging_loss = 0.0, 0.0  # running training loss (original note was unsure; it accumulates the scaled loss)
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])   # local_rank == -1 means single-process (see sampler choice above)
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    # f = open('o6.txt','a+',encoding='utf-8')
    # ff = open('each_loss2.txt','a+',encoding='utf-8')
    for epoch_idx, epoch in tqdm(enumerate(train_iterator), desc='training epoches'):
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        # if epoch_idx < 1:
        #         continue  # temp
        for step, batch in tqdm(enumerate(epoch_iterator), desc='training batches'):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'q_input_ids':       batch[0],
                      'q_attention_mask':  batch[1], 
                      'q_token_type_ids':  batch[2],  
                      'p_input_ids':       batch[3],
                      'p_attention_mask':  batch[4], 
                      'p_token_type_ids':  batch[5],  
                      'start_positions':   batch[6], 
                      'end_positions':     batch[7],
                      'right_num':         batch[8]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)  
            # Append per-step loss to a debug log file
            with open('ori4.txt','a+',encoding='utf-8') as f:
                f.write(str(loss.mean())+'------'+str(epoch_idx)+'\n') 
            
            # ff.write(str(outputs[2].mean())+'---2---'+str(epoch_idx)+'\n') 
            with open('ori4_each.txt','a+',encoding='utf-8') as ff:
                ff.write(str(outputs[1].mean())+'---1---'+str(epoch_idx)+'\n')
                # ff.write(str(outputs[2].mean())+'---2---'+str(epoch_idx)+'\n')  
                # ff.write(str(outputs[3].mean())+'---3---'+str(epoch_idx)+'\n') 
            # with open('true_train_op_detail.txt','a+',encoding='utf-8') as f:       
            #     f.write(str(float(outputs[1]))+str(float(outputs[2]))+str(float(outputs[3]))+'------'+str(epoch_idx)+'\n') 
            # (translated) What comes out here is the loss -> total span extraction loss is the sum of a cross-entropy for the start and end positions.
            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()  # update model parameters
                scheduler.step()  # update the learning rate
                model.zero_grad() # reset gradients to zero
                global_step += 1  # one optimization step done

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

        if args.local_rank in [-1, 0]:
            # Save model checkpoint by epoch
            output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(epoch_idx))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
            model_to_save.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, 'training_args.bin'))
            logger.info("Saving model checkpoint to %s", output_dir)

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    # NOTE(review): raises ZeroDivisionError if no optimization step ran
    # (e.g. empty dataloader) — confirm callers guarantee at least one step.
    return global_step, tr_loss / global_step
Exemplo n.º 55
0
        mask = torch.Tensor([not done])
        next_state = torch.Tensor([next_state])
        reward = torch.Tensor([reward])

        memory.push(state, action, mask, next_state, reward)

        state = next_state

        if len(memory) > args.batch_size:
            for _ in range(args.updates_per_step):
                transitions = memory.sample(args.batch_size)
                batch = Transition(*zip(*transitions))

                value_loss, policy_loss = agent.update_parameters(batch)

                writer.add_scalar('loss/value', value_loss, updates)
                writer.add_scalar('loss/policy', policy_loss, updates)

                updates += 1
        if done:
            break

    writer.add_scalar('reward/train', episode_reward, i_episode)

    # Update param_noise based on distance metric
    if args.param_noise:
        episode_transitions = memory.memory[memory.position-t:memory.position]
        states = torch.cat([transition[0] for transition in episode_transitions], 0)
        unperturbed_actions = agent.select_action(states, None, None)
        perturbed_actions = torch.cat([transition[1] for transition in episode_transitions], 0)
Exemplo n.º 56
0
def train(
    version,
    dataroot='/data/nuscenes',
    nepochs=10000,
    gpuid=1,
    H=900,
    W=1600,
    resize_lim=(0.193, 0.225),
    final_dim=(128, 352),
    bot_pct_lim=(0.0, 0.22),
    rot_lim=(-5.4, 5.4),
    rand_flip=True,
    ncams=5,
    max_grad_norm=5.0,
    pos_weight=2.13,
    logdir='./runs',
    xbound=[-50.0, 50.0, 0.5],
    ybound=[-50.0, 50.0, 0.5],
    zbound=[-10.0, 10.0, 20.0],
    dbound=[4.0, 45.0, 1.0],
    bsz=4,
    nworkers=10,
    lr=1e-3,
    weight_decay=1e-7,
):
    """Train a BEV segmentation model on nuScenes.

    Builds the data loaders from the given voxel-grid and image-augmentation
    configuration, trains with Adam + gradient clipping, periodically logs
    loss/IoU to tensorboard, and checkpoints the model every ``val_step``
    iterations.

    Args:
        version: nuScenes split identifier ('mini' selects the small split,
            which also shortens the validation/checkpoint interval).
        dataroot: root directory of the nuScenes dataset.
        nepochs: number of passes over the training set.
        gpuid: CUDA device index; any negative value runs on CPU.
        H, W: raw camera image height/width in pixels.
        resize_lim, final_dim, bot_pct_lim, rot_lim, rand_flip: image
            augmentation parameters forwarded to the data pipeline.
        ncams: number of cameras sampled per example ('Ncams').
        max_grad_norm: gradient-clipping threshold.
        pos_weight: positive-class weight for the BCE-style SimpleLoss.
        logdir: directory for tensorboard logs and model checkpoints.
        xbound, ybound, zbound, dbound: voxel-grid extents/resolution
            ([min, max, step]) for the lift-splat grid.
        bsz: batch size.
        nworkers: DataLoader worker count.
        lr, weight_decay: Adam hyperparameters.
    """
    grid_conf = {
        'xbound': xbound,
        'ybound': ybound,
        'zbound': zbound,
        'dbound': dbound,
    }
    data_aug_conf = {
        'resize_lim': resize_lim,
        'final_dim': final_dim,
        'rot_lim': rot_lim,
        'H': H,
        'W': W,
        'rand_flip': rand_flip,
        'bot_pct_lim': bot_pct_lim,
        'cams': [
            'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT',
            'CAM_BACK', 'CAM_BACK_RIGHT'
        ],
        'Ncams': ncams,
    }
    trainloader, valloader = compile_data(version,
                                          dataroot,
                                          data_aug_conf=data_aug_conf,
                                          grid_conf=grid_conf,
                                          bsz=bsz,
                                          nworkers=nworkers,
                                          parser_name='segmentationdata')

    device = torch.device('cpu') if gpuid < 0 else torch.device(
        f'cuda:{gpuid}')

    model = compile_model(grid_conf, data_aug_conf, outC=1)
    model.to(device)

    opt = torch.optim.Adam(model.parameters(),
                           lr=lr,
                           weight_decay=weight_decay)

    # BUG FIX: was `.cuda(gpuid)`, which crashes when gpuid < 0 even though
    # the device selection above explicitly supports CPU. `.to(device)`
    # honors the same device choice as the model.
    loss_fn = SimpleLoss(pos_weight).to(device)

    writer = SummaryWriter(logdir=logdir)
    val_step = 1000 if version == 'mini' else 10000

    model.train()
    counter = 0
    for epoch in range(nepochs):
        # Re-seed per epoch so worker processes don't replay identical
        # augmentation streams across epochs.
        np.random.seed()
        for batchi, (imgs, rots, trans, intrins, post_rots, post_trans,
                     binimgs) in enumerate(trainloader):
            t0 = time()
            opt.zero_grad()
            preds = model(
                imgs.to(device),
                rots.to(device),
                trans.to(device),
                intrins.to(device),
                post_rots.to(device),
                post_trans.to(device),
            )
            binimgs = binimgs.to(device)
            loss = loss_fn(preds, binimgs)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            opt.step()
            counter += 1
            t1 = time()

            if counter % 10 == 0:
                print(counter, loss.item())
                # BUG FIX: log the detached Python scalar instead of the live
                # autograd tensor, which would pin the computation graph.
                writer.add_scalar('train/loss', loss.item(), counter)

            if counter % 50 == 0:
                _, _, iou = get_batch_iou(preds, binimgs)
                writer.add_scalar('train/iou', iou, counter)
                writer.add_scalar('train/epoch', epoch, counter)
                writer.add_scalar('train/step_time', t1 - t0, counter)

            # Validation and checkpointing share the same cadence; the two
            # original duplicate `counter % val_step` guards are merged.
            if counter % val_step == 0:
                val_info = get_val_info(model, valloader, loss_fn, device)
                print('VAL', val_info)
                writer.add_scalar('val/loss', val_info['loss'], counter)
                writer.add_scalar('val/iou', val_info['iou'], counter)

                model.eval()
                mname = os.path.join(logdir, "model{}.pt".format(counter))
                print('saving', mname)
                torch.save(model.state_dict(), mname)
                model.train()

    # Flush pending tensorboard events once training completes.
    writer.close()
Exemplo n.º 57
0
        ## PLAY GAME
        metrics['epsilon'] = eps.get(step)
        game = utils.play_game(env, agent = dqn_epsilon_agent, th = metrics['epsilon'], memory = memory)
        metrics['run_reward'], metrics['run_episode_steps'] = game['cum_reward'], game['steps']
        step += metrics['run_episode_steps']

        ## TRAIN
        for _ in range(metrics['run_episode_steps']//param['batch_size']):
            metrics['run_loss'] = train_batch(param)
            
        if metrics['episode'] % 500 == 0:
            target_dqn.load_state_dict(dqn.state_dict())

        # Test agent:
        if metrics['episode'] % 100 == 0:
            game = utils.play_game(env, agent = dqn_epsilon_agent, th = 0.02, memory = memory)
            metrics['test_reward'], metrics['test_episode_steps'] = game['cum_reward'], game['steps']
            checkpoint.save(dqn, step = step, step_loss = -metrics['test_reward'])


        # REPORTING
        if metrics['episode'] % 100 == 0:
            for key, val in metrics.items():
                writer.add_scalar(key, val, global_step = step)
                
        # Animate agent:
        if metrics['episode'] % 2500 == 0:
            print("episode: {}, step: {}, reward: {}".format(metrics['episode'], step, metrics['run_reward']))
            game = utils.play_game(env, agent = dqn_epsilon_agent, th = 0.02, render = True, memory = memory)
            writer.add_video("test_game", game['frames'], global_step = step)
Exemplo n.º 58
0
                print('Dividing learning rate by 10')
                optimizer.param_groups[0]['lr'] /= 10.

            best_val_loss.append(val_loss)

except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
    print('-' * 89, file=logfile)
    print('Exiting from training early', file=logfile)

# Load the best saved model.
model_load(args.save)

# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
print('=' * 89)
print('=' * 89, file=logfile)
logstr = '| End of training | test loss {:5.2f} | test ppl {:8.2f} | ' \
         'test bpc {:8.3f}'.format(test_loss, math.exp(test_loss), test_loss / math.log(2))
print(logstr)
print(logstr, file=logfile)
print('=' * 89)
print('=' * 89, file=logfile)

# added by Ju
writer.add_scalar('data/test_loss', test_loss, epoch)
writer.add_scalar('data/test_ppl', math.exp(test_loss), epoch)
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
    return train_obs_v, train_act_v, reward_bound, reward_mean


if __name__ == "__main__":
    # One-hot observation wrapper around the discrete FrozenLake environment.
    env = DiscreteOneHotWrapper(gym.make("FrozenLake-v0"))
    # env = gym.wrappers.Monitor(env, directory="mon", force=True)
    obs_size = env.observation_space.shape[0]
    n_actions = env.action_space.n

    net = Net(obs_size, HIDDEN_SIZE, n_actions)
    objective = nn.CrossEntropyLoss()
    optimizer = optim.Adam(params=net.parameters(), lr=0.01)
    writer = SummaryWriter(comment="-frozenlake-naive")

    # Cross-entropy method: keep only the top-PERCENTILE episodes from each
    # batch and train the policy net to imitate their actions.
    for iter_no, batch in enumerate(iterate_batches(env, net, BATCH_SIZE)):
        elite_obs, elite_acts, bound, mean_reward = filter_batch(batch, PERCENTILE)

        optimizer.zero_grad()
        scores = net(elite_obs)
        batch_loss = objective(scores, elite_acts)
        batch_loss.backward()
        optimizer.step()

        loss_value = batch_loss.item()
        print(f"{iter_no}: loss={loss_value:.3f}, "
              f"reward_mean={mean_reward:.1f}, reward_bound={bound:.1f}")
        for tag, value in (("loss", loss_value),
                           ("reward_bound", bound),
                           ("reward_mean", mean_reward)):
            writer.add_scalar(tag, value, iter_no)

        # Mean episode reward above 0.8 counts as solved.
        if mean_reward > 0.8:
            print("Solved!")
            break
    writer.close()
Exemplo n.º 60
0
    def train(self):
        ### DAE
        if self.args.baseline == 'DAE':
            p_dims = [
                self.args.latent_size, self.args.hidden_size_vae,
                self.args.input_size_vae
            ]
            model = MultiDAE(p_dims)
            optimizer = optim.Adam(model.parameters(),
                                   lr=self.args.lr_vae,
                                   weight_decay=0.0)
        else:
            model = VAE_RNN_rec(self.args.dims, self.args.input_size_rnn, self.args.embedding_size,\
                                self.args.num_layer, self.args.dropout_rate, self.args.bidirectional, self.args.class_num,\
                                self.args.hidden_size_rnn, self.args.condition, self.args.data_dir, self.args.activation)
            optimizer = {
                'encoder':
                optim.Adam(model.encoder.parameters(),
                           lr=self.args.lr_vae,
                           weight_decay=0.0),
                'decoder':
                optim.Adam(model.decoder.parameters(),
                           lr=self.args.lr_vae,
                           weight_decay=0.0)
            }

        model = model.to(self.args.device)
        dataloader = ItemRatingLoader(self.args.data_dir)

        if self.args.condition:
            optimizer['RNNEncoder'] = optim.Adam(model.RNNEncoder.parameters(),
                                                 lr=self.args.lr_rnn,
                                                 weight_decay=0.0)
            # weight = torch.FloatTensor([0.18, 0.28, 0.54]).to(args.device)
            # CEloss = nn.CrossEntropyLoss(weight = weight)
            CEloss = nn.CrossEntropyLoss()

        if self.args.load_model:
            model.load_state_dict(
                torch.load(self.args.log_dir + '/' + self.args.load_model +
                           '/' + 'model.pt'))
            self.args.timestamp = self.args.load_model[:10]
        if self.args.condition and self.args.load_pretrained:
            model.RNNEncoder.load_state_dict(
                torch.load(self.args.pretrained_dir + '/' +
                           self.args.load_pretrained + '/' + 'model.pt'))
            print("loaded pretrained model")

        writer = SummaryWriter(self.args.log_dir + "/" + self.args.timestamp +
                               "_" + self.args.config)
        train_data_rating = dataloader.load_train_data(
            os.path.join(self.args.data_dir, 'train.csv'))
        N = train_data_rating.shape[0]

        idxlist = np.array(range(N))

        # np.random.seed(98765)
        idx_pe = np.random.permutation(len(idxlist))
        idxlist = idxlist[idx_pe]

        update_count = 0.0
        for e in range(self.args.epoch):
            model.train()
            total_loss = 0
            if self.args.condition:
                train_data_item = dataloader.load_sequence_data_generator(
                    int(N / self.args.batch_size) + 1, 'train',
                    self.args.batch_size, idx_pe)

            for i, st_idx in enumerate(range(0, N, self.args.batch_size)):
                if self.args.condition:
                    order, item_feature, label = next(train_data_item)
                    end_idx = min(st_idx + self.args.batch_size, N)
                    x_unorder = train_data_rating[idxlist[st_idx:end_idx]]
                    X = x_unorder[order]
                else:
                    end_idx = min(st_idx + self.args.batch_size, N)
                    X = train_data_rating[idxlist[st_idx:end_idx]]

                if sparse.isspmatrix(X):
                    X = X.toarray()
                X = X.astype('float32')

                if self.args.condition:
                    optimizer["RNNEncoder"].zero_grad()
                    output, h = model.RNNEncoder(
                        item_feature.to(self.args.device))
                    rnn_loss = CEloss(output, label.to(self.args.device))
                    rnn_loss.backward(retain_graph=True)
                    # rnn_loss.backward()
                    # optimizer["RNNEncoder"].step()
                    # self.make_condition(h, label.data)
                    self.make_condition(h, label.data)

                if self.args.baseline:
                    optimizer.zero_grad()
                else:
                    optimizer["encoder"].zero_grad()
                    optimizer["decoder"].zero_grad()

                if self.args.condition:
                    if self.args.test_hidden == 'onehot':
                        h = self.tooh(label,
                                      self.args.class_num).to(self.args.device)
                    model_input = (torch.FloatTensor(X).to(self.args.device),
                                   h)
                    recon, mu, logvar = model(model_input)
                else:
                    if self.args.baseline:
                        recon = model(
                            torch.FloatTensor(X).to(self.args.device))
                    else:
                        recon, mu, logvar = model(
                            torch.FloatTensor(X).to(self.args.device))

                if self.args.baseline:
                    log_softmax_var = F.log_softmax(recon, dim=-1)
                    recon_loss = -torch.mean(
                        torch.sum(log_softmax_var *
                                  torch.FloatTensor(X).to(self.args.device),
                                  dim=-1))
                else:
                    recon_loss, kld = loss_function(
                        torch.FloatTensor(X).to(self.args.device), recon, mu,
                        logvar, self.args.dist)
                if self.args.anneal_steps > 0:
                    anneal = min(self.args.anneal_cap,
                                 1. * update_count / self.args.anneal_steps)
                    update_count += 1
                else:
                    anneal = self.args.anneal_cap
                if self.args.baseline:
                    vae_loss = recon_loss
                else:
                    vae_loss = recon_loss + anneal * kld
                vae_loss.backward()

                if self.args.baseline:
                    optimizer.step()
                else:
                    optimizer["encoder"].step()
                    optimizer["decoder"].step()

                if self.args.condition and self.args.joint_train:
                    optimizer["RNNEncoder"].step()
                # r20, r50, ndcg, rmse = self.test(model, anneal)
                # tensorboard
                if self.args.condition:
                    writer.add_scalar("Train rnn loss", rnn_loss,
                                      i + e * N / self.args.batch_size)
                writer.add_scalar("Train vae loss", vae_loss,
                                  i + e * N / self.args.batch_size)
                writer.add_scalar("Recon loss", recon_loss,
                                  i + e * N / self.args.batch_size)
                if not self.args.baseline:
                    writer.add_scalar("KLD", kld,
                                      i + e * N / self.args.batch_size)

                if i % 20 == 0:
                    if not self.args.baseline:
                        print(
                            f"recon : {recon_loss.item():.3} | kld : {kld.item():.3}"
                        )
                    if self.args.condition:
                        print(
                            f"epoch : {e} | train_vae_loss : {vae_loss.item():.3} | train_rnn_loss : {rnn_loss.item():.3}",
                            f"[{i*self.args.batch_size} / {N}", "(",
                            f"{(i/N*self.args.batch_size)*100:.3} %", ")]")
                    else:
                        print(
                            f"epoch : {e} | train_vae_loss : {vae_loss.item():.3}",
                            f"[{i*self.args.batch_size} / {N}", "(",
                            f"{(i/N*self.args.batch_size)*100:.3} %", ")]")
                total_loss += vae_loss
            # save model
            torch.save(
                model.state_dict(), self.args.log_dir + '/' +
                self.args.timestamp + '_' + self.args.config + '/model.pt')
            print("model saved!")
            print(
                f"epoch : {e} | train vae loss : {total_loss / (N/self.args.batch_size):.3} "
            )
            if self.args.condition:
                #save condition per epoch for evaluation
                for j in range(self.args.class_num):
                    hidden = "h_{}".format(j + 1)
                    torch.save(self.hidden_vecs[hidden],
                               f"{self.args.hiddenvec_dir}/{hidden}.pt")
                print("hidden vector saved!")
            # test per epoch
            r10, r20, r50, ndcg10, ndcg50, ndcg100 = self.test(model, anneal)
            # tensorboard
            # writer.add_scalar("Test_loss", test_loss, e)
            writer.add_scalar("Test_Recall10", r10, e)
            writer.add_scalar("Test_Recall20", r20, e)
            writer.add_scalar("Test_Recall50", r50, e)
            writer.add_scalar("Test_NDCG10", ndcg10, e)
            writer.add_scalar("Test_NDCG50", ndcg50, e)
            writer.add_scalar("Test_NDCG100", ndcg100, e)