Example 1
    def __init__(self,
                 layers=None,
                 n_batches=10,
                 loss='categorical_crossentropy',
                 metric='accuracy_score',
                 optimizer='adam',
                 optimizer_params=None,
                 save_weights=True,
                 shuffle=True,
                 random_seed=None):
        self.layers = layers if layers is not None else []  # avoid shared mutable default
        self.n_batches = n_batches  # mini-batches are generated in a stratified manner
        self.loss = loss
        self.metric = metric
        self.optimizer = optimizer
        self.optimizer_params = optimizer_params if optimizer_params is not None else {}
        self.save_weights = save_weights
        self.template = 'tmp/nn_{0}_{1:.4f}.json'
        self.shuffle = shuffle
        self.random_seed = random_seed

        self.best_layers_ = None
        self.best_epoch_ = None
        self.best_val_score_ = 0.
        self.n_layers_ = len(self.layers)

        self._loss = get_metric(self.loss)
        if self.loss == 'categorical_crossentropy':
            self._loss_grad = lambda actual, predicted: -(actual - predicted)
        self._metric = get_metric(self.metric)
        self._optimizer = get_optimizer(self.optimizer,
                                        **self.optimizer_params)
        self._tts = TrainTestSplitter(shuffle=self.shuffle,
                                      random_seed=self.random_seed)

        self._initialized = False
        self._training = False
        super(NNClassifier, self).__init__(
            _y_required=True)  # TODO: split into multiple NNs later
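Example 1 resolves its loss, evaluation metric, and optimizer from strings via get_metric and get_optimizer. A minimal sketch of such a string-to-callable registry, assuming names and behavior not shown in the snippet (the real implementation may differ):

import numpy as np

# Hypothetical registry sketch; the actual get_metric is not shown above.
def accuracy_score(actual, predicted):
    # both arguments one-hot encoded, shape (n_samples, n_classes)
    return np.mean(np.argmax(actual, axis=1) == np.argmax(predicted, axis=1))

def categorical_crossentropy(actual, predicted, eps=1e-15):
    return -np.mean(np.sum(actual * np.log(np.clip(predicted, eps, 1.0)), axis=1))

_METRICS = {
    'accuracy_score': accuracy_score,
    'categorical_crossentropy': categorical_crossentropy,
}

def get_metric(name):
    # Returning None for unknown names lets callers validate (see Example 2).
    return _METRICS.get(name)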
Example 2
 def __init__(self,
              n_neighbors=3,
              metric='euclidean',
              parallel=False,
              chunksize=5,
              n_jobs=-1):
     self.n_neighbors = n_neighbors
     self.parallel = parallel
     self.n_jobs = n_jobs if n_jobs != -1 else os.cpu_count()
     self.chunksize = chunksize
     self.metric = metrics.get_metric(metric)
     if not self.metric:
         raise ValueError(f'Unknown metric: {metric}')
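Resolving the metric inside __init__ is a fail-fast design: a misspelled name raises at construction time instead of during the first neighbor query. The check relies on metrics.get_metric returning a falsy value such as None for unknown names.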
Example 3
 def evaluate(self, X, y_true, metric='accuracy_score'):
     y_pred = self.predict(X)
     if (y_pred.ndim == 2 and y_pred.shape[1] > 1
             and (y_true.ndim == 1 or y_true.shape[1] == 1)):
         y_true = one_hot(y_true)  # expand class ids to match probability columns
     return get_metric(metric)(y_true, y_pred)
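Example 3 assumes a one_hot helper that expands integer class labels to match the shape of probabilistic predictions. A plausible sketch of that helper, which is not shown in the snippet:

import numpy as np

def one_hot(y, n_classes=None):
    # Hypothetical helper: maps integer labels of shape (n,) or (n, 1)
    # to a one-hot matrix of shape (n, n_classes).
    y = np.asarray(y).ravel().astype(int)
    n_classes = n_classes if n_classes is not None else int(y.max()) + 1
    out = np.zeros((y.size, n_classes))
    out[np.arange(y.size), y] = 1.0
    return out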
Example 4
 def set_metric(self) -> None:
     """Set metric"""
     self.metric = get_metric(self.cfg)
Example 5
                                           dsize=(padded_size, padded_size),
                                           interpolation=cv2.INTER_NEAREST)
            resized_mask_pred = resized_mask_pred[position[0]:position[1],
                                                  position[2]:position[3]]
        else:
            resized_mask_pred = cv2.resize(resized_mask_pred,
                                           dsize=(w, h),
                                           interpolation=cv2.INTER_NEAREST)

        resized_mask_pred = cv2.erode(resized_mask_pred, kernel, iterations=1)

    if min_length * skip_resize_ratio < max_length or max_length < skip_max_length:
        # copy so the in-place thresholding below leaves step2_out_img intact
        step2_gray_img = np.copy(step2_out_img)
        step2_gray_img[step2_gray_img > 0] = 1
        step2_fmeasure, step2_pfmeasure, step2_psnr, step2_drd = get_metric(
            step2_gray_img, gt_mask)

        cv2.imwrite(
            '%s/step2_normal/%s/%s.png' %
            (save_root_dir, dibco_year, img_name), step2_out_img)
        save_fmeasure['step2_normal'][dibco_year][0].append(step2_fmeasure)
        save_fmeasure['step2_normal'][dibco_year][1].append(step2_pfmeasure)
        save_fmeasure['step2_normal'][dibco_year][2].append(step2_psnr)
        save_fmeasure['step2_normal'][dibco_year][3].append(step2_drd)
        csv_tmp = [
            img_name, step2_fmeasure, step2_pfmeasure, step2_psnr, step2_drd
        ]
    else:
        step2_normal_or_img = np.bitwise_or(resized_mask_pred, step2_out_img)
        step2_normal_or_img_metric = np.copy(step2_normal_or_img)
        step2_normal_or_img_metric[step2_normal_or_img_metric > 0] = 1
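The four values unpacked from get_metric in this fragment (F-measure, pseudo-F-measure, PSNR, DRD) are the standard DIBCO document-binarization scores, consistent with the dibco_year variable. As an illustration, a minimal F-measure over binary masks; pseudo-F-measure and DRD need weighting machinery beyond this sketch:

import numpy as np

def binarization_fmeasure(pred, gt):
    # pred, gt: binary arrays with foreground pixels equal to 1
    tp = np.logical_and(pred == 1, gt == 1).sum()
    precision = tp / max(pred.sum(), 1)
    recall = tp / max(gt.sum(), 1)
    return 2 * precision * recall / max(precision + recall, 1e-12)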
Example 6
    def test(self):
        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )

        # (self): my per-image metric log
        mlog = {'psnr': [], 'ssim': [], 'mse': []}

        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )

                    # (self): log per-image metrics, converting tensors to numpy once
                    hr_np = hr.cpu().numpy()[0]
                    sr_np = sr.cpu().numpy()[0]
                    for m in ('psnr', 'ssim', 'mse'):
                        mlog[m].append(es.get_metric(hr_np, sr_np, m))

                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )
        # print metrics
        print(f"max psnr: {np.max(mlog['psnr'])}, mean psnr: {np.mean(mlog['psnr'])}")
        print(f"max ssim: {np.max(mlog['ssim'])}, mean ssim:  {np.mean(mlog['ssim'])}")
        print(f"max mse: {np.max(mlog['mse'])}, mean mse: {np.mean(mlog['mse'])}")

        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)
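Example 6 computes PSNR, SSIM, and MSE through an es.get_metric(hr, sr, name) helper. A sketch of such a dispatcher built on scikit-image; the es module itself is not shown, so the signature and the data-range and channel-layout assumptions below are guesses:

from skimage.metrics import (mean_squared_error,
                             peak_signal_noise_ratio,
                             structural_similarity)

def get_metric(reference, estimate, name):
    # Assumes CHW arrays in a 0-255 range, matching hr.cpu().numpy()[0] above.
    if name == 'psnr':
        return peak_signal_noise_ratio(reference, estimate, data_range=255)
    if name == 'ssim':
        return structural_similarity(reference, estimate,
                                     channel_axis=0, data_range=255)
    if name == 'mse':
        return mean_squared_error(reference, estimate)
    raise ValueError(f'Unknown metric: {name}')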
Example 7
            axes[1, 1].plot(flux_in, throughput, color=color)
            axes[1, 2].plot(flux_out, throughput, color=color)

            axes[1, 0].plot(flux_in_mean, flux_out_mean, marker='o', color=color)
            axes[1, 1].plot(flux_in_mean, throughput_mean, marker='o', color=color)
            axes[1, 2].plot(flux_out_mean, throughput_mean, marker='o', color=color)

    axes[0, 0].set_yscale('log')
    axes[0, 1].set_yscale('log')
    axes[0, 2].set_yscale('log')
    axes[0, 3].set_yscale('log')
    axes[1, 0].set_yscale('log')
    axes[1, 0].set_xscale('log')
    axes[1, 1].set_yscale('log')
    axes[1, 1].set_xscale('log')
    axes[1, 2].set_yscale('log')
    axes[1, 2].set_xscale('log')

    axes[0, 3].legend()
    plt.show()

if __name__ == '__main__':
    obs = ObservatoryMaster(params, iteration=0)

    metric_name = 'pix_yield'

    metric_config = metrics.get_metric(metric_name, master_cam=obs.cam)
    metric_test = MetricTester(obs, metric_config)
    debugging_noise(metric_test)
    debugging_throughput(metric_test)
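Here metrics.get_metric behaves as a factory that forwards keyword arguments such as master_cam to the selected metric. A minimal sketch of that pattern; the class and registry below are hypothetical stand-ins:

class PixYieldMetric:
    # Hypothetical stand-in for the real 'pix_yield' metric configuration.
    def __init__(self, master_cam=None):
        self.master_cam = master_cam

_REGISTRY = {'pix_yield': PixYieldMetric}

def get_metric(name, **kwargs):
    return _REGISTRY[name](**kwargs)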
Example 8
def main(args):
    """
    main function
    """

    model_config = json.load(open(args.model_config, 'r'))
    if args.use_cuda:
        paddle.set_device("gpu")
    else:
        paddle.set_device("cpu")

    if args.is_distributed:
        strategy = fleet.DistributedStrategy()
        fleet.init(is_collective=args.use_cuda, strategy=strategy)

    train_loader = create_dataloader(
        data_dir=args.train_data,
        model_config=model_config)

    valid_loader = create_dataloader(
        data_dir=args.valid_data,
        model_config=model_config)

    encoder_model = ProteinEncoderModel(model_config, name='protein')
    model = ProteinModel(encoder_model, model_config)
    if args.is_distributed:
        model = fleet.distributed_model(model)

    # Generate parameter names needed to perform weight decay.
    # All bias and LayerNorm parameters are excluded.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]

    grad_clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
    optimizer = paddle.optimizer.AdamW(
        learning_rate=1e-4,
        epsilon=1e-06,
        weight_decay=0.01,
        parameters=model.parameters(),
        apply_decay_param_fun=lambda x: x in decay_params)

    if args.is_distributed:
        optimizer = fleet.distributed_optimizer(optimizer)
    criterion = ProteinCriterion(model_config)
    metric = get_metric(model_config['task'])

    if args.init_model:
        print("load init_model")
        # for hot_start
        if args.hot_start == 'hot_start':
            model.load_dict(paddle.load(args.init_model))
        # for pre_train
        else:
            encoder_model.load_dict(paddle.load(args.init_model))

    train_sum_loss = 0
    valid_min_loss = float('inf')  # so the first validation pass always checkpoints
    steps_per_epoch = 20
    cur_step = 0
    while True:  # no stop condition; training runs until interrupted
        model.train()
        for (text, pos, label) in train_loader:
            # print("text: ", text)
            cur_step += 1
            pred = model(text, pos)
            label = label.reshape([-1, 1])
            pred = pred.reshape([-1, pred.shape[-1]])
            loss = criterion.cal_loss(pred, label)

            print("loss: ", loss)
            train_sum_loss += loss.numpy()
            loss.backward()
            optimizer.minimize(loss)
            model.clear_gradients()

            pred = pred.numpy()
            label = label.numpy()
            loss = loss.numpy()
            metric.update(pred, label, loss)
            if cur_step % 10 == 0:
                print('step %d, avg loss %.5f' % (cur_step, train_sum_loss / 10))
                metric.show()
                train_sum_loss = 0
                metric.clear()

            # save best_model
            if cur_step % steps_per_epoch == 0:
                print("eval begin_time: ", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                valid_cur_loss = eval(model, valid_loader, criterion, metric)
                print("valid_cur_loss: ", valid_cur_loss)
                print("eval end_time: ", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                if valid_cur_loss < valid_min_loss:
                    print("%s Save best model step_%d." % \
                            (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), cur_step))
                    paddle.save(encoder_model.state_dict(), 'models/epoch_best_encoder.pdparams')
                    paddle.save(model.state_dict(), 'models/epoch_best.pdparams')
                    valid_min_loss = valid_cur_loss

                    os.system("cp -rf models/epoch_best.pdparams models/step_%d.pdparams" % (cur_step))
                    os.system("cp -rf models/epoch_best_encoder.pdparams models/step_%d_encoder.pdparams" % (cur_step))
                model.train()
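The training loop in Example 8 relies on a metric object exposing update, show, and clear. A minimal sketch of that contract for a classification task; the real object returned by get_metric(model_config['task']) is not shown:

import numpy as np

class RunningAccuracy:
    # Hypothetical metric implementing the update/show/clear contract above.
    def __init__(self):
        self.clear()

    def update(self, pred, label, loss):
        # pred: (n, n_classes) scores as numpy; label: (n, 1) integer class ids
        self.correct += int((pred.argmax(axis=-1) == label.squeeze(-1)).sum())
        self.total += label.shape[0]

    def show(self):
        print('accuracy: %.5f' % (self.correct / max(self.total, 1)))

    def clear(self):
        self.correct, self.total = 0, 0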