Code example #1
    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """ Initialize the GANLoss class.

        Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
            target_real_label (float) - - label for a real image
            target_fake_label (float) - - label for a fake image

        Note: LSGAN needs no sigmoid. The vanilla branch below uses nn.BCELoss
        (rather than BCEWithLogitsLoss), so the Discriminator's last layer must
        output probabilities, i.e. end with a sigmoid.
        """
        super(GANLoss, self).__init__()
        # self.register_buffer('real_label', torch.tensor(target_real_label))
        # self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.real_label = paddle.fluid.dygraph.to_variable(
            np.array(target_real_label, dtype='float32'))
        self.fake_label = paddle.fluid.dygraph.to_variable(
            np.array(target_fake_label, dtype='float32'))
        # self.real_label.stop_gradients = True
        # self.fake_label.stop_gradients = True

        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCELoss()  # use nn.BCEWithLogitsLoss() here if the Discriminator outputs raw logits
        elif gan_mode in ['wgangp']:
            self.loss = None
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
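
    # Sketch (not part of the original snippet): the CycleGAN/pix2pix GANLoss
    # normally pairs the constructor above with the two methods below. `prediction`
    # is assumed to be the discriminator output; the exact port may differ.
    def get_target_tensor(self, prediction, target_is_real):
        """Build a label tensor with the same shape and dtype as the prediction."""
        value = self.real_label if target_is_real else self.fake_label
        return paddle.full_like(prediction, float(value.numpy()))

    def __call__(self, prediction, target_is_real):
        """Compute the GAN loss for the configured gan_mode."""
        if self.gan_mode in ['lsgan', 'vanilla']:
            target_tensor = self.get_target_tensor(prediction, target_is_real)
            return self.loss(prediction, target_tensor)
        # wgangp: average the critic output, with the sign set by target_is_real
        return -paddle.mean(prediction) if target_is_real else paddle.mean(prediction)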
Code example #2
# Optimizers
optimizerG = paddle.optimizer.Adam(
    learning_rate=LR,
    parameters=generator.parameters(),
    beta1=0.5,
    beta2=0.999)

optimizerD = paddle.optimizer.Adam(
    learning_rate=LR,
    parameters=discriminator.parameters(), 
    beta1=0.5,
    beta2=0.999)
    
# Loss functions
bce_loss = nn.BCELoss()
l1_loss = nn.L1Loss()

# dataloader
data_loader_train = DataLoader(
    paired_dataset_train,
    batch_size=BATCH_SIZE,
    shuffle=True,
    drop_last=True
    )

data_loader_test = DataLoader(
    paired_dataset_test,
    batch_size=BATCH_SIZE
    )
results_save_path = 'work/results'
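
For context, a single training iteration with the objects above usually alternates a discriminator update and a generator update. The following is only a sketch under some assumptions: a pix2pix-style `discriminator` that takes the input image concatenated with a real or generated target along the channel axis and ends in a sigmoid (as `nn.BCELoss` requires), and a hypothetical `LAMBDA_L1` weight for the L1 term.

LAMBDA_L1 = 100.0  # assumed weight for the L1 reconstruction term

for input_img, target_img in data_loader_train:
    fake_img = generator(input_img)

    # Discriminator step: real pairs -> 1, fake pairs -> 0
    optimizerD.clear_grad()
    d_real = discriminator(paddle.concat([input_img, target_img], axis=1))
    d_fake = discriminator(paddle.concat([input_img, fake_img.detach()], axis=1))
    d_loss = 0.5 * (bce_loss(d_real, paddle.ones_like(d_real)) +
                    bce_loss(d_fake, paddle.zeros_like(d_fake)))
    d_loss.backward()
    optimizerD.step()

    # Generator step: fool the discriminator and stay close to the target in L1
    optimizerG.clear_grad()
    d_fake_for_g = discriminator(paddle.concat([input_img, fake_img], axis=1))
    g_loss = (bce_loss(d_fake_for_g, paddle.ones_like(d_fake_for_g))
              + LAMBDA_L1 * l1_loss(fake_img, target_img))
    g_loss.backward()
    optimizerG.step()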
Code example #3
    beta1, beta2 = opt.beta1, opt.beta2
    losses = [[], []]
    real_label = paddle.full((opt.batch_size, 1, 1, 1), 1., dtype='float32')
    fake_label = paddle.full((opt.batch_size, 1, 1, 1), 0., dtype='float32')
    X = 20  # window size
    # number of sub-windows per row
    num = math.sqrt(batch_size)
    x = round(num) if math.fabs(math.floor(num)**2 - batch_size) < 1e-6 else math.floor(num) + 1

    print("start training: ")
    print("---------------------------------")
    print("num = ",num)
    
    with paddle.fluid.dygraph.guard(place):
        # loss function
        loss = nn.BCELoss()

        netD = Discriminator(channels_img=3, features_d=10)
        netG = Generator(z_dim=z_dim, channels_img=3, features_g=10)
        optimizerD = optim.Adam(parameters=netD.parameters(), learning_rate=lr, beta1=beta1, beta2=beta2)
        optimizerG = optim.Adam(parameters=netG.parameters(), learning_rate=lr, beta1=beta1, beta2=beta2)

        if not os.path.exists(opt.checkpoints_path):
            os.mkdir(opt.checkpoints_path)
        if not os.path.exists(opt.output_path):
            os.mkdir(opt.output_path)
        
        last = opt.img_size
        order_name = 9
        model_path = opt.checkpoints_path + f"model_{last}_{order_name}/"
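
        # Sketch (not part of the original snippet) of the usual DCGAN inner loop with
        # the objects above; `data_loader` and `opt.epochs` are assumed names, and netD
        # is assumed to end in a sigmoid with an output shaped like real_label/fake_label.
        for epoch in range(opt.epochs):
            for (real_imgs,) in data_loader:
                noise = paddle.randn([opt.batch_size, z_dim, 1, 1])
                fake_imgs = netG(noise)

                # Discriminator: push real towards 1 and fake towards 0
                optimizerD.clear_grad()
                lossD = loss(netD(real_imgs), real_label) + \
                        loss(netD(fake_imgs.detach()), fake_label)
                lossD.backward()
                optimizerD.step()

                # Generator: make the discriminator label the fakes as real
                optimizerG.clear_grad()
                lossG = loss(netD(fake_imgs), real_label)
                lossG.backward()
                optimizerG.step()

                losses[0].append(float(lossD.numpy()))
                losses[1].append(float(lossG.numpy()))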
        
Code example #4
File: finetune.py  Project: xueeinstein/PaddleHelix
def main(args):
    """
    Call the configuration function of the model, build the model and load data, then start training.
    model_config:
        a json file with the hyperparameters, such as dropout rate, learning rate, num_tasks and so on;
    num_tasks:
        the number of tasks that each dataset contains; it is related to the dataset;
    DownstreamModel:
        a supervised GNN model that wraps the PretrainGNNModel encoder (for the different pretraining strategies) and predicts the downstream tasks.
    """
    compound_encoder_config = load_json_config(args.compound_encoder_config)
    model_config = load_json_config(args.model_config)
    if args.dropout_rate is not None:
        compound_encoder_config['dropout_rate'] = args.dropout_rate
        model_config['dropout_rate'] = args.dropout_rate
    task_names = get_downstream_task_names(args.dataset_name, args.data_path)
    model_config['num_tasks'] = len(task_names)

    ### build model
    compound_encoder = PretrainGNNModel(compound_encoder_config)
    model = DownstreamModel(model_config, compound_encoder)
    criterion = nn.BCELoss(reduction='none')
    encoder_params = compound_encoder.parameters()
    head_params = exempt_parameters(model.parameters(), encoder_params)
    encoder_opt = paddle.optimizer.Adam(args.encoder_lr,
                                        parameters=encoder_params)
    head_opt = paddle.optimizer.Adam(args.head_lr, parameters=head_params)
    print('Total param num: %s' % (len(model.parameters())))
    print('Encoder param num: %s' % (len(encoder_params)))
    print('Head param num: %s' % (len(head_params)))

    if args.init_model is not None and args.init_model != "":
        compound_encoder.set_state_dict(paddle.load(args.init_model))
        print('Load state_dict from %s' % args.init_model)

    ### load data
    # featurizer:
    #     Generate features according to the raw data and return the graph data.
    #     Collate features about the graph data and return the feed dictionary.
    # splitter:
    #     Split type of the dataset: random, scaffold, or random with scaffold. Here it is a random split.
    #     `ScaffoldSplitter` will first order the compounds according to their Bemis-Murcko scaffold,
    #     then take the first `frac_train` proportion as the train set, the next `frac_valid` proportion as the valid set
    #     and the rest as the test set. `ScaffoldSplitter` can better evaluate the generalization ability of the model on
    #     out-of-distribution samples. Note that other splitters like `RandomSplitter`, `RandomScaffoldSplitter`
    #     and `IndexSplitter` are also available.

    dataset = get_dataset(args.dataset_name, args.data_path, task_names)
    dataset.transform(DownstreamTransformFn(), num_workers=args.num_workers)
    splitter = create_splitter(args.split_type)
    train_dataset, valid_dataset, test_dataset = splitter.split(dataset,
                                                                frac_train=0.8,
                                                                frac_valid=0.1,
                                                                frac_test=0.1)
    print("Train/Valid/Test num: %s/%s/%s" %
          (len(train_dataset), len(valid_dataset), len(test_dataset)))

    ### start train
    # Load the train function and calculate the train loss in each epoch.
    # Here the epoch index runs up to max_epoch; change it if you want.

    # Then we calculate the train loss, valid auc and test auc and print them.
    # Finally we save the encoder and model state dicts for each epoch under model_dir.
    list_val_auc, list_test_auc = [], []
    collate_fn = DownstreamCollateFn(
        atom_names=compound_encoder_config['atom_names'],
        bond_names=compound_encoder_config['bond_names'])
    for epoch_id in range(args.max_epoch):
        train_loss = train(args, model, train_dataset, collate_fn, criterion,
                           encoder_opt, head_opt)
        val_auc = evaluate(args, model, valid_dataset, collate_fn)
        test_auc = evaluate(args, model, test_dataset, collate_fn)

        list_val_auc.append(val_auc)
        list_test_auc.append(test_auc)
        test_auc_by_eval = list_test_auc[np.argmax(list_val_auc)]
        print("epoch:%s train/loss:%s" % (epoch_id, train_loss))
        print("epoch:%s val/auc:%s" % (epoch_id, val_auc))
        print("epoch:%s test/auc:%s" % (epoch_id, test_auc))
        print("epoch:%s test/auc_by_eval:%s" % (epoch_id, test_auc_by_eval))
        paddle.save(
            compound_encoder.state_dict(),
            '%s/epoch%d/compound_encoder.pdparams' %
            (args.model_dir, epoch_id))
        paddle.save(model.state_dict(),
                    '%s/epoch%d/model.pdparams' % (args.model_dir, epoch_id))

    outs = {
        'model_config': basename(args.model_config).replace('.json', ''),
        'metric': '',
        'dataset': args.dataset_name,
        'split_type': args.split_type,
        'batch_size': args.batch_size,
        'dropout_rate': args.dropout_rate,
        'encoder_lr': args.encoder_lr,
        'head_lr': args.head_lr,
        'exp_id': args.exp_id,
    }
    best_epoch_id = np.argmax(list_val_auc)
    for metric, value in [('test_auc', list_test_auc[best_epoch_id]),
                          ('max_valid_auc', np.max(list_val_auc)),
                          ('max_test_auc', np.max(list_test_auc))]:
        outs['metric'] = metric
        print('\t'.join(['FINAL'] + ["%s:%s" % (k, outs[k])
                                     for k in outs] + [str(value)]))
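
A note on `criterion = nn.BCELoss(reduction='none')` above: the per-element loss is kept so that missing labels in multi-task datasets can be masked out before averaging, which is what the `train` function is expected to do. Below is a minimal self-contained sketch of that masking pattern; the `valids` mask and the `masked_bce` helper are illustrative names, not part of PaddleHelix.

import paddle
import paddle.nn as nn

criterion = nn.BCELoss(reduction='none')

def masked_bce(preds, labels, valids):
    """Average the per-task BCE only over positions whose label is observed.

    preds / labels / valids: float32 tensors of shape [batch, num_tasks];
    preds are assumed to be probabilities (sigmoid already applied) and
    valids is 1.0 for observed labels and 0.0 for missing ones.
    """
    per_element = criterion(preds, labels)            # [batch, num_tasks]
    masked = per_element * valids                     # zero out missing labels
    return paddle.sum(masked) / paddle.maximum(
        paddle.sum(valids), paddle.to_tensor(1.0))    # avoid division by zero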