Example #1
def main(_):

    vocab, rev_vocab = initialize_vocab(FLAGS.vocab_path)

    FLAGS.embed_path = FLAGS.embed_path or pjoin(
        "data", "squad", "glove.trimmed.{}.npz".format(FLAGS.embedding_size))

    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)
    file_handler = logging.FileHandler(pjoin(FLAGS.log_dir, "log.txt"))
    logging.getLogger().addHandler(file_handler)

    print(vars(FLAGS))
    with open(os.path.join(FLAGS.log_dir, "flags.json"), 'w') as fout:
        json.dump(FLAGS.__flags, fout)

    # ========= Load Dataset =========
    # You can change this code to load the dataset in your own way

    dev_dirname = os.path.dirname(os.path.abspath(FLAGS.dev_path))
    dev_filename = os.path.basename(FLAGS.dev_path)
    context_data, question_data, question_uuid_data = prepare_dev(
        dev_dirname, dev_filename, vocab)
    dataset = {
        "val_context": context_data,
        "val_questions": question_data,
        "val_question_uuids": question_uuid_data
    }

    # ========= Model-specific =========
    # You must change the following code to adjust to your model

    encoder = Encoder(size=FLAGS.state_size,
                      vocab_dim=FLAGS.embedding_size,
                      FLAGS=FLAGS)
    decoder = Decoder(FLAGS=FLAGS)

    qa = QASystem(encoder, decoder, FLAGS)

    with tf.Session() as sess:
        #train_dir = get_normalized_train_dir(FLAGS.train_dir)

        train_dir = FLAGS.train_dir
        print("train_dir: ", train_dir)
        initialize_model(sess, qa, train_dir)

        print("Generating Answers")
        answers = generate_answers(sess, qa, dataset, rev_vocab)

        # write the answers to a json file in the root dir
        print("Writing to json file")
        with io.open('dev-prediction.json', 'w', encoding='utf-8') as f:
            f.write(unicode(json.dumps(answers, ensure_ascii=False)))
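The initialize_model() helper called above is not shown in this snippet. A plausible TF1-style sketch, assuming the QASystem exposes a saver attribute (the checkpoint-restore logic here is an assumption, not the project's actual code):

# Hypothetical sketch of initialize_model() for the TF1 example above.
import tensorflow as tf

def initialize_model(session, model, train_dir):
    ckpt = tf.train.get_checkpoint_state(train_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        # A previous run left a checkpoint: restore its weights.
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        # No checkpoint found: start from freshly initialized variables.
        session.run(tf.global_variables_initializer())
    return model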
Example #2
    def __init__(self, ckpt_path, num_classes):
        super(VGG, self).__init__()
        # vgg_16bn
        model_ft, input_size = utils.initialize_model(model_name='vgg',
                                                      num_classes=num_classes,
                                                      feature_extract=False,
                                                      use_pretrained=True)

        # map the checkpoint tensors from GPU to CPU storage
        model_ft.load_state_dict(
            torch.load(ckpt_path,
                       map_location=lambda storage, loc: storage)['model'])

        self.vgg = model_ft

        # dissect the network to access its last convolutional layer
        self.features_conv = self.vgg.features[:43]

        # get the max pool of the features stem
        self.max_pool = nn.MaxPool2d(kernel_size=2,
                                     stride=2,
                                     padding=0,
                                     dilation=1,
                                     ceil_mode=False)

        # get the classifier of the vgg19
        self.classifier = self.vgg.classifier

        # placeholder for the gradients
        self.gradients = None
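The wrapper above stops at the gradients placeholder. A minimal sketch of the hook and forward pass that a Grad-CAM wrapper like this typically adds; the method names are illustrative and not taken from the original repo:

    # Hypothetical continuation of the VGG wrapper above (standard Grad-CAM pattern).
    def activations_hook(self, grad):
        # Store the gradients flowing back into the last conv feature map.
        self.gradients = grad

    def forward(self, x):
        x = self.features_conv(x)
        # Register the hook so backward() fills self.gradients.
        x.register_hook(self.activations_hook)
        x = self.max_pool(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)

    def get_activations_gradient(self):
        return self.gradients

    def get_activations(self, x):
        return self.features_conv(x)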
Example #3
def confuseMatrix(dataLoader, model_name, ckpt_path, labels_name, num_classes):
    """
    Run the baseline model, output its predictions, and analyze the results.
    """
    print("initialize model")
    model_ft, input_size = initialize_model(model_name,
                                            num_classes=num_classes,
                                            feature_extract=False,
                                            use_pretrained=False)
    model_ft.load_state_dict(torch.load(ckpt_path)['model'])
    model_ft.cuda()
    model_ft.eval()

    y_predict = []
    y_true = []
    print("validation ")
    for index, (inputs, labels) in enumerate(dataLoader):
        inputs = inputs.to(flags.gpu)
        labels = labels.to(flags.gpu)

        # present only during the val pass: inputs carry an extra ncrops dimension
        bs, ncrops, c, h, w = inputs.size()
        inputs = inputs.view(-1, c, h, w)
        outputs = model_ft(inputs)
        outputs = outputs.view(bs, ncrops, -1).mean(1)

        _, preds = torch.max(outputs, 1)
        y_predict.extend(preds.cpu().numpy())
        y_true.extend(labels.cpu().numpy())

    epoch_acc = recall_score(y_true, y_predict, average='macro')
    print(epoch_acc)
    cnf_matrix = confusion_matrix(y_true, y_predict, labels=labels_name)
    return cnf_matrix
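A possible way to use confuseMatrix(); the dataloader, checkpoint path, and label list below are placeholders, not values from the original project:

# Hypothetical usage of confuseMatrix() defined above.
import numpy as np

cnf = confuseMatrix(val_loader, 'densenet', 'ckpt/densenet_best.pkl',
                    labels_name=[0, 1, 2], num_classes=3)
# Row-normalize so each row reads as per-class recall.
cnf_norm = cnf.astype(np.float64) / cnf.sum(axis=1, keepdims=True)
print(np.round(cnf_norm, 3))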
Example #4
    def __init__(self, args, rank):
        self.rank = rank
        self.epoch_loss = 0
        self.epoch_acc = 0

        self.args = args

        self.model, input_size, self.quant_model = initialize_model(
            args.model_name, get_num_classes(args.image_path))

        self.dataloaders_dict = preprocess_data(args.image_path,
                                                args.batch_size, input_size,
                                                args.num_workers, rank)

        self.train_iterator = iter(self.dataloaders_dict['train'])

        print("Params to learn:")
        params_to_update = []
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)

        self.optimizer = optim.Adam(params_to_update, lr=0.001)

        self.criterion = nn.CrossEntropyLoss()
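A single optimization step for a worker like this might look roughly as follows; this is a sketch under the assumption that batches come from self.train_iterator and that the caller supplies a device (the original example does not show its training method):

    # Hypothetical training-step method for the worker above.
    def train_step(self, device='cpu'):
        try:
            inputs, labels = next(self.train_iterator)
        except StopIteration:
            # Restart the iterator once the epoch is exhausted.
            self.train_iterator = iter(self.dataloaders_dict['train'])
            inputs, labels = next(self.train_iterator)

        inputs, labels = inputs.to(device), labels.to(device)
        self.optimizer.zero_grad()
        outputs = self.model(inputs)
        loss = self.criterion(outputs, labels)
        loss.backward()
        self.optimizer.step()

        self.epoch_loss += loss.item()
        self.epoch_acc += (outputs.argmax(dim=1) == labels).float().mean().item()
        return loss.item()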
Example #5
def showModelState():
    a = torch.load(modelB, map_location='cpu')['state_dict']
    for k, v in a.items():
        print(k)

    model, inputsize = utils.initialize_model('densenet', 4, False, False)
    for k, v in model.state_dict().items():
        print(k)
Example #6
    def __init__(self, args) -> None:
        if args.ngpu > 0:
            cmd = utils.set_cuda_visible_device(args.ngpu)
            os.environ['CUDA_VISIBLE_DEVICES'] = cmd[:-1]

        self.model = gnn(args)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = utils.initialize_model(self.model, self.device, load_save_file=args.ckpt, gpu=(args.ngpu > 0))

        self.model.eval()
        self.embedding_dim = args.embedding_dim
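Several of the GNN examples (this one, plus Examples 14, 18, and 23) call a utils.initialize_model(model, device, ...) variant whose body is not shown. A plausible sketch, assuming Xavier initialization when no checkpoint is given; the exact behavior is an assumption:

# Hypothetical sketch of the utils.initialize_model(model, device, ...) variant.
import torch
import torch.nn as nn

def initialize_model(model, device, load_save_file=None, gpu=True):
    if load_save_file:
        # Resume from a saved state dict (load on CPU, then move to the device).
        model.load_state_dict(torch.load(load_save_file, map_location='cpu'))
    else:
        # Fresh start: Xavier-initialize weight matrices, leave 1-D params alone.
        for param in model.parameters():
            if param.dim() > 1:
                nn.init.xavier_normal_(param)
    if gpu and torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    return model.to(device)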
Example #7
def get_image_score():
    """
    Get the score of every image. Format: image name, label, original predicted class, classifier score 1, classifier score 2, ...
    """
    data = '/home/njuciairs/Hejia/xRaydata/zipXrayImages/fourClasses_val'
    model_name = 'densenet'
    model_ckpt = '/home/njuciairs/Hejia/local_LogAndCkpt/ckpt/densenet_33_ckpt.pkl'
    num_classes = 3

    dataLoader, class_to_idx = getDataLoader(data,
                                             isMyImagePathDataLoader=True)

    print("initialize model")
    model_ft, input_size = initialize_model(model_name,
                                            num_classes=num_classes,
                                            feature_extract=False,
                                            use_pretrained=False)
    model_ft.load_state_dict(torch.load(model_ckpt)['model'])
    model_ft.cuda()
    model_ft.eval()

    y_image_path = []
    y_predict = []
    y_predict_score = []
    y_true = []
    print("validation ")
    for index, (path, inputs, labels) in enumerate(dataLoader):
        inputs = inputs.to(flags.gpu)
        labels = labels.to(flags.gpu)

        # present only during the val pass: inputs carry an extra ncrops dimension
        bs, ncrops, c, h, w = inputs.size()
        inputs = inputs.view(-1, c, h, w)
        outputs = model_ft(inputs)
        outputs = outputs.view(bs, ncrops, -1).mean(1)

        _, preds = torch.max(outputs, 1)

        y_predict.extend(preds.cpu().numpy())
        y_true.extend(labels.cpu().numpy())
        y_image_path.extend(path)
        y_predict_score.extend(outputs.detach().cpu().numpy().tolist())

    record_csv = pd.DataFrame({
        'path': y_image_path,
        'label': y_true,
        'predict': y_predict,
        'score': y_predict_score
    })
    record_csv.to_csv(
        '/home/njuciairs/Hejia/xRay_DeepLearing/part1DNN/tables/cultPoint2stepResult/4classesData_3classes_record_densenet33_sensitive.csv',
        encoding='utf-8')
Example #8
    def __init__(self, ckpt_path, num_classes):
        super(DenseNet, self).__init__()
        model_ft, input_size = utils.initialize_model(model_name='densenet',
                                                      num_classes=num_classes,
                                                      feature_extract=False,
                                                      use_pretrained=True)

        # map the checkpoint tensors from GPU to CPU storage
        model_ft.load_state_dict(
            torch.load(ckpt_path,
                       map_location=lambda storage, loc: storage)['model'])
        self.densenet = model_ft

        # dissect the network to access its last convolutional layer
        self.features_conv = self.densenet.features

        # add the average global pool
        self.global_avg_pool = nn.AvgPool2d(kernel_size=7, stride=1)

        self.classifier = self.densenet.classifier
        self.gradients = None
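Given a wrapper like the one above (plus forward/hook methods analogous to the VGG sketch in Example 2), the Grad-CAM heatmap is typically computed along these lines; densenet_cam, img, and the getter methods are assumed names:

# Hypothetical Grad-CAM heatmap computation with a wrapper like the one above.
import torch

densenet_cam.eval()
out = densenet_cam(img)                    # img: (1, 3, 224, 224) input tensor
class_idx = out.argmax(dim=1).item()
out[0, class_idx].backward()               # backprop the top-class score
gradients = densenet_cam.get_activations_gradient()    # (1, C, h, w)
pooled = gradients.mean(dim=[0, 2, 3])                  # one weight per channel
activations = densenet_cam.get_activations(img).detach()
for c in range(activations.size(1)):
    activations[:, c, :, :] *= pooled[c]
heatmap = torch.relu(activations.mean(dim=1)).squeeze() # (h, w) activation map
heatmap /= heatmap.max()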
Example #9
def train(dataset):
    global n_layers, learning_rate

    model = utils.initialize_model(n_layers)
    data_test, label_test, data_train, label_train = [], [], [], []
    test_size = 0.1
    epochs = 3

    for i in range(len(dataset['data'])):
        sample = utils.get_row(dataset, i)
        if random.random() < test_size:
            data_test.append(sample[0])
            label_test.append(sample[1])
        else:
            data_train.append(sample[0])
            label_train.append(sample[1])
    for epoch in range(epochs):
        for k in range(len(data_train)):
            sample, label = data_train[k], label_train[k]
            x_values, outputs = sample, []
            outputs.append(x_values)
            for layer in model:
                x_values = utils.run_layer(layer, x_values)
                outputs.append(x_values)
            predicted = x_values
            for i in range(len(model) - 1, -1, -1):
                layer = model[i]
                if i == len(model) - 1:
                    gradient = np.subtract(np.dot(
                        2, predicted), np.dot(
                            2, label)) * predicted * (1 - np.array(predicted))
                else:
                    gradient = np.matmul(np.transpose(model[i + 1]['W']),
                                         gradient) * outputs[i + 1] * (
                                             1 - np.array(outputs[i + 1]))
                layer = utils.update_layer(layer, gradient, learning_rate,
                                           outputs[i] if i > 0 else sample)
    test(model, data_test, label_test)

    return model
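The gradient expressions above imply sigmoid activations and layers stored as dicts with a weight matrix 'W'. Under that assumption, the helpers could look roughly like this (this is not the original utils code):

# Hypothetical helpers consistent with the backprop expressions in train().
import numpy as np

def run_layer(layer, x_values):
    # Affine transform followed by a sigmoid, matching the o * (1 - o) terms above.
    z = np.matmul(layer['W'], x_values) + layer.get('b', 0)
    return 1.0 / (1.0 + np.exp(-z))

def update_layer(layer, gradient, learning_rate, layer_input):
    # Plain gradient-descent update of the weight matrix.
    layer['W'] = layer['W'] - learning_rate * np.outer(gradient, layer_input)
    return layer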
Example #10
    def __init__(self, args):
        self.args = args

        self.workers = [
            DataWorker.remote(args, i) for i in range(args.num_workers)
        ]

        self.model, input_size, self.quant_model = initialize_model(
            args.model_name, get_num_classes(args.image_path))

        self.dataloaders_dict = preprocess_data(args.image_path,
                                                args.batch_size, input_size,
                                                args.num_workers, 0)

        print("Params to learn:")
        params_to_update = []
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)

        self.optimizer = optim.Adam(params_to_update, lr=0.001)

        self.criterion = nn.CrossEntropyLoss()
Example #11
def main():

    args = init_args()

    # Prepare the dataloader
    train_dataloaders, validation_dataloaders, test_dataloader, args = initialize_dataloader(
        args, subset=None)

    # Prepare the model
    model, criterion, optimizer = initialize_model(args)

    best_validation_loss = evaluate(model, validation_dataloaders, criterion)
    log(-1, args, validation_loss=best_validation_loss)

    training_losses = []
    validation_losses = []
    model.train()
    epoch = 0

    # Iterate through the data
    while best_validation_loss > args.threshold:

        epoch += 1.0
        training_loss = 0.0
        n_batches = 0.0

        env = np.random.choice(args.training_agents)

        for observations, actions, target in tqdm(train_dataloaders[env]):

            # Zero out the gradient for this round of updates
            optimizer.zero_grad()

            # Conduct a forward pass of the transformer
            prediction = model.forward(observations, actions)

            # Compare the output of the model to the target
            prediction = prediction.flatten()
            target = target.flatten().float()
            loss = criterion(prediction, target)

            # Update the model
            loss.backward()
            optimizer.step()

            training_loss += loss.item()
            n_batches += 1.0

        # Check against the validation dataset
        validation_loss = evaluate(model, validation_dataloaders, criterion)

        # Average over the number of batches
        training_loss = training_loss / n_batches

        # Save the losses
        training_losses.append(training_loss)
        validation_losses.append(validation_loss)
        np.save(f'{args.model_dir}/log/training_losses.npy', training_losses)
        np.save(f'{args.model_dir}/log/validation_losses.npy',
                validation_losses)

        # Update the logs
        log(epoch,
            args,
            validation_loss=validation_loss,
            training_loss=training_loss)

        # Save model
        if validation_loss < best_validation_loss:
            save_model(model, args, epoch)
            best_validation_loss = validation_loss

    # Apply to test dataset
    test_loss = evaluate(model, test_dataloader, criterion)
    log(-2, args, test_loss=test_loss)
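The evaluate() helper used above is not shown. A minimal sketch consistent with the call sites (it receives either a dict of dataloaders or a single one, plus the criterion), assuming the same (observations, actions, target) batch format:

# Hypothetical sketch of the evaluate() helper used in main().
import torch

def evaluate(model, dataloaders, criterion):
    # Accept either a single DataLoader or a dict of them (one per agent/env).
    if not isinstance(dataloaders, dict):
        dataloaders = {'default': dataloaders}
    model.eval()
    total_loss, n_batches = 0.0, 0
    with torch.no_grad():
        for loader in dataloaders.values():
            for observations, actions, target in loader:
                prediction = model(observations, actions)
                loss = criterion(prediction.flatten(), target.flatten().float())
                total_loss += loss.item()
                n_batches += 1
    model.train()
    return total_loss / max(n_batches, 1)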
Example #12
logging.basicConfig(
    level=logging.INFO,
    filename=log_dir,
    datefmt='%Y/%m/%d %H:%M:%S',
    format='%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(message)s')
logger = logging.getLogger(__name__)

console = logging.StreamHandler()
logger.addHandler(console)

#################################
# Build models
#################################

# Initialize the model for this run
model_ft, input_size = utils.initialize_model(model_name,
                                              num_classes,
                                              feature_extract,
                                              use_pretrained=True)

# Print the model we just instantiated
# print(model_ft)

#################################
# Prepare dataset
#################################

# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train':
    transforms.Compose([
        transforms.RandomResizedCrop(input_size),
Example #13
i_to_c = pickle.load(open(args.i_to_c, 'rb'))
n_char = len(c_to_i)

dataloaders = []

with open('data/testing.txt') as f:
    lines = f.readlines()
    lines = [l.strip().split('\t') for l in lines]
    s_to_human_score = {l[1]: l[2] for l in lines}

if args.model == 'Trans':
    model = models.TransformerModel(args, n_char, i_to_c)
else:
    model = models.RNN(args, n_char, i_to_c)

model = utils.initialize_model(model, device, args.save_files)

print("number of parameters :",
      sum(p.numel() for p in model.parameters() if p.requires_grad))

model.eval()

log_likelihoods = []
synthesis = []
sascores = []
ok_sascore = []
no_sascore = []
ok_ourscore = []
no_ourscore = []

with torch.no_grad():
Example #14
with open(args.test_keys, 'rb') as fp:
    test_keys = pickle.load(fp)

#print simple statistics about dude data and pdbbind data
print(f'Number of train data: {len(train_keys)}')
print(f'Number of test data: {len(test_keys)}')

#initialize model
if args.ngpu > 0:
    cmd = utils.set_cuda_visible_device(args.ngpu)
    os.environ['CUDA_VISIBLE_DEVICES'] = cmd[:-1]
model = gnn(args)
print('number of parameters : ',
      sum(p.numel() for p in model.parameters() if p.requires_grad))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = utils.initialize_model(model, device)

#train and test dataset
train_dataset = MolDataset(train_keys, args.dude_data_fpath)
test_dataset = MolDataset(test_keys, args.dude_data_fpath)
num_train_chembl = len([0 for k in train_keys if 'CHEMBL' in k])
num_train_decoy = len([0 for k in train_keys if 'CHEMBL' not in k])
train_weights = [
    1 / num_train_chembl if 'CHEMBL' in k else 1 / num_train_decoy
    for k in train_keys
]
train_sampler = DTISampler(train_weights, len(train_weights), replacement=True)
train_dataloader = DataLoader(train_dataset, args.batch_size, \
     shuffle=False, num_workers = args.num_workers, collate_fn=collate_fn,\
     sampler = train_sampler)
test_dataloader = DataLoader(test_dataset, args.batch_size, \
Example #15
    "nhop_gcn": config.nhop_gcn,  # for swl
    "nhop_gin": config.nhop_gin,  # for swl
    "nhop_min_triangle": config.nhop_min_triangle,  # for swl
    "nhop_motif_triangle": config.nhop_motif_triangle,  # for swl
    "stack_op": config.stack_op,  # for swl
    "no_identity": config.no_identity,  # for swl-gnn
    "role": "t",
    "nhop_bethe": config.nhop_bethe,
    "last": False
}

params_dict = augment_params_list(params_dict)

params_dict['precompute'] = False
# model = node_task_wrapper(SWL_GNN, params_dict).to(dev)
model = initialize_model(params_dict)

optimizer = optim.Adam(model.parameters(), lr=args.lr)


def compute_overlap(z_list):
    ybar_list = [torch.max(z, 1)[1] for z in z_list]
    overlap_list = []
    for y_bar in ybar_list:
        accuracy = max(torch.sum(y_bar == y).item()
                       for y in y_list) / args.n_nodes
        overlap = (accuracy - 1 / K) / (1 - 1 / K)
        overlap_list.append(overlap)
    return sum(overlap_list) / len(overlap_list)

Example #16
ncpus             : {args.ncpus}
OMP_NUM_THREADS   : {os.environ.get('OMP_NUM_THREADS')}
Num of generations: {args.item_per_cycle} per CPU (Total {args.ncpus*args.item_per_cycle})
Model path        : {save_fpath}
Output path       : {output_filename}
Scaffold          : {args.scaffold}
Scaffold values   : {args.scaffold_properties} -> {scaffold_properties}
Target values     : {args.target_properties} -> {target_properties}
dim_of_node_vector: {args.dim_of_node_vector}
dim_of_edge_vector: {args.dim_of_edge_vector}
dim_of_FC         : {args.dim_of_FC}
stochastic        : {args.stochastic}
""")

    #initialize parameters of the model
    shared_model = utils.initialize_model(shared_model, save_fpath)

    # Copy the same scaffold SMILES for multiple generations.
    scaffolds = [args.scaffold for i in range(args.item_per_cycle)]
    # A whole SMILES can be given and become a latent vector for decoding,
    # but here it is given as None so that a latent is randomly sampled.
    wholes = [None for i in range(args.item_per_cycle)]
    condition1 = target_properties.copy()
    condition2 = scaffold_properties.copy()

    # A list of multiprocessing.managers.ListProxy to collect SMILES
    retval_list = [mp.Manager().list() for i in range(args.ncpus)]
    st = time.time()
    processes = []

    for pid in range(args.ncpus):
Example #17
args = parser.parse_args([
    '--data_dir', '/content/drive/My Drive/Assignment 5 Dataset',
    '--batch_size', '64', '--model', 'res18', '--freeze', 'all', '--epochs',
    '50', '--lr', '1e-5', '--pretrained_weights',
    '/content/drive/My Drive/TrainedModels/vgg16_ft_87.13_0.26_15.pth',
    '--mode', 'train', '--save_dir', '/content'
])

data_dir = args.data_dir
batch_size = args.batch_size
model = args.model
freeze = args.freeze
Epochs = args.epochs
lr = args.lr
pretrained_weights = args.pretrained_weights
mode = args.mode
save_dir = args.save_dir

trainloader, valloader, testloader = load_data(data_dir, batch_size)
net = initialize_model(model, freeze)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)

if mode == 'train':
    train_loss, val_loss, train_acc, val_acc, learning_rates = train(
        net, trainloader, valloader, lr, save_dir)

elif mode == 'evaluate':
    net.load_state_dict(torch.load(pretrained_weights)['state_dict'])
    evaluate(net, testloader)
Example #18
    model = model.DTILJ(args)
elif args.potential == "morse_all_pair":
    model = model.DTILJAllPair(args)
elif args.potential == "harmonic":
    model = model.DTIHarmonic(args)
elif args.potential == "gnn":
    model = model.GNN(args)
elif args.potential == "cnn3d":
    model = model.CNN3D(args)
elif args.potential == "cnn3d_kdeep":
    model = model.CNN3D_KDEEP(args)
else:
    print(f"No {args.potential} potential")
    exit(-1)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = utils.initialize_model(model, device, args.restart_file)

print(f"vina_hbond_coeff: {model.vina_hbond_coeff.data.cpu().numpy()[0]:.3f}")
print(f"vina_hydrophobic_coeff: \
{model.vina_hydrophobic_coeff.data.cpu().numpy()[0]:.3f}")
print(f"rotor_coeff: {model.rotor_coeff.data.cpu().numpy()[0]:.3f}")
print(f"vdw_coeff: {model.vdw_coeff.data.cpu().numpy()[0]:.3f}")
# exit(-1)
print("number of parameters : ",
      sum(p.numel() for p in model.parameters() if p.requires_grad))

# Dataloader
test_dataset = MolDataset(test_keys, args.data_dir, id_to_y)
test_data_loader = DataLoader(test_dataset,
                              args.batch_size,
                              shuffle=False,
Example #19
def experiment(**kwargs):
    """Run a dataset-adv experiment. Pull from DB or use defaults."""
    # Set default training parameters
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(('Using device: {}'.format(device)))
    hps = utils.default_hps('biggan')
    hps['epochs'] = 1

    # Update params with kwargs
    pull_from_db = kwargs['pull_from_db']
    if pull_from_db:
        exp = db.get_experiment_trial(False)
        if exp is None:
            raise RuntimeError('All experiments are complete.')
    else:
        exp = kwargs
    for k, v in exp.items():
        if k in hps:
            hps[k] = v
            print(('Setting {} to {}'.format(k, v)))

    # Create results directory
    utils.make_dir(hps['results_dir'])

    # Other params we won't write to DB
    save_examples = False
    im_dir = 'screenshots'
    trainable = True
    reset_inner_optimizer = True  # Reset adam params after every epoch
    if hps['dataset'] == 'biggan':
        num_classes = 1000
        model_output = 1000
    elif hps['dataset'] == 'psvrt':
        num_classes = 2
        model_output = 2
    else:
        raise NotImplementedError(hps['dataset'])
    net_loss = nn.CrossEntropyLoss(reduction='mean')

    # Create results directory
    utils.make_dir(hps['results_dir'])

    # Add hyperparams and model info to DB
    dt = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d_%H:%M:%S')
    hps['dt'] = dt
    run_name = '{}_{}_{}'.format(hps['dataset'], hps['model_name'], dt)

    # Initialize net
    net, img_size = utils.initialize_model(
        dataset=hps['dataset'],
        model_name=hps['model_name'],
        num_classes=model_output,
        siamese=hps['siamese'],
        siamese_version=hps['siamese_version'],
        trainable=trainable,
        pretrained=hps['pretrained'])
    if hps['dataset'] == 'biggan':
        img_size = 224
    elif hps['dataset'] == 'psvrt':
        img_size = 80  # 160

    ds = import_module('data_generators.{}'.format(hps['dataset']))
    P = ds.Generator(dataset=hps['dataset'],
                     img_size=img_size,
                     device=device,
                     siamese=hps['siamese'],
                     task=hps['task'],
                     wn=hps['wn'],
                     num_classes=num_classes)
    if hps['adv_version'] == 'flip':
        [p.register_hook(reversal) for p in P.parameters()]

    net = net.to(device)
    P = P.to(device)
    net_optimizer = utils.get_optimizer(net=net,
                                        optimizer=hps['optimizer'],
                                        lr=hps['inner_lr'],
                                        amsgrad=hps['amsgrad'],
                                        trainable=trainable)
    if hps['dataset'] == 'biggan':
        outer_params = P.named_parameters()
        outer_params = [v for k, v in outer_params if 'model' not in k]
    else:
        outer_params = P.parameters()
    r_optimizer = getattr(optim, hps['optimizer'])(outer_params,
                                                   lr=hps['outer_lr'],
                                                   amsgrad=hps['amsgrad'])

    # Optimize r
    inner_losses, outer_losses = [], []
    inner_loop_steps, outer_loop_steps = [], []
    epochs = int(hps['epochs'])
    inner_loop_criterion = hps['inner_loop_criterion']
    outer_loop_criterion = hps['outer_loop_criterion']
    if inner_loop_criterion:
        inner_steps = hps['inner_steps']
    else:
        inner_steps = int(hps['inner_steps'])
    if outer_loop_criterion:
        outer_steps = hps['outer_steps']
    else:
        outer_steps = int(hps['outer_steps'])
    for epoch in tqdm(list(range(epochs)), total=epochs, desc='Epoch'):
        # Inner loop starts here
        net.train()
        net._initialize()  # Reset thetas
        P.set_not_trainable()
        if reset_inner_optimizer:
            if epoch == 0:
                reset_adam_state = net_optimizer.state
            net_optimizer.state = reset_adam_state  # Reset adam parameters
        with tqdm(total=inner_steps) as inner_pbar:
            if inner_loop_criterion:
                L = np.inf
                i = 0
                while L > inner_steps:
                    L = inner_loop(net=net,
                                   net_loss=net_loss,
                                   net_optimizer=net_optimizer,
                                   P=P,
                                   device=device,
                                   inner_pbar=inner_pbar,
                                   batch_size=hps['batch_size'])
                    i += 1
            else:
                for i in range(inner_steps):
                    L = inner_loop(net=net,
                                   net_loss=net_loss,
                                   net_optimizer=net_optimizer,
                                   P=P,
                                   device=device,
                                   inner_pbar=inner_pbar,
                                   batch_size=hps['batch_size'])
            inner_loop_steps += [i]

        # TODO: Compute hessian over init_training_steps here
        # TODO: Pass adams from inner_optimizer to r_optimizer
        inner_losses += [L.cpu().data.numpy()]

        # Outer loop starts here
        net.eval()  # Enable test-time batch norms
        P.set_trainable()
        if save_examples:
            utils.plot_examples(path=os.path.join(
                im_dir, '{}_outer_init_{}'.format(dt, epoch)),
                                n=16,
                                P=P)
        with tqdm(total=outer_steps) as inner_pbar:
            if outer_loop_criterion:
                L = np.inf
                i = 0
                while L > outer_steps:
                    Lo, generative_losses, r_loss, grads = outer_loop(
                        batch_size=hps['batch_size'],
                        outer_batch_size_multiplier=hps[
                            'outer_batch_size_multiplier'],  # noqa
                        adv_version=hps['adv_version'],
                        num_classes=num_classes,
                        net_optimizer=net_optimizer,
                        r_optimizer=r_optimizer,
                        net=net,
                        net_loss=net_loss,
                        device=device,
                        loss=hps['loss'],
                        P=P,
                        outer_steps=outer_steps,
                        alpha=hps['alpha'],
                        beta=hps['beta'],
                        inner_pbar=inner_pbar)
                    i += 1
            else:
                for i in range(outer_steps):
                    Lo, generative_losses, r_loss, grads = outer_loop(
                        batch_size=hps['batch_size'],
                        outer_batch_size_multiplier=hps[
                            'outer_batch_size_multiplier'],  # noqa
                        adv_version=hps['adv_version'],
                        num_classes=num_classes,
                        net_optimizer=net_optimizer,
                        r_optimizer=r_optimizer,
                        net=net,
                        net_loss=net_loss,
                        device=device,
                        loss=hps['loss'],
                        P=P,
                        outer_steps=outer_steps,
                        alpha=hps['alpha'],
                        beta=hps['beta'],
                        i=i,
                        inner_pbar=inner_pbar)
            outer_losses += [Lo]

        path = os.path.join(hps['results_dir'],
                            '{}_gradients'.format(run_name))
        if pull_from_db:
            # Update DB
            results_dict = {'_id': exp['_id'], 'file_path': path}
            db.update_grad_experiment([results_dict])

        # Save epoch results
        if save_examples:
            utils.plot_examples(path=os.path.join(
                im_dir, '{}_outer_optim_{}'.format(run_name, epoch)),
                                n=16,
                                P=P)
        for k, v in grads.items():
            if v is not None:
                try:
                    v = v.detach().cpu().numpy()
                except Exception as e:
                    print('Failed to detach {}'.format(k))
                    v = v.cpu().numpy()
            grads[k] = v
        grads.update(hps)
        np.savez(path, **grads)
    print('Finished the experiment!')
Example #20
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    with open('./record.txt', 'a') as f:
        f.writelines(['\n', str(flags.model_vrsion), '   ', str(best_acc)])

    return val_acc_history, val_loss_history, train_acc_history, train_loss_history


if __name__ == '__main__':
    print("Initializing Datasets and Dataloaders...")
    model_ft, input_size = utils.initialize_model(
        flags.model_name,
        flags.num_classes,
        flags.feature_extract,
        use_pretrained=flags.useImageNetPretrained)

    if flags.useCheXNetPretrained:
        print("reload cheXNet...")
        model_ft = utils.loadMyStateDict(model_ft, flags.reload_ckpt)

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomVerticalFlip(),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),  # automatically converts range [0, 255] -> [0.0, 1.0]
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
Example #21
def experiment(**kwargs):
    """Run a dataset-adv experiment. Pull from DB or use defaults."""
    # Set default training parameters
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(('Using device: {}'.format(device)))
    hps = utils.default_hps('biggan')
    hps = utils.default_hps('psvrt')
    hps['epochs'] = 1

    # Update params with kwargs
    pull_from_db = kwargs['pull_from_db']
    if pull_from_db:
        exp = db.get_experiment_trial(True)
        if exp is None:
            raise RuntimeError('All experiments are complete.')
    else:
        exp = kwargs
    for k, v in exp.items():
        if k in hps:
            hps[k] = v
            print(('Setting {} to {}'.format(k, v)))

    # Create results directory
    utils.make_dir(hps['results_dir'])

    # Other params we won't write to DB
    save_every = 1000
    save_examples = False
    im_dir = 'screenshots'
    trainable = True
    reset_inner_optimizer = False  # Reset adam params after every epoch
    reset_theta = False
    if hps['dataset'] == 'biggan':
        num_classes = 1000
        model_output = 1000
    elif hps['dataset'] == 'psvrt':
        num_classes = 2
        model_output = 2
    else:
        raise NotImplementedError(hps['dataset'])
    net_loss = nn.CrossEntropyLoss(reduction='mean')

    # Create results directory
    utils.make_dir(hps['results_dir'])

    # Add hyperparams and model info to DB
    dt = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d-%H_%M_%S')
    hps['dt'] = dt
    run_name = '{}_{}_{}'.format(hps['dataset'], hps['model_name'], dt)

    # Initialize net
    net, img_size = utils.initialize_model(
        dataset=hps['dataset'],
        model_name=hps['model_name'],
        num_classes=model_output,
        siamese=hps['siamese'],
        siamese_version=hps['siamese_version'],
        trainable=trainable,
        pretrained=hps['pretrained'])
    if hps['dataset'] == 'biggan':
        img_size = 224
        # net.track_running_stats = False
    elif hps['dataset'] == 'psvrt':
        img_size = 160

    ds = import_module('data_generators.{}'.format(hps['dataset']))
    P = ds.Generator(dataset=hps['dataset'],
                     img_size=img_size,
                     device=device,
                     siamese=hps['siamese'],
                     task=hps['task'],
                     wn=hps['wn'],
                     num_classes=num_classes)
    if hps['adv_version'] == 'flip':
        [p.register_hook(utils.reversal) for p in P.parameters()]

    net = net.to(device)
    P = P.to(device)
    net_optimizer = utils.get_optimizer(net=net,
                                        optimizer=hps['optimizer'],
                                        lr=hps['inner_lr'],
                                        amsgrad=hps['amsgrad'],
                                        trainable=trainable)
    if hps['dataset'] == 'biggan':
        outer_params = [v for k, v in P.named_parameters() if 'model' not in k]
        if P.embedding_grad:
            outer_params = [{
                'params': outer_params
            }, {
                'params': P.get_embed()[0][1],
                'lr': hps['emb_lr']
            }]
            # outer_params += [P.embedding]
    else:
        outer_params = P.parameters()
    r_optimizer = getattr(optim, hps['optimizer'])(outer_params,
                                                   lr=hps['outer_lr'],
                                                   amsgrad=hps['amsgrad'])

    # Add tensorboard if requested
    if hps['gen_tb']:
        writer = SummaryWriter(log_dir=os.path.join('runs', run_name))
        print('Saving tensorboard to: {}'.format(os.path.join(
            'runs', run_name)))
    else:
        writer = None

    # Optimize r
    inner_losses, outer_losses = [], []
    inner_loop_steps, outer_loop_steps = [], []
    all_params = []
    epochs = int(hps['epochs'])
    inner_loop_criterion = hps['inner_loop_criterion']
    outer_loop_criterion = hps['outer_loop_criterion']
    if hps['inner_loop_criterion']:
        inner_steps = hps['inner_steps']
    else:
        inner_steps = int(hps['inner_steps'])
    if hps['outer_loop_criterion']:
        outer_steps = hps['outer_steps']
    else:
        outer_steps = int(hps['outer_steps'])
    for epoch in tqdm(list(range(epochs)), total=epochs, desc='Epoch'):
        # Inner loop starts here
        net.train()
        P.set_not_trainable()
        if reset_theta:
            net._initialize()
        if reset_inner_optimizer:
            if epoch == 0:
                reset_adam_state = net_optimizer.state
            net_optimizer.state = reset_adam_state  # Reset adam parameters
        with tqdm() as inner_pbar:
            if inner_loop_criterion:
                L = np.inf
                i = 0
                while L > inner_steps:
                    L = utils.inner_loop(net=net,
                                         net_loss=net_loss,
                                         net_optimizer=net_optimizer,
                                         P=P,
                                         device=device,
                                         inner_pbar=inner_pbar,
                                         batch_size=hps['batch_size'])
                    i += 1
            else:
                for i in range(inner_steps):
                    L = utils.inner_loop(net=net,
                                         net_loss=net_loss,
                                         net_optimizer=net_optimizer,
                                         P=P,
                                         device=device,
                                         inner_pbar=inner_pbar,
                                         batch_size=hps['batch_size'])
            inner_loop_steps += [i]

        # TODO: Pass adams from inner_optimizer to r_optimizer
        inner_losses += [L.item()]  # cpu().data.numpy()]

        # Outer loop starts here
        if hps['use_bn']:
            net.eval()  # Careful!!
        else:
            net.train()
        P.set_trainable()
        if save_examples:
            utils.plot_examples(path=os.path.join(
                im_dir, '{}_outer_init_{}'.format(run_name, epoch)),
                                n_subplots=16,
                                n_batches=10,
                                P=P)
        try:
            with tqdm() as inner_pbar:
                rmu = 0.
                if outer_loop_criterion:
                    L = np.inf
                    i = 0
                    while L > outer_steps:
                        Lo, generative_losses, r_loss, batch, rmu, net_ce, params = utils.outer_loop(  # noqa
                            batch_size=hps['batch_size'],
                            outer_batch_size_multiplier=hps[
                                'outer_batch_size_multiplier'],  # noqa
                            adv_version=hps['adv_version'],
                            num_classes=num_classes,
                            net_optimizer=net_optimizer,
                            r_optimizer=r_optimizer,
                            net=net,
                            net_loss=net_loss,
                            running_mean=rmu,
                            device=device,
                            loss=hps['loss'],
                            P=P,
                            alpha=hps['alpha'],
                            beta=hps['beta'],
                            writer=writer,
                            i=i,
                            inner_pbar=inner_pbar)
                        if (hps['save_i_params']
                                and i % hps['save_i_params'] == 0):
                            all_params += [utils.prep_params(params)]
                        i += 1
                else:
                    for i in range(outer_steps):
                        Lo, generative_losses, r_loss, batch, rmu, net_ce, params = utils.outer_loop(  # noqa
                            batch_size=hps['batch_size'],
                            outer_batch_size_multiplier=hps[
                                'outer_batch_size_multiplier'],  # noqa
                            adv_version=hps['adv_version'],
                            num_classes=num_classes,
                            net_optimizer=net_optimizer,
                            r_optimizer=r_optimizer,
                            net=net,
                            net_loss=net_loss,
                            running_mean=rmu,
                            device=device,
                            loss=hps['loss'],
                            P=P,
                            alpha=hps['alpha'],
                            beta=hps['beta'],
                            i=i,
                            writer=writer,
                            inner_pbar=inner_pbar)
                        if (hps['save_i_params']
                                and i % hps['save_i_params'] == 0):
                            all_params += [utils.prep_params(params)]
                outer_losses += [Lo.item()]  # cpu().data.numpy()]
                outer_loop_steps += [i]
        except Exception as e:
            print('Outer optimization failed. {}\n'
                  'Saving results and exiting.'.format(e))
            if pull_from_db:
                # Update DB with results
                results_dict = {
                    'experiment_id': exp['_id'],
                    'inner_loss': inner_losses[epoch].tolist(),
                    'outer_loss': outer_losses[epoch].tolist(),
                    'inner_loop_steps': inner_loop_steps[epoch],
                    'outer_loop_steps': outer_loop_steps[epoch],
                    'net_loss': net_ce.item(),  # cpu().data.numpy(),
                    'params': json.dumps(utils.prep_params(P)),
                }
                db.add_results([results_dict])
            break

        # Save epoch results
        if save_examples:
            utils.plot_examples(path=os.path.join(
                im_dir, '{}_outer_optim_{}'.format(run_name, epoch)),
                                n_subplots=16,
                                n_batches=10,
                                P=P)
        # pds += [utils.prep_params(P)]
        if epoch % save_every == 0:
            np.save(
                os.path.join(hps['results_dir'],
                             '{}_inner_losses'.format(run_name)), inner_losses)
            np.save(
                os.path.join(hps['results_dir'],
                             '{}_outer_losses'.format(run_name)), outer_losses)
            save_params = utils.prep_params(P)
            if P.embedding_grad:
                save_params['embedding'] = P.get_embed()[0][1].detach().cpu(
                ).numpy()  # noqa
                save_params[
                    'embedding_original'] = P.embedding_original.detach().cpu(
                    ).numpy()  # noqa
            np.save(
                os.path.join(hps['results_dir'],
                             '{}_all_params'.format(run_name)), all_params)
            np.savez(
                os.path.join(hps['results_dir'],
                             '{}_final_params'.format(run_name)),
                **save_params)

        if len(inner_loop_steps):
            np.save(
                os.path.join(hps['results_dir'],
                             '{}_inner_steps'.format(run_name)),
                inner_loop_steps)
            np.save(
                os.path.join(hps['results_dir'],
                             '{}_outer_steps'.format(run_name)),
                outer_loop_steps)
        if pull_from_db:
            # Update DB with results
            results_dict = {
                'experiment_id': exp['_id'],
                'inner_loss': inner_losses[epoch].tolist(),
                'outer_loss': outer_losses[epoch].tolist(),
                'inner_loop_steps': inner_loop_steps[epoch],
                'outer_loop_steps': outer_loop_steps[epoch],
                'net_loss': net_ce.item(),  # cpu().data.numpy(),
                'params': json.dumps(utils.prep_params(P)),
            }
            db.add_results([results_dict])
    print('Finished {}!'.format(run_name))
Example #22
def evaluate_dataset(args, device, tokenizer, train_set, test_set,
                     new_num_classes):
    """
    Function that evaluates on a given dataset.
    Inputs:
        args - Namespace object from the argument parser
        device - PyTorch device instance
        tokenizer - BERT tokenizer instance
        train_set - Dataloader instance containing the training set
        test_set - Dataloader instance containing test set
        new_num_classes - Number of classes in the evaluation dataset
    """

    gathered_results = {}

    # get the old number of classes
    task_label_dict = {
        'GoEmotions': 27,
        'crowdflower': 8,
        'dailydialog': 7,
        'electoraltweets': 10,
        'emoint': 4,
        'emotion-cause': 6,
        'grounded_emotions': 2,
        'ssec': 7,
        'tales-emotion': 7,
        'tec': 6,
    }
    old_num_classes = task_label_dict[args.dataset]

    # load the model from the given checkpoint
    print('Loading model from checkpoint..')
    model, optimizer = initialize_model(args, device, tokenizer,
                                        old_num_classes)
    checkpoint = torch.load(args.checkpoint_path)
    model.load_state_dict(checkpoint['bert_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    print('Model loaded')

    # replace the final linear layer
    model.replace_clf(new_num_classes)
    model.to(device)

    # perform a training epoch
    print('Starting training..')
    gathered_results['training'] = {}
    for i in range(1, args.num_epochs + 1):
        print('Epoch ' + str(i) + ':')
        train_results = perform_epoch(args,
                                      model,
                                      optimizer,
                                      train_set,
                                      device,
                                      train=True)
        gathered_results['training']['epoch' + str(i)] = train_results
        print(train_results)
    print('Training finished')

    # test the model
    print('Starting testing..')
    with torch.no_grad():
        test_results = perform_epoch(args,
                                     model,
                                     optimizer,
                                     test_set,
                                     device,
                                     train=False)
    print('Test results:')
    print(test_results)
    print('Testing finished')

    # save the testing measures
    gathered_results['testing'] = test_results

    # return the results
    return gathered_results
Example #23
def main():
    now = time.localtime()
    s = "%04d-%02d-%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon,
                                           now.tm_mday, now.tm_hour,
                                           now.tm_min, now.tm_sec)
    print(s)

    parser = argparse.ArgumentParser()

    parser.add_argument("--lr",
                        help="learning rate",
                        type=float,
                        default=0.0001)
    parser.add_argument("--epoch", help="epoch", type=int, default=10000)
    parser.add_argument("--ngpu", help="number of gpu", type=int, default=1)
    parser.add_argument("--batch_size",
                        help="batch_size",
                        type=int,
                        default=32)
    parser.add_argument("--num_workers",
                        help="number of workers",
                        type=int,
                        default=7)
    parser.add_argument("--n_graph_layer",
                        help="number of GNN layer",
                        type=int,
                        default=4)
    parser.add_argument("--d_graph_layer",
                        help="dimension of GNN layer",
                        type=int,
                        default=140)
    parser.add_argument("--n_FC_layer",
                        help="number of FC layer",
                        type=int,
                        default=4)
    parser.add_argument("--d_FC_layer",
                        help="dimension of FC layer",
                        type=int,
                        default=128)
    parser.add_argument("--dude_data_fpath",
                        help="file path of dude data",
                        type=str,
                        default='data/')
    parser.add_argument("--save_dir",
                        help="save directory of model parameter",
                        type=str,
                        default='./save/')
    parser.add_argument("--initial_mu",
                        help="initial value of mu",
                        type=float,
                        default=4.0)
    parser.add_argument("--initial_dev",
                        help="initial value of dev",
                        type=float,
                        default=1.0)
    parser.add_argument("--dropout_rate",
                        help="dropout_rate",
                        type=float,
                        default=0.0)
    parser.add_argument("--train_keys",
                        help="train keys",
                        type=str,
                        default='keys/train_keys.pkl')
    parser.add_argument("--test_keys",
                        help="test keys",
                        type=str,
                        default='keys/test_keys.pkl')
    args = parser.parse_args()
    print(args)

    #hyper parameters
    num_epochs = args.epoch
    lr = args.lr
    ngpu = args.ngpu
    batch_size = args.batch_size
    dude_data_fpath = args.dude_data_fpath
    save_dir = args.save_dir

    #make save dir if it doesn't exist
    if not os.path.isdir(save_dir):
        os.system('mkdir ' + save_dir)
        print('save_dir({}) created'.format(save_dir))
        pass

    print('save_dir:{}'.format(save_dir))
    print('+' * 10)

    #read data. Data is stored as a dictionary; each key holds information about a protein-ligand complex.
    with open(args.train_keys, 'rb') as fp:
        train_keys = pickle.load(fp)
        #
        # train_keys: type=list, len=730, ['andr_C36276925', 'dhi1_C08592133', 'hivpr_C59233791', 'hivrt_C66397637', 'cah2_C62892628', ... ]
        #
        print('train_keys({}) loaded from pickle --> type:{}, len:{}, ex:\n{}'.
              format(args.train_keys, type(train_keys), len(train_keys),
                     train_keys[:5]))
        pass

    print('+' * 3)

    with open(args.test_keys, 'rb') as fp:
        test_keys = pickle.load(fp)
        #
        # test_keys: type=list, len=255, ['fnta_C59365794', 'ace_C22923016', 'aces_C21842010', 'kith_C11223989', 'kpcb_C37928874', ... ]
        #
        print('test_keys({}) loaded from pickle --> type:{}, len:{}, ex:\n{}'.
              format(args.test_keys, type(test_keys), len(test_keys),
                     test_keys[:5]))
        pass

    print('+' * 10)

    #print simple statistics about dude data and pdbbind data
    print(f'Number of train data: {len(train_keys)}')
    print(f'Number of test data: {len(test_keys)}')

    if 0 < args.ngpu:
        cmd = utils.set_cuda_visible_device(args.ngpu)
        print('utils.set_cuda_visible_device({}) --> cmd:{}'.format(
            args.ngpu, cmd))
        os.environ['CUDA_VISIBLE_DEVICES'] = cmd[:-1]
        pass

    model = gnn(args)

    print('+' * 10)

    print('number of parameters : ',
          sum(p.numel() for p in model.parameters() if p.requires_grad))

    #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and 0 < args.ngpu else "cpu")

    print('device: {}'.format(device))

    # initialize model
    model = utils.initialize_model(model, device)

    print('#' * 80)

    print('dude_data_fpath:{}'.format(args.dude_data_fpath))

    #train and test dataset
    train_dataset = MolDataset(train_keys, args.dude_data_fpath)
    test_dataset = MolDataset(test_keys, args.dude_data_fpath)

    print('#' * 80)

    num_train_chembl = len([0 for k in train_keys if 'CHEMBL' in k])
    num_train_decoy = len([0 for k in train_keys if 'CHEMBL' not in k])

    print('#1:num_train_chembl:{}, num_train_decoy:{}'.format(
        num_train_chembl, num_train_decoy))

    num_train_chembl = len([0 for k in train_keys if 'CHEMBL' in k])
    num_train_decoy = len(train_keys) - num_train_chembl

    print('#2:num_train_chembl:{}, num_train_decoy:{}'.format(
        num_train_chembl, num_train_decoy))

    #train_weights = [1/num_train_chembl if 'CHEMBL' in k else 1/num_train_decoy for k in train_keys]
    train_weight_chembl = 1.0 / num_train_chembl
    train_weight_decoy = 1.0 / num_train_decoy
    train_weights = [
        train_weight_chembl if 'CHEMBL' in k else train_weight_decoy
        for k in train_keys
    ]

    print('main: sum(train_weights):{}'.format(sum(train_weights)))
    print(
        'train_weight_chembl:{} / train_weight_decoy:{}, len(train_weights):{}'
        .format(train_weight_chembl, train_weight_decoy, len(train_weights)))

    train_sampler = DTISampler(train_weights,
                               len(train_weights),
                               replacement=True)

    print('main: args.batch_size:{}, args.num_workers:{}'.format(
        args.batch_size, args.num_workers))

    #
    # train_dataset: object of MolDataset(torch.utils.data.Dataset)
    #
    train_dataloader = DataLoader(train_dataset, args.batch_size, \
         shuffle=False, num_workers = args.num_workers, collate_fn=collate_fn,\
         sampler = train_sampler)

    #
    # test_dataset: object of MolDataset(torch.utils.data.Dataset)
    #
    test_dataloader = DataLoader(test_dataset, args.batch_size, \
         shuffle=False, num_workers = args.num_workers, collate_fn=collate_fn, \
         )

    #optimizer
    #optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)

    #loss function --> BCELoss (binary cross-entropy) was replaced by CrossEntropyLoss below
    #loss_fn = nn.BCELoss()
    loss_fn = nn.CrossEntropyLoss()

    test_roc_list = list()
    best_test_roc = 0.0

    for epoch in range(num_epochs):
        st = time.time()
        #collect losses of each iteration
        train_losses = []
        test_losses = []

        #collect true label of each iteration
        train_true = []
        test_true = []

        #collect predicted label of each iteration
        train_pred = []
        test_pred = []

        model.train()  # sets the model in training mode.
        #print('model.training:{}'.format(model.training))

        for i_batch, sample in enumerate(train_dataloader):
            model.zero_grad()
            H, A1, A2, Y, V, keys = sample

            n_queried, n_max_n1, n_max_n2, n_max_adj, n_file_opened = train_dataset.get_n_queried(
            )

            if epoch == 0 and i_batch == 0:
                print('#1:{}/{} H:type:{}, shape:{}\n{}'.format(
                    i_batch, epoch, type(H), H.shape, H))
                print('    A1:type:{}, shape:{}\n{}'.format(
                    type(A1), A1.shape, A1))
                print('    A2:type:{}, shape:{}\n{}'.format(
                    type(A2), A2.shape, A2))
                print('    Y:type:{}, shape:{}\n{}'.format(
                    type(Y), Y.shape, Y))
                print('    V:type:{}, shape:{}\n{}'.format(
                    type(V), V.shape, V))
                print('    keys:type:{}\n{}'.format(type(keys), keys))
                print(
                    '    train_dataset: n_queried:{}, n_max_n1:{}, n_max_n2:{}, n_max_adj:{}, n_file_opened:{}'
                    .format(n_queried, n_max_n1, n_max_n2, n_max_adj,
                            n_file_opened))
                print('+' * 10)
                pass

            H, A1, A2, Y, V = H.to(device), A1.to(device), A2.to(device),\
                                Y.to(device), V.to(device)

            if epoch == 0 and i_batch == 0:
                print('#2:{}/{} H:type:{}, shape:{}\n{}'.format(
                    i_batch, epoch, type(H), H.shape, H))
                print('    A1:type:{}, shape:{}\n{}'.format(
                    type(A1), A1.shape, A1))
                print('    A2:type:{}, shape:{}\n{}'.format(
                    type(A2), A2.shape, A2))
                print('    Y:type:{}, shape:{}\n{}'.format(
                    type(Y), Y.shape, Y))
                print('    V:type:{}, shape:{}\n{}'.format(
                    type(V), V.shape, V))
                print('    keys:type:{}\n{}'.format(type(keys), keys))
                print(
                    '    train_dataset: n_queried:{}, n_max_n1:{}, n_max_n2:{}, n_max_adj:{}, n_file_opened:{}'
                    .format(n_queried, n_max_n1, n_max_n2, n_max_adj,
                            n_file_opened))
                print('+' * 10)
                pass

            #train neural network
            pred = model.train_model((H, A1, A2, V))
            #pred = model.module.train_model((H, A1, A2, V))
            pred = pred.cpu()
            pred_softmax = pred.detach().numpy()
            pred_softmax = softmax(pred_softmax, axis=1)[:, 1]

            if epoch == 0 and i_batch == 0:
                print('{}/{} pred:shape:{}\n{}\nY.shape:{}'.format(
                    i_batch, epoch, pred.shape, pred, Y.shape))
                print('+' * 10)
                print('{}/{} pred_softmax:shape:{}\n{}'.format(
                    i_batch, epoch, pred_softmax.shape, pred_softmax))
                print('+' * 10)
                pass

            loss = loss_fn(pred, Y)

            if epoch == 0 and i_batch == 0:
                print('{}/{} loss:shape:{}\n{}'.format(i_batch, epoch,
                                                       loss.shape, loss))
                print('+' * 10)
                pass

            loss.backward()
            optimizer.step()

            #collect loss, true label and predicted label
            train_losses.append(loss.data.cpu().numpy())
            train_true.append(Y.data.cpu().numpy())
            #train_pred.append(pred.data.cpu().numpy())
            train_pred.append(pred_softmax)
            #if i_batch>10 : break

            pass  # end of for i_batch,sample

        model.eval()  # equivalent to model.train(mode=False): dropout/batch-norm layers switch to inference behaviour
        for i_batch, sample in enumerate(test_dataloader):
            model.zero_grad()

            H, A1, A2, Y, V, keys = sample
            H, A1, A2, Y, V = H.to(device), A1.to(device), A2.to(device),\
                              Y.to(device), V.to(device)

            #evaluation forward pass (no parameter update in this loop)
            pred = model.train_model((H, A1, A2, V))
            pred_softmax = pred.detach().cpu().numpy()
            pred_softmax = softmax(pred_softmax, axis=1)[:, 1]

            loss = loss_fn(pred, Y)

            #collect loss, true label and predicted label
            test_losses.append(loss.data.cpu().numpy())
            test_true.append(Y.data.cpu().numpy())
            #test_pred.append(pred.data.cpu().numpy())
            test_pred.append(pred_softmax)
            #if i_batch>10 : break

            if epoch == 0 and i_batch == 0:
                print('eval: Y.shape:{}, pred.shape:{}, pred_softmax.shape:{}'.
                      format(Y.shape, pred.shape, pred_softmax.shape))
                pass
            pass

        train_losses = np.mean(np.array(train_losses))
        test_losses = np.mean(np.array(test_losses))

        # concatenate the per-batch arrays directly; wrapping the lists in np.array
        # first breaks when the last batch has a different size (ragged sequences)
        train_pred = np.concatenate(train_pred, 0)
        test_pred = np.concatenate(test_pred, 0)

        train_true = np.concatenate(train_true, 0)
        test_true = np.concatenate(test_true, 0)

        #print('#' * 80)
        #print('train_pred:\n{}'.format(train_pred))
        #print('+' * 7)
        ##print(softmax(train_pred, axis=1))

        #print('+' * 10)
        #print('+' * 10)

        #print('train_true:\n{}'.format(train_true))
        #print('#' * 80, flush=True)

        train_roc = roc_auc_score(train_true, train_pred)
        test_roc = roc_auc_score(test_true, test_pred)

        end = time.time()
        if epoch == 0:
            print(
                'epoch\ttrain_losses\ttest_losses\ttrain_roc\ttest_roc\telapsed_time'
            )
            pass
        #print('#' * 80)
        #print ('epoch\ttrain_losses\ttest_losses\ttrain_roc\ttest_roc\telapsed_time')
        #print ("%s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f" \
        print ('%s\t%.6f\t%.6f\t%.6f\t%.6f\t%.6f\t%s' \
               % (epoch, train_losses, test_losses, train_roc, test_roc, end-st, datetime.datetime.fromtimestamp(end).strftime('%Y-%m-%d %H:%M:%S.%f')),
               end='')

        #name = save_dir + '/save_'+str(epoch)+'.pt'
        #torch.save(model.state_dict(), name)
        if best_test_roc < test_roc:
            name = save_dir + '/save_' + str(epoch) + '.pt'
            torch.save(model.state_dict(), name)
            print(' updated')

            best_test_roc = test_roc
            pass
        else:
            print('')
            pass

        test_roc_list.append(test_roc)
        pass
    pass
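For reference, a minimal, self-contained sketch of the metric computation used in this training loop: two-class logits are turned into positive-class probabilities with a softmax and scored with scikit-learn's roc_auc_score (illustrative values only, not taken from the dataset above).

import numpy as np
from scipy.special import softmax
from sklearn.metrics import roc_auc_score

# fake logits for 4 samples over 2 classes, plus their true labels
logits = np.array([[2.0, -1.0], [0.5, 0.3], [-1.2, 2.2], [0.1, 0.9]])
labels = np.array([0, 0, 1, 1])

# probability of the positive class (column 1), as in the loop above
pos_prob = softmax(logits, axis=1)[:, 1]
print(roc_auc_score(labels, pos_prob))  # 1.0 for this toy example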
Ejemplo n.º 24
0
import torch.optim.lr_scheduler as LRscheduler
import datasets
import utils

# Parameters
input_size = 784
num_classes = 10

# Model you wish to evaluate
file_path = r'./saved models/Model 5 - DenseNet, lr=0.001, ss=1, gm=0.1 , bs=64.pkl'
model_name = file_path.split('/')[-1]
model_name = model_name.split('.pkl')[0]

state = torch.load(file_path, lambda storage, loc: storage)

model = utils.initialize_model(3)

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Decay LR by a factor of 0.1 every 7 epochs
# lr_scheduler = LRscheduler.StepLR(optimizer, step_size=ss, gamma=gm)
# TODO: CHECK OTHER TYPES OF SCHEDULER!!  https://pytorch.org/docs/stable/optim.html
lr_scheduler = LRscheduler.ReduceLROnPlateau(optimizer, eps=1e-3)
model, optimizer = utils.load_checkpoint(model, optimizer, lr_scheduler,
                                         file_path)

if torch.cuda.is_available():
    print('GPU detected - Enabling Cuda!')
    model = model.cuda()
else:
    print('No GPU detected!')
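The load_checkpoint helper itself is not shown in this example; a minimal sketch of what such a helper typically does (hypothetical -- the checkpoint keys and body are assumptions, not the repository's actual utils.load_checkpoint) is:

import torch

def load_checkpoint(model, optimizer, lr_scheduler, file_path):
    # Load the saved training state onto the CPU and restore each component.
    # The keys 'model', 'optimizer', 'scheduler' are assumed for illustration.
    state = torch.load(file_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state['model'])
    optimizer.load_state_dict(state['optimizer'])
    if 'scheduler' in state:
        lr_scheduler.load_state_dict(state['scheduler'])
    return model, optimizer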
Ejemplo n.º 25
0
# img_dir = "./data/excavator_cls/val/excavator/"
img_root = "./data/excavator_cls/val/excavator/"

num_classes = 2

# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

#################################
# Prepare model
#################################

# Initialize the model for this run
model_ft, input_size = utils.initialize_model(model_name,
                                              num_classes,
                                              False,
                                              use_pretrained=False)

model_ft.load_state_dict(torch.load(model_dir))

model_ft.eval()

# Send the model to GPU
model_ft = model_ft.to(device)

input_size = 224

#################################
# Prepare data
#################################
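The data-preparation code for this example is cut off here; under the usual torchvision evaluation setup for a 224x224 model it would look roughly like the following sketch (assuming standard ImageNet normalization, not necessarily the original script's exact transforms):

from torchvision import transforms

eval_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(input_size),           # input_size = 224, as set above
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],  # ImageNet mean
                         [0.229, 0.224, 0.225])  # ImageNet std
])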
train_data = TensorDataset(train_text_tensor, train_mask_tensor, train_y_tensor)

train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data,
                              sampler=train_sampler,
                              batch_size=BATCH_SIZE)

val_data = TensorDataset(val_text_tensor, val_mask_tensor, val_y_tensor)
val_sampler = SequentialSampler(val_data)
val_dataloader = DataLoader(val_data,
                            sampler=val_sampler,
                            batch_size=BATCH_SIZE)

#%%
set_seed(42)
bert_classifier, optimizer, scheduler = initialize_model(
    epochs=EPOCHS, model=model, train_dataloader=train_dataloader)

loss_fn = nn.CrossEntropyLoss()

train(bert_classifier,
      loss_fn,
      optimizer,
      scheduler,
      train_dataloader,
      val_dataloader,
      epochs=EPOCHS,
      evaluation=True)

#%%
probs = predict_test(bert_classifier, val_dataloader)
evaluate_roc(probs, val_y_tensor)
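In this snippet initialize_model is a project-specific helper that returns the classifier together with its optimizer and learning-rate schedule. A plausible sketch of such a helper (an assumption using torch's AdamW and the Hugging Face transformers warmup scheduler, not necessarily the original code) is:

import torch
from transformers import get_linear_schedule_with_warmup

def initialize_model(epochs, model, train_dataloader):
    # AdamW with a small learning rate is the usual choice for fine-tuning BERT.
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    total_steps = len(train_dataloader) * epochs
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=0,
                                                num_training_steps=total_steps)
    return model, optimizer, scheduler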
Ejemplo n.º 27
0
sample_path = './sample.txt'
save_dir = './saved models/'

for bs in batch_size:
    # Create corpuses
    train_corpus = utils.Corpus()
    ids_train = train_corpus.get_data(train_path, bs)
    ids_valid = train_corpus.get_data(valid_path, bs)
    train_vocab_size = len(train_corpus.dictionary)

    for seq_len in seq_lengths:
        num_train_batches = ids_train.size(1) // seq_len
        num_valid_batches = ids_valid.size(1) // seq_len

        for lr in learning_rate:
            model = utils.initialize_model(model_num, train_vocab_size,
                                           embed_size)
            model = utils.use_cuda(model)
            print('Training vocabulary size: {}'.format(train_vocab_size))
            print('Model: {}'.format(model.name))
            print('Number of parameters = {}'.format(
                sum(p.numel() for p in model.parameters())))

            run_name = "{}, seq_len={}, lr={}, bs={}".format(
                model.name, seq_len, lr, bs)
            file_path = os.path.join(save_dir, run_name + '.pkl')

            # Loss and Optimizer
            criterion = nn.CrossEntropyLoss()
            optimizer = torch.optim.Adam(model.parameters(), lr=lr)
            #TODO: CHANGE PARAMETERS - EPS, PATIENCE, etc.. CHECK OTHER TYPES OF SCHEDULER!!  https://pytorch.org/docs/stable/optim.html
            lr_scheduler = LRscheduler.ReduceLROnPlateau(optimizer, eps=1e-7)
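            # Note: ReduceLROnPlateau only adjusts the learning rate when it is
            # stepped with a monitored metric, e.g. lr_scheduler.step(val_loss),
            # inside the training loop (not shown in this snippet).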
def main():
    # Initialize the model for this run
    model_ft, input_size = initialize_model(model_name,
                                            num_classes,
                                            feature_extract,
                                            use_pretrained=True)
    model_ft.to(device)

    # Temporary header
    # directory - normal, bacteria, TB, COVID-19, virus
    dir_test = '/home/ubuntu/segmentation/output/COVID-19/'
    label = 3  # set 3 for COVID-19 for virus class

    # Data loader
    test_masked_images = sorted(glob.glob(dir_test + '*.npz'))
    #test_masks = sorted(glob.glob(dir_test + '*.mask.npy'))

    for masked_img in test_masked_images:

        test_masked_img = np.load(masked_img)
        #test_mask = np.load(mask)

        test_masked_img = Image.fromarray(test_masked_img).resize((1024, 1024))
        #test_mask = Image.fromarray(test_mask).resize((1024,1024))

        #test_img = np.asarray(test_img)
        #test_mask = np.round(np.asarray(test_mask))

        #test_masked = np.multiply(test_img, test_mask)

        test_normalized = np.asarray(test_masked_img)  # back to a numpy array after resizing

        h_whole = test_normalized.shape[0]  # original height
        w_whole = test_normalized.shape[1]  # original width

        background = np.zeros((h_whole, w_whole))
        background_indicer = np.zeros((h_whole, w_whole))

        sum_prob_wt = 0.0
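        # Patch-averaging Grad-CAM: repeatedly sample a random non-zero pixel, crop an
        # img_size x img_size patch around it, run the classifier and Grad-CAM on that
        # patch, weight the patch CAM by the predicted class probability, and accumulate
        # both the weighted CAMs and a per-pixel coverage count for later averaging.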

        for i in range(header.repeat):

            non_zero_list = np.nonzero(test_normalized)

            random_index = random.randint(0, len(non_zero_list[0]) - 1)

            non_zero_row = non_zero_list[0][
                random_index]  # random non-zero row index
            non_zero_col = non_zero_list[1][
                random_index]  # random non-zero col index

            X_patch = test_normalized[
                int(max(0, non_zero_row - (header.img_size / 2))
                    ):int(min(h_whole, non_zero_row + (header.img_size / 2))),
                int(max(0, non_zero_col - (header.img_size / 2))
                    ):int(min(w_whole, non_zero_col + (header.img_size / 2)))]

            X_patch_img = data_transforms(
                augmentation(Image.fromarray(X_patch), rand_p=0.0,
                             mode='test'))
            X_patch_img_ = np.squeeze(np.asarray(X_patch_img))

            X_patch_1 = np.expand_dims(X_patch_img_, axis=0)
            X_patch_2 = np.expand_dims(X_patch_img_, axis=0)
            X_patch_3 = np.expand_dims(X_patch_img_, axis=0)

            X_ = np.concatenate((X_patch_1, X_patch_2, X_patch_3), axis=0)
            X_ = np.expand_dims(X_, axis=0)

            X = torch.from_numpy(X_)
            X = X.to(device)

            checkpoint = torch.load(
                os.path.join(header.save_dir,
                             str(header.inference_epoch) + '.pth'))
            model_ft.load_state_dict(checkpoint['model_state_dict'])
            model_ft.eval()
            outputs = model_ft(X)
            outputs_prob = F.softmax(outputs, dim=1)

            prob = outputs_prob[0][label]
            prob_wt = prob.detach().cpu().numpy()

            gradcam = GradCAM.from_config(model_type='resnet',
                                          arch=model_ft,
                                          layer_name='layer4')

            mask, logit = gradcam(X, class_idx=label)
            mask_np = np.squeeze(mask.detach().cpu().numpy())
            indicer = np.ones((224, 224))

            mask_np = np.asarray(
                cv2.resize(
                    mask_np,
                    dsize=(
                        int(min(w_whole, non_zero_col +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_col - (header.img_size / 2))),
                        int(min(h_whole, non_zero_row +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_row - (header.img_size / 2))))))

            indicer = np.asarray(
                cv2.resize(
                    indicer,
                    dsize=(
                        int(min(w_whole, non_zero_col +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_col - (header.img_size / 2))),
                        int(min(h_whole, non_zero_row +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_row - (header.img_size / 2))))))

            mask_add = np.zeros((1024, 1024))
            mask_add[
                int(max(0, non_zero_row - (header.img_size / 2))
                    ):int(min(h_whole, non_zero_row + (header.img_size / 2))),
                int(max(0, non_zero_col - (header.img_size / 2))
                    ):int(min(w_whole, non_zero_col +
                              (header.img_size / 2)))] = mask_np
            mask_add = mask_add * prob_wt

            indicer_add = np.zeros((1024, 1024))
            indicer_add[
                int(max(0, non_zero_row - (header.img_size / 2))
                    ):int(min(h_whole, non_zero_row + (header.img_size / 2))),
                int(max(0, non_zero_col - (header.img_size / 2))
                    ):int(min(w_whole, non_zero_col +
                              (header.img_size / 2)))] = indicer

            background = background + mask_add
            background_indicer = background_indicer + indicer_add  # each pixel's value counts how many sampled patches covered it

            sum_prob_wt = sum_prob_wt + prob_wt

        final_mask = np.divide(background, background_indicer + 1e-7)

        final_mask = np.expand_dims(np.expand_dims(final_mask, axis=0), axis=0)
        torch_final_mask = torch.from_numpy(final_mask)

        test_img = np.asarray(test_masked_img)  # only the masked image is available in this snippet; it was already resized to 1024x1024
        test_img = (test_img - test_img.min()) / test_img.max()
        test_img = np.expand_dims(test_img, axis=0)
        test_img = np.concatenate((test_img, test_img, test_img), axis=0)
        torch_final_img = torch.from_numpy(np.expand_dims(test_img, axis=0))

        final_cam, cam_result = visualize_cam(torch_final_mask,
                                              torch_final_img)

        final_cam = (final_cam - final_cam.min()) / final_cam.max()
        final_cam_np = np.swapaxes(np.swapaxes(np.asarray(final_cam), 0, 2), 0,
                                   1)
        test_img_np = np.swapaxes(np.swapaxes(test_img, 0, 2), 0, 1)

        final_combined = test_img_np + final_cam_np
        final_combined = (final_combined -
                          final_combined.min()) / final_combined.max()

        plt.imshow(final_combined)
        plt.savefig(
            masked_img.split('.npz')[0] + '.patch.heatmap_' + '.png')
Ejemplo n.º 29
0
                                       shuffle=True,
                                       num_workers=4)
        for x in ['train', 'val']
    }

    class_names = image_datasets['train'].classes
    num_classes = len(class_names)

    print('Size of training dataset: ' + str((len(image_datasets['train']))) +
          '    Size of validation dataset: ' +
          str(len(image_datasets['val'])) + '    Number of classes: ' +
          str(num_classes))

    # Initialize the model
    model_ft, input_size = initialize_model(num_classes,
                                            feature_extract,
                                            use_pretrained=True)

    # Send the model to GPU
    model_ft = model_ft.to(device)

    # Use multiple GPUs if available
    if torch.cuda.device_count() > 1:
        model_ft = nn.DataParallel(model_ft)

    # Gather and show the parameters to be optimized/updated in this run.
    params_to_update = model_ft.parameters()
    if feature_extract:
        params_to_update = []
        for name, param in model_ft.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
Ejemplo n.º 30
0
        os.mkdir('data')


    transform=transforms.Compose([
        transforms.ToTensor()
        ])

    dataset1 = datasets.MNIST('./data', train=True, download=True,
                       transform=transform)
    dataset2 = datasets.MNIST('./data', train=False,
                       transform=transform)

    mov_train_loader = torch.utils.data.DataLoader(dataset1, shuffle=True, batch_size=256)
    mov_test_loader = torch.utils.data.DataLoader(dataset2, shuffle=True, batch_size=256)

    ref_train_loader = torch.utils.data.DataLoader(dataset1, shuffle=True, batch_size=256)
    ref_test_loader = torch.utils.data.DataLoader(dataset2, shuffle=True, batch_size=256)

    model = U.to_cuda(N.DisplNet(6))
    model = U.initialize_model(model)

    criterion = U.to_cuda(torch.nn.MSELoss())

    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    epochs = 10
    for epoch in range(1, epochs + 1):
        train(epochs, model, mov_train_loader, ref_train_loader, optimizer, epoch, criterion)
        test(model, mov_test_loader, ref_test_loader, criterion)

    torch.save(model.state_dict(), "mnist_cnn.pt")
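In this last example initialize_model takes an already-constructed network and returns it, which suggests a weight-initialization helper rather than a model factory. A minimal sketch of such a helper (an assumption about U.initialize_model, not its actual code) is:

import torch.nn as nn

def initialize_model(model):
    # Apply Xavier initialization to conv/linear weights and zero the biases.
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
    return model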