def eval(txt, cluster_num, round, device):
    """Extract a feature vector for every image with this round's saved
    Siamese weights, then cluster the features into pseudo-labels.

    Args:
        txt: currently unused -- the image list path is hard-coded to
            'images.txt' below. TODO(review): confirm whether `txt` was
            meant to be passed to Eval_Dataset.
        cluster_num: number of clusters handed to feature2label.
        round: 0-based round index; weights are read from
            "parameters/Parm_Round_<round+1>.pth".
        device: torch device inference runs on.

    Returns:
        (X, new_labellist) exactly as produced by feature2label.
    """
    eval_dataset = Eval_Dataset('images.txt', transforms.ToTensor())
    eval_dataloader = DataLoader(eval_dataset)

    net = SiameseNetwork().to(device)
    net.load_state_dict(
        torch.load("parameters/Parm_Round_" + str(round + 1) + ".pth"))
    # Fix: switch to inference mode so dropout / batch-norm layers behave
    # deterministically during feature extraction (was missing).
    net.eval()
    features = []

    for img in eval_dataloader:
        with torch.no_grad():
            img = img.to(device)
            # One branch of the Siamese net -> numpy feature vector.
            output = net.forward_once(img).data.cpu().numpy().squeeze()
            features.append(output)

    X, new_labellist = feature2label(features, cluster_num)
    return X, new_labellist
def train(labellist, batch_size, train_number_epochs, learning_rate, round,
          device):
    """Train the Siamese network for one round and save its weights.

    Args:
        labellist: pseudo-labels paired with 'images.txt' entries by
            SiameseDataset.
        batch_size: training batch size.
        train_number_epochs: number of epochs to run.
        learning_rate: Adam learning rate.
        round: 0-based round index; weights are written to
            "parameters/Parm_Round_<round+1>.pth".
        device: torch device training runs on.

    Returns:
        (counter, loss_history): iteration markers (every 20th batch) and
        the loss sampled at those iterations, for plotting.
    """
    train_dataset = SiameseDataset('images.txt', labellist,
                                   transforms.ToTensor())
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True)

    # training
    net = SiameseNetwork().to(device)
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)

    counter = []
    loss_history = []
    iteration_number = 0

    for epoch in range(train_number_epochs):
        total_loss = 0
        num_batches = 0  # fix: original used loop var `i` after the loop,
        start_time = datetime.now()  # which is unbound for an empty loader
        for i, data in enumerate(train_dataloader):
            img0, img1, label = data
            img0, img1, label = img0.to(device), img1.to(device), label.to(
                device)

            optimizer.zero_grad()
            output1, output2 = net(img0, img1)
            loss_contrastive = criterion(output1, output2, label)
            loss_contrastive.backward()
            total_loss += loss_contrastive.item()
            optimizer.step()
            num_batches = i + 1
            # Sample the loss every 20 batches for the returned history.
            if i % 20 == 0:
                iteration_number += 20
                counter.append(iteration_number)
                loss_history.append(loss_contrastive.item())
        end_time = datetime.now()
        # max(..., 1) guards the division when the loader yielded nothing.
        print("Epoch number: {} , Current loss: {:.4f}, Epoch Time: {}".format(
            epoch + 1, total_loss / max(num_batches, 1),
            end_time - start_time))

    torch.save(net.state_dict(),
               "parameters/Parm_Round_" + str(round + 1) + ".pth")
    return counter, loss_history
# ---- Example 3 ----
    def __init__(self, path_wieght: str, path_data: str, similarity,
                 path_feat: str):
        """Build a sketch-to-image ranking pipeline around a pretrained
        Siamese network.

        Args:
            path_wieght: path to the saved Siamese state dict (parameter
                name is misspelled but kept for keyword-caller
                compatibility).
            path_data: root folder of the image dataset handed to Ranker.
            similarity: similarity callable used to compare feature vectors.
            path_feat: path to the precomputed feature database,
                e.g. dbs/features_contrastive.db.
        """
        self.path_weight = path_wieght
        self.path_data = path_data
        self.similarity = similarity
        # Precomputed Flickr image features loaded from disk.
        self.flickr_dataset = ImageFlickrFeatures(
            path_feat)  # e.g. dbs/features_contrastive.db

        # Two ResNet34 backbones, one per Siamese branch
        # (sketches vs. natural images).
        imagenet_net = ResNet34()
        sketches_net = ResNet34()

        siamese_net = SiameseNetwork(sketches_net, imagenet_net)
        siamese_net.load_state_dict(
            torch.load(self.path_weight)
        )  # e.g. best_SiameseNetwork_contrastive.pth
        self.net = siamese_net
        # Ranker uses the Siamese net as feature extractor over the
        # precomputed database.
        self.ranking = Ranker(self.path_data,
                              image_dataset_features=self.flickr_dataset,
                              feature_extractor=self.net,
                              similarity_fn=self.similarity)
# ---- Example 4 ----
def main():
    """Interface for training and evaluating using the command line."""
    global args
    args = parser.parse_args()

    # Single-channel-input Siamese model with the configured embedding size.
    model = SiameseNetwork(1, args.embedding_size)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # If a checkpoint is provided, load its values
    if args.checkpoint:
        state = torch.load(args.checkpoint, map_location=device)
        model.load_state_dict(state['state_dict'])
    else:
        state = None

    # Run the model on a GPU if available
    model.to(device)

    # Fix: pre-initialise so eval-only runs with --save-dir set do not hit
    # a NameError on `optimiser` / `epoch` below.
    optimiser = None
    epoch = None

    # Train the network
    if args.mode == 'train':
        dataset = GEDDataset(args.data, which_set='train', adj_dtype=np.float32, transform=None)
        model, optimiser, epoch = train(model, dataset, batch_size=args.batch_size, embed_size=args.embedding_size, num_epochs=args.epochs,
              learning_rate=args.learning_rate, save_to=args.save_dir, resume_state=args.checkpoint, device=device)

    # Only checkpoint when training actually ran (original crashed here in
    # eval mode because `epoch`/`optimiser` were never bound).
    if args.save_dir and optimiser is not None:
        # Save the model checkpoint
        state = {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimiser': optimiser.state_dict(),
        }
        save_checkpoint(state, args.save_dir)

    # Whether to store the predictions from eval for plotting
    store_res = args.make_plot

    # Post-training evaluation runs on the validation split.
    if args.mode == 'train' and args.post_training_eval:
        args.which_set = 'val'
    if args.mode == 'eval' or args.post_training_eval:
        dataset = GEDDataset(args.data, which_set=args.which_set, adj_dtype=np.float32, transform=None)
        results = eval(model, dataset, batch_size=args.batch_size, store_results=store_res, device=device)

    # Finally, if plotting the results:
    if args.make_plot:
        # Assert that the data has been evaluated
        if not (args.mode == 'eval' or args.post_training_eval):
            raise AttributeError('The flags provided did not specify to evaluate the dataset, which is required for'
                                 'plotting')
        # Make a plot of the results
        print('Making the plot')
        plot_prediction(results[0], results[1])
# ---- Example 5 ----
    def __init__(self) -> None:
        """Set up datasets, the trained Siamese model and log paths for
        evaluating the verification pipeline."""
        # Timestamp used to name this run's log directory.
        self.now = datetime.now().strftime("%Y.%m.%d-%H:%M:%S")
        # NOTE(review): hard-codes GPU 0; fails on CPU-only machines.
        self.device = torch.device('cuda:0')

        # Initializing window dataset hyper-parameters
        self.window_size = 40
        self.num_test_person = 20

        # Initializing window datasets; is_train=False for all splits
        # (presumably disables augmentation -- confirm in SiameseDataset).
        self.train_window_dataset = SiameseDataset(
            dataset='train',
            is_train=False,
            window_size=self.window_size,
            num_test_person=self.num_test_person).base_dataset
        self.val_window_dataset = SiameseDataset(
            dataset="val",
            is_train=False,
            window_size=self.window_size,
            num_test_person=self.num_test_person).base_dataset
        self.test_window_dataset = SiameseDataset(
            dataset='test', is_train=False,
            window_size=self.window_size).base_dataset

        # Initializing the trained model from a Lightning checkpoint,
        # moved to the device and frozen in eval mode.
        #self.model_checkpoint = 'model_params/siamese_network/version_5_bs_256_baselr_1e-3_multisteplr_milestones_5-15-25_gamma_0.33_lstmoutsize_170-latentsize_170-outfcsize_32/version-5-siamese-epoch=28-val_acc_epoch=0.9355.ckpt'
        self.model_checkpoint = 'model_params/siamese_network/version_6_bs_512_baselr_3e-3_multisteplr_milestones_5-10-15-25_gamma_0.33_lstmoutsize_160-latentsize_32-outfcsize_16-abs+cos+sed/version-6-siamese-epoch=33-val_acc_epoch=0.9358.ckpt'
        self.model = SiameseNetwork().load_from_checkpoint(
            checkpoint_path=self.model_checkpoint).to(
                device=self.device).eval()

        # Creating core database
        self.averaging_types = ['mean', 'median']
        self.averaging_type = self.averaging_types[0]
        #self.database = self._create_database()

        # Metric averaging mode and the match-decision threshold.
        self.metric_method = 'micro'
        self.similarity_thres = 0.95

        # Log directory encodes run time, checkpoint and thresholds.
        self.file_log_path = f'test_logs/{self.now}--{self.model_checkpoint.rsplit("/")[-2]}--{self.model_checkpoint.rsplit("/")[-1]}/{self.now}-simthres_{self.similarity_thres}-avgtype_{self.averaging_type}/'
        self.metrics_file_path = self.file_log_path + 'metrics.txt'
# ---- Example 6 ----
    n_val = int(len(dataset) * args.val_size)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train, batch_size=args.batch_size)
    val_loader = DataLoader(val, batch_size=args.batch_size // 4)

    # load backbones
    print("[*] Initializing weights...")
    imagenet_net = ResNet34()
    sketches_net = ResNet34()
    # sketches_net.load_state_dict(torch.load(args.sketches_backbone_weights))
    print("[+] Weights loaded")

    print("[*] Initializing model, loss and optimizer")
    contrastive_net = SiameseNetwork(sketches_net, imagenet_net)
    contrastive_net.to(args.device)
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(contrastive_net.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum)
    else:
        optimizer = torch.optim.Adam(contrastive_net.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=args.t_0)
    contrastive_loss = contrastive_loss()
    cross_entropy_loss = torch.nn.CrossEntropyLoss()
    print("[+] Model, loss and optimizer were initialized successfully")

    if not args.debug:
        wandb.init(project='homework1-cc7221', entity='p137')
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import tqdm
from torch.utils.data import DataLoader

from config import Config
from models import AutoEncoder, SiameseNetwork

# Training-script setup: build the autoencoder + Siamese pair and
# optionally resume both from saved checkpoints.
config = Config()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 

autoencoder = AutoEncoder(config)
siamese_network = SiameseNetwork(config)

# Checkpoint filenames, relative to config.saved_models_folder.
autoencoder_file = '/autoencoder_epoch175_loss1.1991.pth'
siamese_file = '/siamese_network_epoch175_loss1.1991.pth'

if config.load_model:
    autoencoder.load_state_dict(torch.load(config.saved_models_folder + autoencoder_file))
    siamese_network.load_state_dict(torch.load(config.saved_models_folder + siamese_file))

# Both nets are trained jointly, so put both in train mode.
autoencoder.to(device)
autoencoder.train()

siamese_network.to(device)
siamese_network.train()

# Single parameter list so one optimizer updates both networks.
params = list(autoencoder.parameters()) + list(siamese_network.parameters())
# ---- Example 8 ----
        description="Train model utility",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--siamese-weights',
                        default='weights/best_SiameseNetwork_triplet.pth',
                        type=str,
                        help='Path to Siamese network weights')
    parser.add_argument('--flickr-15k',
                        default='data/Flickr15K',
                        type=str,
                        help='Path to flickr dataset folder')
    parser.add_argument('--output',
                        default='features.db',
                        type=str,
                        help='Output path of feature db')

    args = parser.parse_args()

    imagenet_net = ResNet34()
    sketches_net = ResNet34()

    print("[*] Adapting output layers and loading weights...")
    siamese_net = SiameseNetwork(sketches_net, imagenet_net)
    siamese_net.load_state_dict(torch.load(args.siamese_weights))
    print("[+] Done!")

    dataset = ImageFlickr15K(path=args.flickr_15k)
    saver = ImageFlickrSaver(args.output,
                             model=siamese_net,
                             image_dataset=dataset)
    saver.export()
# ---- Example 9 ----
# Pick the dataset / model / loss family from Config.network:
# 'siamese' -> pair-based contrastive setup, anything else -> triplet.
train_dataset = MadoriDataset(
) if Config.network == 'siamese' else TriMadoriDataset()
val_dataset = MadoriDataset(
    train=False) if Config.network == 'siamese' else TriMadoriDataset(
        train=False)

# data loaders (train shuffled, validation in fixed order)
train_dataloader = DataLoader(train_dataset,
                              shuffle=True,
                              batch_size=Config.batch_size)
val_dataloader = DataLoader(val_dataset,
                            shuffle=False,
                            batch_size=Config.batch_size)

# models -- loss must match the network family chosen above
net = SiameseNetwork() if Config.network == 'siamese' else TripletNetwork()
net = net.to(device)
criterion = ContrastiveLoss() if Config.network == 'siamese' else TripletLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0005)


def train_siamese():
    train_loss_history, val_loss_history = [], []
    lowest_epoch_train_loss = lowest_epoch_val_loss = float('inf')

    for epoch in tqdm(range(Config.train_number_epochs)):
        # training
        net.train()
        epoch_train_loss = 0
        for batch_no, data in enumerate(train_dataloader):
            img0, img1, label = data
# ---- Example 10 ----
            labels = list(labels.detach().cpu().numpy())

            # group similarities
            group_similarities = list(zip(list(paths), labels, distances))
            similarities.extend(group_similarities)

        similarities = sorted(similarities, key=lambda x: x[2], reverse=True)

        return query_class, similarities


if __name__ == '__main__':
    # Smoke test: rank Flickr15K images against one query sketch.

    from models import ResNet34
    from similarities import CosineSimilarity
    from feats import ImageFlickrFeatures

    # Two ResNet34 backbones, one per Siamese branch.
    imagenet_net = ResNet34()
    sketches_net = ResNet34()

    siamese_net = SiameseNetwork(sketches_net, imagenet_net)
    siamese_net.load_state_dict(torch.load("weights/best_SiameseNetwork_contrastive.pth"))

    # NOTE(review): uses torch.nn.CosineSimilarity even though
    # similarities.CosineSimilarity is imported above -- confirm intended.
    s = torch.nn.CosineSimilarity()
    flickr_dataset = ImageFlickrFeatures("dbs/features.db")
    r = Ranker('/home/rudy/Documents/cc7221/tarea2/data/Flickr15K',
               image_dataset_features=flickr_dataset,
               feature_extractor=siamese_net,
               similarity_fn=s)
    rank = r.get_rank('data/Flickr15K/queries/1001.png')
# ---- Example 11 ----
import torchvision.transforms as transforms
import tqdm
from torch.utils.data import DataLoader

from config import Config
from models import Encoder, SiameseNetwork

# Inference hyper-parameters: batch size and the similarity threshold
# above which a pair is treated as a match (presumably -- confirm in
# the downstream comparison code).
batch_size = 8
threshold = 0.9

config = Config()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

encoder = Encoder(config)
siamese_network = SiameseNetwork(config)

# Restore both nets from their epoch-500 checkpoints and freeze them
# in eval mode for inference.
encoder.load_state_dict(
    torch.load(config.saved_models_folder +
               '/encoder_epoch500_loss0.0009.pth'))
encoder.to(device)
encoder.eval()

siamese_network.load_state_dict(
    torch.load(config.saved_models_folder +
               '/siamese_network_epoch500_loss0.0009.pth'))
siamese_network.to(device)
siamese_network.eval()

transform = transforms.Compose(
    [transforms.Grayscale(num_output_channels=1),
# ---- Example 12 ----
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import tqdm
from torch.utils.data import DataLoader

from config import Config
from models import Encoder, SiameseNetwork

# Training-script setup for the encoder + Siamese pair.
config = Config()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

encoder = Encoder(config)
siamese_network = SiameseNetwork(config)

# Optionally resume training from the epoch-25 checkpoints.
if config.load_model:
    encoder.load_state_dict(
        torch.load(config.saved_models_folder + '/encoder_epoch25.pth'))
    siamese_network.load_state_dict(
        torch.load(config.saved_models_folder +
                   '/siamese_network_epoch25.pth'))

# Both nets are optimized jointly, so keep both in train mode.
encoder.to(device)
encoder.train()

siamese_network.to(device)
siamese_network.train()

# One parameter list so a single optimizer updates both networks.
params = list(encoder.parameters()) + list(siamese_network.parameters())
# ---- Example 13 ----
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train, batch_size=args.batch_size)
    val_loader = DataLoader(val, batch_size=args.batch_size)
    print("[+] Dataset initialized successfully")

    # load backbones
    print("[*] Initializing weights...")
    imagenet_net = ResNet34()
    sketches_net = ResNet34()
    # sketches_net.load_state_dict(torch.load(args.sketches_backbone_weights))
    print("[+] Weights loaded")

    print("[*] Adapting output layers...")

    print("[*] Initializing model, loss and optimizer")
    siamese_net = SiameseNetwork(sketches_net, imagenet_net)
    siamese_net.to(args.device)
    optimizer = torch.optim.Adam(siamese_net.parameters(), lr=args.lr)
    triplet_loss = triplet_loss()
    cross_entropy_loss = torch.nn.CrossEntropyLoss()
    print("[+] Model, loss and optimizer were initialized successfully")

    if not args.debug:
        wandb.init(project='homework1-cc7221', entity='p137')
        config = wandb.config
        config.model = siamese_net.__class__.__name__ + "_triplet"
        config.device = device
        config.batch_size = args.batch_size
        config.epochs = args.epochs
        config.learning_rate = args.lr
# ---- Example 14 ----
    # 5) eval
    if args.eval_mode:
        # +) eval dataset
        print('========== eval dataset ==========')
        eval_dataset = data_utils.Dataset(track=args.track,
                                          data='eval',
                                          size=args.input_size,
                                          feature=args.feature,
                                          tag=args.data_tag)
        eval_loader = DataLoader(eval_dataset,
                                 batch_size=args.eval_batch_size,
                                 shuffle=False,
                                 num_workers=8)
        # +) load model
        print('========== eval process ==========')
        model = SiameseNetwork(args.embedding_size).to(device)
        eval_checkpoint_path = '{}/epoch_{}.pth'.format(
            model_save_path, str(args.eval_num_checkpoint))
        model.load_state_dict(torch.load(eval_checkpoint_path))
        print('model loaded from ', eval_checkpoint_path)
        # +) eval
        eval_output_path = '{}/{}.result'.format(model_save_path,
                                                 str(args.eval_num_checkpoint))
        evaluate(eval_dataset, eval_loader, model, device, eval_output_path)
        print('eval output saved to ', eval_output_path)

    # 6) train & dev
    else:
        # +) dev dataset
        print('========== dev dataset ==========')
        dev_dataset = data_utils.Dataset(track=args.track,
import torchvision.transforms as transforms
import tqdm
from torch.utils.data import DataLoader

from config import Config
from models import AutoEncoder, SiameseNetwork

# Inference hyper-parameters: batch size and the match threshold.
batch_size = 100
threshold = 0.7

config = Config()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

autoencoder = AutoEncoder(config)
siamese_network = SiameseNetwork(config)

# Checkpoint filenames, relative to config.saved_models_folder.
autoencoder_file = '/autoencoder_epoch225_loss0.7295.pth'
siamese_file = '/siamese_network_epoch225_loss0.7295.pth'

autoencoder.load_state_dict(
    torch.load(config.saved_models_folder + autoencoder_file))
autoencoder.to(device)
# NOTE(review): .train() after loading a checkpoint in what looks like an
# inference script -- confirm eval() was not intended here.
autoencoder.train()

siamese_network.load_state_dict(
    torch.load(config.saved_models_folder + siamese_file))
siamese_network.to(device)
siamese_network.train()

transform = transforms.Compose(
# ---- Example 16 ----
# Command-line driven configuration for the few-shot segmentation run.
n_epochs = args.n_epochs
batches = args.batchs
batch_size = args.batch_size

net = args.net
dataset = args.dataset
data_root = args.data_root

# Output directory keyed by the few-shot configuration.
save_dir = 'outputs/{}-way-{}-shot_{}_{}/segment'.format(class_per_set,
                                                         sample_per_class,
                                                         net,
                                                         dataset)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

# Load the trained Siamese model and pull out its CNN backbone submodule.
m = SiameseNetwork(network_type=args.net).cuda()
m.load_state_dict(torch.load(args.resume))
CNN_net = m._modules['net']

# Per-class Dice accumulators: 22 classes x 2 values each
# (presumably mean/std or two metrics -- confirm against the loop below).
dice_all1 = np.zeros((22,2))
dice_all2 = np.zeros((22,2))
for i in range(0, 22):
    print("CLASS  --  ", i)
    vols = np.load(os.path.join(data_root, 'testtom_vol_{}.npy'.format(str(i).zfill(2))))
    segs = np.load(os.path.join(data_root, 'testtom_seg_{}.npy'.format(str(i).zfill(2))))
    vols = preprocess_image(vols)

    dice_c1 = np.zeros((segs.shape[0], 1))
    dice_c2 = np.zeros((segs.shape[0], 1))
    for j in range(segs.shape[0]):
        vol = vols[:,:,j,:,:,:]
# ---- Example 17 ----
                                      num_output_classes=num_output_classes,
                                      hidden_size=args.lstm_hidden_dim,
                                      attention_kernel_size=args.encoder_output_size,
                                      use_bias=True,
                                      num_att_layers=2,
                                      embedding_matrix=embedding_matrix)

    model_2 = StackedAttentionNetwork(desc_input_shape=(args.batch_size, 102),
                                      img_input_shape=(args.batch_size, 512, 14, 14),
                                      num_output_classes=2,
                                      hidden_size=args.lstm_hidden_dim,
                                      attention_kernel_size=args.encoder_output_size,
                                      use_bias=True,
                                      num_att_layers=2,
                                      embedding_matrix=embedding_matrix)

# Siamese model pairing the two attention networks; the FC head sizes
# come from the command line.
siamese_model = SiameseNetwork(item_1_model=model_1, item_2_model=model_2, encoder_output_size=args.encoder_output_size,
                               fc1_size=args.fc1_size, fc2_size=args.fc2_size, use_bias=True)

# Experiment wrapper: owns the training loop, checkpointing and metrics.
experiment = ExperimentBuilder(network_model=siamese_model,
                               experiment_name=args.experiment_name,
                               num_epochs=args.num_epochs,
                               learning_rate=args.lr,
                               weight_decay_coefficient=args.weight_decay_coefficient,
                               continue_from_epoch=args.continue_from_epoch,
                               device=device,
                               train_data=training_data_loader,
                               val_data=valid_data_loader,
                               test_data=test_data_loader)  # build an experiment object
experiment_metrics, test_metrics = experiment.run_experiment()  # run experiment and return experiment metrics
# ---- Example 18 ----
dataset = args.dataset
data_root = args.data_root

# Create the output directory for this few-shot configuration.
if not os.path.exists('outputs/{}-way-{}-shot_{}_{}'.format(class_per_set,
                                                            sample_per_class,
                                                            net,
                                                            dataset)):
    os.makedirs('outputs/{}-way-{}-shot_{}_{}'.format(class_per_set,
                                                      sample_per_class,
                                                      net,
                                                      dataset))

# N-shot episodic sampler over the subtomogram data.
data = SubtomogramNShotDataset(data_root=data_root,
                               batch_size=batch_size, class_per_set=class_per_set, sample_per_class=sample_per_class)

model = SiameseNetwork(network_type=args.net).cuda()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)

# training phase
# NOTE(review): 'continue_tarining' is misspelled; kept byte-identical
# in case other code references it. Set True and fill pre_model to resume.
continue_tarining = False
if continue_tarining:
    pre_model = ''
    print("Using Model ", pre_model, " to continue training!")
    model.load_state_dict(torch.load(pre_model))

print("====================Start Training:====================")
print("===================={}-Way-{}-Shot-{}-{} Learning====================".format(class_per_set,
                                                                                     sample_per_class,
                                                                                     net,
                                                                                     dataset))