Example #1
 def generate(self, dataset_name=['ADNI', 'NACC', 'AIBL'], epoch=None):
     if epoch is not None:
         self.netG.load_state_dict(
             torch.load('{}G_{}.pth'.format(self.checkpoint_dir, epoch)))
     else:
         self.netG.load_state_dict(
             torch.load('{}G_{}.pth'.format(self.checkpoint_dir,
                                            self.optimal_epoch)))
     sources = [
         "/data/datasets/ADNI_NoBack/", "/data/datasets/NACC_NoBack/",
         "/data/datasets/AIBL_NoBack/"
     ]
     targets = ["./ADNIP_NoBack/", "./NACCP_NoBack/", "./AIBLP_NoBack/"]
     data = []
     if 'ADNI' in dataset_name:
         data += [
             Data(sources[0],
                  class1='ADNI_1.5T_NL',
                  class2='ADNI_1.5T_AD',
                  stage='all',
                  shuffle=False)
         ]
     if 'NACC' in dataset_name:
         data += [
             Data(sources[1],
                  class1='NACC_1.5T_NL',
                  class2='NACC_1.5T_AD',
                  stage='all',
                  shuffle=False)
         ]
     if 'AIBL' in dataset_name:
         data += [
             Data(sources[2],
                  class1='AIBL_1.5T_NL',
                  class2='AIBL_1.5T_AD',
                  stage='all',
                  shuffle=False)
         ]
     dataloaders = [
         DataLoader(d, batch_size=1, shuffle=False) for d in data
     ]
     Data_lists = [d.Data_list for d in data]
     with torch.no_grad():
         self.netG.train(False)
         for i in range(len(dataloaders)):
             dataloader = dataloaders[i]
             target = targets[i]
             Data_list = Data_lists[i]
             for j, (input, label) in enumerate(dataloader):
                 output = input.cuda() + self.netG(input.cuda())
                 if not os.path.isdir(target):
                     os.mkdir(target)
                 np.save(target + Data_list[j],
                         output.data.cpu().numpy().squeeze())
Example #2
def eval_cnns(cnn1, cnn2):
    data  = []
    names = ['ADNI', 'NACC', 'FHS ', 'AIBL']

    sources = ["/data/datasets/ADNI_NoBack/", "/data/datasets/NACC_NoBack/", "/data/datasets/FHS_NoBack/", "/data/datasets/AIBL_NoBack/"]
    data += [Data(sources[0], class1='ADNI_1.5T_NL', class2='ADNI_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(sources[1], class1='NACC_1.5T_NL', class2='NACC_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(sources[2], class1='FHS_1.5T_NL', class2='FHS_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(sources[3], class1='AIBL_1.5T_NL', class2='AIBL_1.5T_AD', stage='all', shuffle=False)]
    dataloaders = [DataLoader(d, batch_size=1, shuffle=False) for d in data]

    data = []
    targets = ["/data/datasets/ADNIP_NoBack/", "/data/datasets/NACCP_NoBack/", "/data/datasets/FHSP_NoBack/", "/data/datasets/AIBLP_NoBack/"]
    data += [Data(targets[0], class1='ADNI_1.5T_NL', class2='ADNI_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(targets[1], class1='NACC_1.5T_NL', class2='NACC_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(targets[2], class1='FHS_1.5T_NL', class2='FHS_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(targets[3], class1='AIBL_1.5T_NL', class2='AIBL_1.5T_AD', stage='all', shuffle=False)]
    dataloaders_p = [DataLoader(d, batch_size=1, shuffle=False) for d in data]

    print('testing now!')

    accs_ori = []
    accs_gen = []

    with torch.no_grad():
        cnn1.model.train(False)
        cnn2.model.train(False)

        for i in range(len(names)):
            name = names[i]
            dataloader = dataloaders[i]
            dataloader_p = dataloaders_p[i]

            preds1 = []
            labels = []
            for input, label in dataloader:
                input = input.cuda()
                pred = cnn1.model(input)
                preds1.append(torch.argmax(pred).item())
                labels.append(label.item())
            acc_ori = accuracy_score(labels, preds1)
            accs_ori += [acc_ori]
            print('1.5  accu on', name, 'is:', acc_ori)

            preds2 = []
            labels = []
            for input, label in dataloader_p:
                input = input.cuda()
                pred = cnn2.model(input)
                preds2.append(torch.argmax(pred).item())
                labels.append(label.item())
            acc_gen = accuracy_score(labels, preds2)
            accs_gen += [acc_gen]
            print('1.5+ accu on', name, 'is:', acc_gen)
    return accs_ori, accs_gen
Example #3
def load_model(filename, device, model_name="FABFM", seed=1234):
    if os.path.isfile(filename):
        from dataloader import Data
        # Load file
        checkpoint = torch.load(filename)

        # Load dataset setting
        neg = checkpoint["neg"]
        ds = Data(root_dir="../data/ta_feng/")
        train, test, _ = ds.get_data(neg=neg)
        n_usr = len(ds.usrset)
        n_itm = len(ds.itemset)

        # Load network
        model_name = checkpoint["name"]
        optimizer = checkpoint["optimizer"]
        k = checkpoint["k"]
        gamma = checkpoint["gamma"]
        alpha = checkpoint["alpha"]
        epoch = checkpoint["epoch"]
        for _ in range(epoch + 1):
            random.seed(seed)
            seed = random.randint(0, 9999)

        if model_name == "FABFM":
            d = checkpoint["d"]
            h = checkpoint["h"]
            from models.fixed_abfm import FABFM
            model = FABFM(n_usr, n_itm, k, d, h, gamma,
                          alpha).to(device=device)
        elif model_name == "ABFM":
            from models.abfm import ABFM
            model = ABFM(n_usr, n_itm, k, gamma, alpha).to(device=device)
        elif model_name == "BFM":
            from models.bfm import BFM
            norm = checkpoint["norm"]
            model = BFM(n_usr, n_itm, k, gamma, alpha).to(device=device)
        model.load_state_dict(checkpoint["state_dict"])

        return model, train, test, ds.n_train, ds.n_test, n_usr, n_itm, seed
    else:
        print(f"There is not {filename}")
        return None
Example #4
def main():
    import csv

    from dataloader import Data
    from models.bfm import BFM
    from models.abfm import ABFM

    # model paths
    with open("./path") as f:
        paths = [row[0] for row in csv.reader(f, delimiter="\n")]
    # paths = [paths[15]]

    ds = Data(root_dir="./data/ta_feng/")
    itemset = ds.itemset

    #load network
    n_usr = len(ds.usrset)
    n_itm = len(ds.itemset)
    k = 32

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = ABFM(n_usr, n_itm, k).to(device=device)
    # model = BFM(n_usr, n_itm, k).to(device=device)

    for path in paths:
        # reset test dataset
        _, test, _ = ds.get_data()
        n_test = ds.n_test
        print(f"{path:-^60}")
        model.load_state_dict(torch.load(path))

        result, r_result, diversity = evaluate(test, n_test, model, n_usr,
                                               n_itm, device)
        # result = r_at_n(test, n_test, model, n, m)
        print(f"HLU       : {result}")
        print(f"R@10      : {r_result}")
        print(f"Diversity : {diversity}")
        print("{:-^60}".format(""))
Example #5
def train_val(args):
    model = build_model(args)

    batch_size = 16
    data_dir = 'data'
    data_folders = ['udacity', '1', '2', '3', '4', '5', '6']
    model_dir = 'models'
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    data_loader = Data(data_dir, data_folders, batch_size=batch_size)
    train_data, val_data = data_loader.getData()
    train_gen, val_gen = data_loader.getGenerators()

    model.compile(loss='mse', optimizer='adam')
    model.fit_generator(train_gen,
                        steps_per_epoch=ceil(len(train_data) / batch_size),
                        validation_data=val_gen,
                        validation_steps=ceil(len(val_data) / batch_size),
                        epochs=4,
                        verbose=1)

    model.save(os.path.join(model_dir, 'model.h5'))
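`build_model` is not shown in this example; below is a minimal hedged stand-in compatible with the `mse`/`adam` compile call above. The convolutional architecture, layer sizes, and input shape are assumptions, not the author's model.

from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense

def build_model(args):
    # tiny behavioral-cloning-style regressor; `args` is accepted for
    # signature compatibility but unused in this sketch
    model = Sequential()
    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu',
                     input_shape=(160, 320, 3)))  # input shape is an assumption
    model.add(Flatten())
    model.add(Dense(50, activation='relu'))
    model.add(Dense(1))  # single continuous output (e.g. a steering angle)
    return model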
Example #6
    def __init__(self, hidden_size, cond_embed_size, output_size, target_path,
                 criterion, epoch, train_or_not, lr, input_embed_size,
                 teacher_forcing_ratio, ratio_kind):
        # initialize variable
        self.hidden_size = hidden_size
        self.cond_embed_size = cond_embed_size
        self.output_size = output_size
        self.target_path = target_path
        self.criterion = criterion
        self.train_or_not = train_or_not
        self.epoch = epoch
        self.learning_rate = lr
        self.teacher_forcing_ratio = teacher_forcing_ratio
        self.input_embed_size = input_embed_size
        self.ratio_kind = ratio_kind
        filename = self.get_bleuname()
        self.weight_name = 'CVAE_' + filename.replace('.csv', '') + '.pt'

        # initialize using class
        self.C2D = Char2Dict(cond_embed_size)
        self.DataLoader = Data(target_path)
        self.Encoder = EncoderRNN(input_embed_size, hidden_size,
                                  cond_embed_size).to(device)
        self.Decoder = DecoderRNN(input_embed_size, hidden_size,
                                  output_size).to(device)
        self.CVAE = CVAE(encoder=self.Encoder,
                         decoder=self.Decoder,
                         hidden_size=self.hidden_size,
                         cond_embed_size=self.cond_embed_size,
                         C2D=self.C2D,
                         Train=self.train_or_not,
                         output_size=self.output_size,
                         teacher_forcing_ratio=self.teacher_forcing_ratio,
                         input_embed_size=self.input_embed_size)
        self.CVAE_optimizer = optim.SGD(self.CVAE.parameters(),
                                        lr=self.learning_rate,
                                        momentum=0.9)
Example #7
def eval_iqa_validation(metrics=['piqe']):
    eng = matlab.engine.start_matlab()
    data = Data("./ADNIP_NoBack/",
                class1='ADNI_1.5T_NL',
                class2='ADNI_1.5T_AD',
                stage='valid',
                shuffle=False)
    dataloader = DataLoader(data, batch_size=1, shuffle=False)
    Data_list = data.Data_list
    for m in metrics:
        iqa_gens = []
        for j, (input_p, _) in enumerate(dataloader):
            input_p = input_p.squeeze().numpy()
            iqa_gens += [iqa_tensor(input_p, eng, Data_list[j], m, './iqa/')]
        print('Average ' + m + ' on ' + 'ADNI validation ' + ' is:')
        iqa_gens = np.asarray(iqa_gens)
        print('1.5* : ' + str(np.mean(iqa_gens)) + ' ' + str(np.std(iqa_gens)))
    eng.quit()
Example #8
def PRF_cnns(cnn1, cnn2):
    data  = []
    names = ['ADNI', 'NACC', 'FHS ', 'AIBL']

    sources = ["/data/datasets/ADNI_NoBack/", "/data/datasets/NACC_NoBack/", "/data/datasets/FHS_NoBack/", "/data/datasets/AIBL_NoBack/"]
    data += [Data(sources[0], class1='ADNI_1.5T_NL', class2='ADNI_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(sources[1], class1='NACC_1.5T_NL', class2='NACC_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(sources[2], class1='FHS_1.5T_NL', class2='FHS_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(sources[3], class1='AIBL_1.5T_NL', class2='AIBL_1.5T_AD', stage='all', shuffle=False)]
    dataloaders = [DataLoader(d, batch_size=1, shuffle=False) for d in data]

    data = []
    targets = ["/data/datasets/ADNIP_NoBack/", "/data/datasets/NACCP_NoBack/", "/data/datasets/FHSP_NoBack/", "/data/datasets/AIBLP_NoBack/"]
    data += [Data(targets[0], class1='ADNI_1.5T_NL', class2='ADNI_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(targets[1], class1='NACC_1.5T_NL', class2='NACC_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(targets[2], class1='FHS_1.5T_NL', class2='FHS_1.5T_AD', stage='all', shuffle=False)]
    data += [Data(targets[3], class1='AIBL_1.5T_NL', class2='AIBL_1.5T_AD', stage='all', shuffle=False)]
    dataloaders_p = [DataLoader(d, batch_size=1, shuffle=False) for d in data]

    print('testing now!')

    accs_ori = []
    accs_gen = []

    with torch.no_grad():
        cnn1.model.train(False)
        cnn2.model.train(False)

        ps_1 = []
        rs_1 = []
        fs_1 = []
        ps_2 = []
        rs_2 = []
        fs_2 = []
        for i in range(len(names)):
            name = names[i]
            dataloader = dataloaders[i]
            dataloader_p = dataloaders_p[i]

            preds1 = []
            labels = []
            for input, label in dataloader:
                input = input.cuda()
                pred = cnn1.model(input)
                preds1.append(torch.argmax(pred).item())
                labels.append(label.item())
            p, r, f, _ = report(labels, preds1)
            ps_1 += [p]
            rs_1 += [r]
            fs_1 += [f]
            # '''
            print('1.5  PRF on', name, ':')
            print('\tprecision:', p)
            print('\trecall:', r)
            print('\tf score:', f)
            # '''

            preds2 = []
            labels = []
            for input, label in dataloader_p:
                input = input.cuda()
                pred = cnn2.model(input)
                preds2.append(torch.argmax(pred).item())
                labels.append(label.item())
            p, r, f, _ = report(labels, preds2)
            ps_2 += [p]
            rs_2 += [r]
            fs_2 += [f]
            # '''
            print('1.5+ PRF on', name, ':')
            print('\tprecision:', p)
            print('\trecall:', r)
            print('\tf score:', f)
            # '''
    return ps_1, rs_1, fs_1, ps_2, rs_2, fs_2
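The `report` helper used above is not shown; its call sites unpack four values, which matches sklearn's `precision_recall_fscore_support`. A minimal hedged stand-in (the binary averaging is an assumption):

from sklearn.metrics import precision_recall_fscore_support

def report(labels, preds):
    # returns (precision, recall, f-score, support)
    return precision_recall_fscore_support(labels, preds, average='binary')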
Example #9
import os
import numpy as np
import PIL
import cv2
import IPython.display
from IPython.display import clear_output
from datetime import datetime
from dataloader import Data, TestData

try:
    from keras_contrib.layers.normalization import InstanceNormalization
except Exception:
    from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization

# Initialize dataloader
data = Data()
test_data = Data()

# Save the model every N minutes
TIME_INTERVALS = 2
SHOW_SUMMARY = True

INPUT_SHAPE = (256, 256, 3)
EPOCHS = 2
BATCH = 1

# 25%, i.e. a 64-pixel-wide strip, will be masked on each side
MASK_PERCENTAGE = .25

EPSILON = 1e-9
ALPHA = 0.0004
Example #10
print("Import Train Data...")

img_size = 256  # try 128

training_folders = [
    "../../Data/Processed/train/epidural",
    "../../Data/Processed/train/intraparenchymal",
    "../../Data/Processed/train/subarachnoid",
    "../../Data/Processed/train/intraventricular",
    "../../Data/Processed/train/subdural",
]

train_data = Data(
    training_folders,
    maximum_per_folder=1,  #5000
    size=img_size,
    tl_model="alexnet",
    in_channels=3,
)

val_folders = [
    "../../Data/Processed/val/epidural",
    "../../Data/Processed/val/intraparenchymal",
    "../../Data/Processed/val/subarachnoid",
    "../../Data/Processed/val/intraventricular",
    "../../Data/Processed/val/subdural",
]

val_data = Data(
    val_folders,
    maximum_per_folder=1,  #1500 
Example #11
def main():
    import sys
    sys.path.append("../")
    import datetime
    from torch.utils.data import DataLoader

    from dataloader import Data

    seed=1234
    seed_everything(seed)

    ds = Data()
    train, test, valid = ds.get_data()

    """
    n: # users
    m: # items
    k: latent vec dim
    """
    n = len(ds.usrset)
    m = len(ds.itemset)
    k = 4

    gamma=[1,1,1,1]
    alpha=0.0
    norm=False

    # lr=0.0001
    lr=0.01
    momentum=0
    weight_decay=0.01

    epochs=21
    neg=2

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = BFM(n, m, k, gamma, alpha).to(device=device)

    # \alpha*||w||_2 is L2 regularization
    # the weight_decay option implements this regularization
    # the weight_decay value is \alpha
    optimizer = optim.SGD(model.parameters(), \
                          lr=lr, \
                          momentum=momentum, \
                          weight_decay=weight_decay)
    # optimizer = optim.Adam(model.parameters(), \
    #                       lr=lr, \
    #                       weight_decay=weight_decay)
    criterion = nn.BCEWithLogitsLoss()

    # traced = torch.jit.script(model)
    # x = next(train)
    # x, label = x[0], x[1]
    # traced = torch.jit.trace(model, example_inputs=(x, label, torch.tensor([1.])))
    # print(traced.code)

    """
    cnt = 0
    for x in train:
        optimizer.zero_grad()
        x, label = x[0], x[1]
        delta = torch.tensor([[1.]])
        y = model(x, delta=delta, pmi=1)
        if label.item()==-1:
            loss = criterion(y, torch.tensor([[0.]]))
        else:
            loss = criterion(y, label)
        loss.backward()
        optimizer.step()
        if cnt%2500==0:
            print(f"Loss : {loss:.6f} at {cnt:6d}, " \
                  f"Label : {label.item():2.0f},   " \
                  f"# basket item : {x.sum().item()-2:3.0f}")
        cnt+=1
    print(cnt) # => 957264
    os.makedirs("../trained", exist_ok=True)
    torch.save(model.state_dict(), "../trained/BFM_alldelta1.pt")
    """

    # Saved directory
    today = datetime.date.today()
    c_time = datetime.datetime.now().strftime("%H-%M-%S")
    save_dir = f"../trained/bfm/{today}/{c_time}"
    os.makedirs(save_dir, exist_ok=True)
    model_name = "BFM"

    # Load trained parameters
    loaded = False
    if loaded:
        model_path = "../trained/2019-11-08/BFM_4.pt"
        model.load_state_dict(torch.load(model_path))
        epochs = 5


    # Print Information
    print("{:-^60}".format("Data stat"))
    print(f"# User        : {n}\n" \
          f"# Item        : {m}\n" \
          f"Neg sample    : {neg}")
    print("{:-^60}".format("Optim status"))
    print(f"Optimizer     : {optimizer}\n" \
          f"Criterion     : {criterion}\n" \
          f"Learning rate : {lr}\n" \
          f"Momentum      : {momentum}\n" \
          f"Weight decay  : {weight_decay}")
    print("{:-^60}".format("Model/Learning status"))
    print(f"Model name    : {model_name}\n" \
          f"Mid dim       : {k}\n" \
          f"Gamma         : {gamma}\n" \
          f"Alpha         : {alpha}\n" \
          f"Normalize     : {norm}\n" \
          f"Epochs        : {epochs}\n" \
          f"Loaded        : {loaded}")
    if loaded:
        print(f"Learned model : {model_path}")
    print("{:-^60}".format("Description"))
    print("Changed random seed to get train data\n"\
         "Use double type for all layers.")
    print("{:-^60}".format("Path"))
    print(f"{save_dir}")
    print("{:-^60}".format(""), flush=True)


    for e in range(epochs):
        cnt = 0
        ave_loss = 0
        random.seed(seed)
        seed = random.randint(0, 9999)
        print("{:-^60}".format(f"epoch {e}, seed={seed}"))

        train, _, _ = ds.get_data(neg=neg, seed=seed)
        for x in train:
            optimizer.zero_grad()
            x, label = x[0].to(device), x[1].to(device)
            loss = model(x, delta=label, pmi=1)
            if label==-1:
                loss = criterion(loss, label+1)
            else:
                loss = criterion(loss, label)

            loss.backward()
            optimizer.step()

            with torch.no_grad():
                ave_loss += loss.item()
            cnt+=1
            if cnt%2500==0:
                print(f"Last loss : {loss.item():>10.6f} at {cnt:6d}, " \
                      f"Label : {label.item():2.0f},   " \
                      f"# basket item : {x.sum().item()-2:3.0f},   " \
                      f"Average loss so far : {ave_loss/cnt:>9.6f}", flush=True)
        # torch.save(model.state_dict(), f"{save_dir}/{model_name}_{e}.pt")
        # Better way to save model?
        state = {"name": model_name, "epoch": e, "state_dict": model.state_dict(),
                 "neg":neg, "optimizer": optimizer.state_dict(),
                 "k": k, "gamma": gamma, "alpha": alpha, "norm": norm}
        torch.save(state, f"{save_dir}/{model_name}_{e}.pt")

        print("{:-^60}".format("end"))
Example #12
def main():
    import sys
    sys.path.append("../")
    import datetime
    from torch.utils.data import DataLoader

    from dataloader import Data

    seed = 1234
    seed_everything(seed)

    ds = Data()
    _, _, _ = ds.get_data()
    """
    n: # users
    m: # items
    k: latent vec dim
    """
    n_usr = len(ds.usrset)
    n_itm = len(ds.itemset)
    k = 16

    gamma = [1, 1, 0, 1]
    alpha = 0.0
    d = k
    h = 2
    norm = False

    lr = 0.0001
    momentum = 0
    # weight_decay=1e-5
    weight_decay = 0

    # epochs=21
    epochs = 100
    neg = 2

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = FABFM(n_usr, n_itm, k, d, h, gamma, alpha, norm).to(device=device)

    # \alpha*||w||_2 is L2 regularization
    # the weight_decay option implements this regularization
    # the weight_decay value is \alpha
    # optimizer = optim.SGD(model.parameters(), \
    #                       lr=lr, \
    #                       momentum=momentum, \
    #                       weight_decay=weight_decay)
    optimizer = optim.Adam(model.parameters(), \
                          lr=lr, \
                          weight_decay=weight_decay)
    criterion = nn.BCEWithLogitsLoss()

    # Saved directory
    today = datetime.date.today()
    c_time = datetime.datetime.now().strftime("%H-%M-%S")
    save_dir = f"../trained/fixed_abfm/{today}/{c_time}"
    os.makedirs(save_dir, exist_ok=True)
    model_name = "FABFM"

    # Load trained parameters
    loaded = False
    if loaded:
        model_path = ""
        model, train, test, l_train, l_test, n_usr, n_itm, seed = load_model(
            model_path, device, model_name="FABFM", seed=seed)
        epochs = 101

    # Print Information
    print("{:-^60}".format("Data stat"))
    print(f"# User        : {n_usr}\n" \
          f"# Item        : {n_itm}\n" \
          f"Neg sample    : {neg}")
    print("{:-^60}".format("Optim status"))
    print(f"Optimizer     : {optimizer}\n" \
          f"Criterion     : {criterion}\n" \
          f"Learning rate : {lr}\n" \
          f"Momentum      : {momentum}\n" \
          f"Weight decay  : {weight_decay}")
    print("{:-^60}".format("Model/Learning status"))
    print(f"Model name    : {model_name}\n" \
          f"Mid dim k     : {k}\n" \
          f"Q,K,M dim d   : {d}\n" \
          f"Head          : {h}\n" \
          f"Gamma         : {gamma}\n" \
          f"Alpha         : {alpha}\n" \
          f"Epochs        : {epochs}\n" \
          f"Norm          : {norm}\n" \
          f"Loaded        : {loaded}")
    if loaded:
        print(f"Learned model : {model_path}")
    print("{:-^60}".format("Description"))
    print("Addition and normalization with h*n_b.\n"\
          "Attention is applied to only t_b and u_b.\n"\
          "Use same vector for target and basket.\n"\
          "Changed random seed to get train data.\n"\
          "Add layer normalization after attn_V*O\n"\
          # "Change L2 norm coefficient to 1e-5(0.00001).\n"\
          "no basket interaction\n"\
          # "Normalize basket items interaction.\n"\
          "Without L2 norm.\n"\
          # "Pos:Neg=1:1\n"\
          "Use double type for all layers.")
    print("{:-^60}".format("Path"))
    print(f"{save_dir}")
    print("{:-^60}".format(""), flush=True)

    for e in range(epochs):
        cnt = 0
        ave_loss = 0
        random.seed(seed)
        seed = random.randint(0, 9999)
        print("{:-^60}".format(f"epoch {e}, seed={seed}"))

        train, _, _ = ds.get_data(neg=neg, seed=seed)
        for x in train:
            optimizer.zero_grad()
            x, label = x[0].to(device).double(), x[1].to(device).double()
            loss = model(x, delta=label, pmi=1)
            if label == -1:
                loss = criterion(loss, label + 1)
            else:
                loss = criterion(loss, label)
            loss.backward()
            optimizer.step()

            with torch.no_grad():
                ave_loss += loss.item()
            cnt += 1
            if cnt % 2500 == 0:
                print(f"Last loss : {loss.item():>10.6f} at {cnt:6d}, " \
                      f"Label : {label.item():2.0f},   " \
                      f"# basket item : {x.sum().item()-2:3.0f},   " \
                      f"Average loss so far : {ave_loss/cnt:>9.6f}", flush=True)
        # print(cnt) # => 957264
        # torch.save(model.state_dict(), f"{save_dir}/{model_name}_{e}.pt")
        # Better way to save model?
        state = {
            "name": model_name,
            "epoch": e,
            "state_dict": model.state_dict(),
            "neg": neg,
            "optimizer": optimizer.state_dict(),
            "k": k,
            "d": d,
            "h": h,
            "gamma": gamma,
            "alpha": alpha,
            "norm": norm
        }
        torch.save(state, f"{save_dir}/{model_name}_{e}.pt")

        print("{:-^60}".format("end"))
Example #13
print("Import Train Data...")

training_folders = [
    "../../Data/Processed/train/epidural",
    "../../Data/Processed/train/intraparenchymal",
    "../../Data/Processed/train/subarachnoid",
    "../../Data/Processed/train/intraventricular",
    "../../Data/Processed/train/subdural",
    "../../Data/Processed/train/nohem",
]

train_data = Data(training_folders, {
    "epidural": "any",
    "intraparenchymal": "any",
    "subarachnoid": "any",
    "intraventricular": "any",
    "subdural": "any",
},
                  maximum_per_folder=500,
                  multi_pool=False,
                  size=256)

print("Amound of train data being used:", len(train_data))

model = AlexNetDetector1(256).cuda()
model.name = "shrey_test"

print(len(train_data.label_dict))

print("Starting training")
train(model, train_data, batch_size=32, learning_rate=0.0001, use_cuda=True)
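The `train` helper called above is not shown; here is a minimal hedged loop consistent with its call signature. The loss function, label handling, and single-epoch default are assumptions.

import torch
from torch.utils.data import DataLoader

def train(model, data, batch_size=32, learning_rate=1e-4, use_cuda=True, epochs=1):
    loader = DataLoader(data, batch_size=batch_size, shuffle=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    criterion = torch.nn.BCEWithLogitsLoss()  # loss choice is an assumption
    model.train()
    for _ in range(epochs):
        for img, label in loader:
            if use_cuda:
                img, label = img.cuda(), label.float().cuda()
            optimizer.zero_grad()
            loss = criterion(model(img).squeeze(1), label)
            loss.backward()
            optimizer.step()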
Example #14
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from dataloader import Data
import tqdm
import os
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

test_data = Data("path/to/your/dataset/folder/", train=False)
test_data = DataLoader(test_data, batch_size=16, shuffle=True, num_workers=8)

model_path = "path/to/your/model.pth"
model = torch.load(model_path)


class FeatureExtracter(nn.Module):
    """
    Extract the last fc layer features of the FusionBiGradNet
    In this section, we run the model without the classifier
    and to save the output features of the network
    """
    def __init__(self, model):
        super(FeatureExtracter, self).__init__()
        if isinstance(model, torch.nn.DataParallel):
            model = model.module
        # remove the last FC layer
        self.features = nn.Sequential(*list(model.children())[:-2])

    def forward(self, x):
        # run the truncated backbone and return the extracted features
        return self.features(x)
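A hedged usage sketch for the extractor, reusing the imports at the top of this example; it assumes the loader yields (image, label) tensor pairs and that the backbone's child modules compose sequentially:

extractor = FeatureExtracter(model).cuda().eval()
features, labels = [], []
with torch.no_grad():
    for img, label in test_data:
        feat = extractor(img.cuda())
        features.append(feat.view(feat.size(0), -1).cpu().numpy())
        labels.append(label.numpy())
features = np.concatenate(features)
labels = np.concatenate(labels)
# project the features to 2-D and plot them, colored by label
embedded = TSNE(n_components=2).fit_transform(features)
plt.scatter(embedded[:, 0], embedded[:, 1], c=labels, s=4)
plt.savefig("tsne.png")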
Example #15
#! /usr/bin/env python
#-*- coding:UTF-8 -*-

from torch.utils.data import dataset, DataLoader
from dataloader import Data
from torchvision import transforms
# created python file
from args import get_parser

#============================================
myparser = get_parser()
opts = myparser.parse_args()
#===========================================
train_transforms = transforms.Compose([
    transforms.Scale(
        256),  # rescale the image keeping the original aspect ratio
    transforms.CenterCrop(256),  # we get only the center of that rescaled
    transforms.RandomCrop(224),  # random crop within the center crop 
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
traindata = Data(opts.train_list_str, None, None, train_transforms, None, None,
                 opts.data_path, opts.num_classes)

testdata = traindata[1]
print(testdata)
Example #16
class Autoencoder:
    def __init__(self, hidden_size, cond_embed_size, output_size, target_path,
                 criterion, epoch, train_or_not, lr, input_embed_size,
                 teacher_forcing_ratio, ratio_kind):
        # initialize variable
        self.hidden_size = hidden_size
        self.cond_embed_size = cond_embed_size
        self.output_size = output_size
        self.target_path = target_path
        self.criterion = criterion
        self.train_or_not = train_or_not
        self.epoch = epoch
        self.learning_rate = lr
        self.teacher_forcing_ratio = teacher_forcing_ratio
        self.input_embed_size = input_embed_size
        self.ratio_kind = ratio_kind
        filename = self.get_bleuname()
        self.weight_name = 'CVAE_' + filename.replace('.csv', '') + '.pt'

        # initialize using class
        self.C2D = Char2Dict(cond_embed_size)
        self.DataLoader = Data(target_path)
        self.Encoder = EncoderRNN(input_embed_size, hidden_size,
                                  cond_embed_size).to(device)
        self.Decoder = DecoderRNN(input_embed_size, hidden_size,
                                  output_size).to(device)
        self.CVAE = CVAE(encoder=self.Encoder,
                         decoder=self.Decoder,
                         hidden_size=self.hidden_size,
                         cond_embed_size=self.cond_embed_size,
                         C2D=self.C2D,
                         Train=self.train_or_not,
                         output_size=self.output_size,
                         teacher_forcing_ratio=self.teacher_forcing_ratio,
                         input_embed_size=self.input_embed_size)
        self.CVAE_optimizer = optim.SGD(self.CVAE.parameters(),
                                        lr=self.learning_rate,
                                        momentum=0.9)

    def loss_sum(self, loss, Encoder_output, ep):
        mu = self.CVAE.linearmu(Encoder_output.cpu()).to(device)
        logvar = self.CVAE.linearlogvar(Encoder_output.cpu()).to(device)
        KLD = (-0.5) * torch.sum(1 + 2 * logvar - mu.pow(2) -
                                 logvar.exp().pow(2))
        # normal
        if self.ratio_kind == 0:
            return loss + KLD
        # cyclical
        elif self.ratio_kind == 1:
            if ep % 5 == 0:
                return loss
            elif ep % 5 <= 3:
                return loss + (ep / 3) * KLD
            elif ep % 5 > 3:
                return loss + KLD
        # monotonic
        elif self.ratio_kind == 2:
            if ep <= 5:
                return loss + (ep / 5) * KLD
            else:
                return loss + KLD
        else:
            return loss + 0.002 * KLD

    def get_bleuname(self):
        # normal
        if self.ratio_kind == 0:
            return 'normal_BLEU.csv'
        # cyclical
        elif self.ratio_kind == 1:
            return 'cyclical_BLEU.csv'
        # monotonic
        elif self.ratio_kind == 2:
            return 'monotonic_BLEU.csv'
        else:
            return 'CVAE_train.csv'

    def get_train_set(self, c):
        present, third_person, present_progressive, simple_past = self.DataLoader.seperate_tense()
        if c == 0:
            data = present
        elif c == 1:
            data = third_person
        elif c == 2:
            data = present_progressive
        else:
            data = simple_past

        return data

    def writefile(self):
        filename = self.get_bleuname()
        write_list = self.train()
        with open(filename, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            for i in range(len(write_list)):
                writer.writerow(write_list[i])


    # Training
    def train(self):
        write_list = []
        Belu = Autoencoder(hidden_size=256,
                           cond_embed_size=8,
                           output_size=28,
                           target_path='./lab3/test_data.txt',
                           criterion=nn.CrossEntropyLoss(),
                           epoch=30,
                           train_or_not=False,
                           lr=0.001,
                           input_embed_size=64,
                           teacher_forcing_ratio=1,
                           ratio_kind=self.ratio_kind)

        self.CVAE.train()

        for ep in range(self.epoch):
            overall_loss = 0
            acc = 0
            # Training with different tense
            for c in range(4):
                data = self.get_train_set(c)
                for i in range(len(data)):

                    CVAE_case = data[i]
                    self.CVAE_optimizer.zero_grad()

                    # encode & decode
                    predict_word, decoded_word, pred_word, Encoder_output = self.CVAE(
                        CVAE_case)

                    loss = self.criterion(pred_word, decoded_word)
                    total_loss = self.loss_sum(loss, Encoder_output, ep)
                    total_loss.backward()
                    overall_loss += total_loss.item()

                    self.CVAE_optimizer.step()
                    if predict_word == CVAE_case[1]:
                        acc += 1

            total_len = (len(self.get_train_set(0)) +
                         len(self.get_train_set(1)) +
                         len(self.get_train_set(2)) +
                         len(self.get_train_set(3)))
            msg = '# Epoch : {}, Avg_loss = {}'.format(
                ep + 1, overall_loss / total_len)
            print(msg)
            acc_msg = 'Accuracy : {:.3f}%'.format(acc / total_len * 100)
            print(acc_msg)
            torch.save(self.CVAE.state_dict(), self.weight_name)
            score = Belu.test()

            write_list.append([ep, score])

        return write_list

    def test(self):
        self.CVAE.load_state_dict(torch.load(self.weight_name))
        self.CVAE.eval()
        acc = 0
        bleu_score = 0
        test_set = self.DataLoader.read_test_file()
        for i in range(len(test_set)):
            CVAE_case = test_set[i]
            predict_word = self.CVAE(CVAE_case)

            print('Given_word : {}'.format(CVAE_case[0]))
            print('Expected_word : {}'.format(CVAE_case[1]))
            print('Predict_word : {}'.format(predict_word))
            print(' ')

            if CVAE_case[1] == predict_word:
                acc += 1
            bleu_score += self.CVAE.compute_bleu(predict_word, CVAE_case[1])
        print('Test accuracy : {} %'.format(acc / len(test_set) * 100))
        print('Test avg BLEU score : ', bleu_score / len(test_set))
        print()
        return bleu_score

    def generate(self):
        self.CVAE.load_state_dict(torch.load('CVAE.pt'))
        self.CVAE.eval()
        for i in range(10):
            noise = torch.randn(1, 1, self.hidden_size).to(device)
            test_case1 = self.CVAE.Decode_test(noise, 0)
            test_case2 = self.CVAE.Decode_test(noise, 1)
            test_case3 = self.CVAE.Decode_test(noise, 2)
            test_case4 = self.CVAE.Decode_test(noise, 3)
            print('# Case {}: [{}, {}, {}, {}]'.format(i + 1, test_case1,
                                                       test_case2, test_case3,
                                                       test_case4))
Example #17
def main():
    # Alexnet3
    # Threshold
    # All SubModels
    # Max is label
    # Compare with test label, can be 0
    # Load our test data
    
    """ FINAL MODEL PATHS HERE """
    # Detector
    detect_path = r"../Models/yeet/detect_alex3_sig, imgs=32k, bs=32, epoch=20, lr=0.0001/19_epoch.pt" 
    #detect_path = r"../Models/yeet/detect_alex3_sig, imgs=32k, bs=512, epoch=30, lr=0.001/29_epoch.pt"
    model_detect = torch.load(detect_path).cuda().eval()
    # Following are hem type models
    # TODO CONFIRM PATH
    # TODO THIS MODEL NAME IS WEIRD
    subdural_path = r"../Models/yeet/alexSubdural_sig,imgs=32k,bs=512,epochs=30,lr=0.01,d0.4/29_epoch.pt" 
    model_subdural = torch.load(subdural_path).cuda().eval()

    intrav_path = r"../Models/yeet/alexIntrav_sig,imgs=32k,bs=32,epochs=30,lr=0.0001,d0.4/29_epoch.pt" 
    model_intrav = torch.load(intrav_path).cuda().eval()

    subara_path = r"../Models/yeet/alexSubara_sig,imgs=32k,bs=512,epochs=30,lr=0.01,d0.4/29_epoch.pt" 
    model_subara = torch.load(subara_path).cuda().eval()

    intrap_path = r"../Models/yeet/alexIntrap_sig,imgs=32k,bs=32,epochs=30,lr=0.01,d0.4/29_epoch.pt" 
    model_intrap = torch.load(intrap_path).cuda().eval()
    hem_types = { 
            model_subdural : "subdural", 
            model_intrav : "intraventricular", 
            model_subara : "subarachnoid", 
            model_intrap : "intraparenchymal"
    }
    """ FINAL MODEL PATHS """

    test = [
        #"../../Data/Processed/train/epidural",
        "../../Data/Processed/val/intraparenchymal",
        "../../Data/Processed/val/subarachnoid",
        "../../Data/Processed/val/intraventricular",
        "../../Data/Processed/val/subdural",
        #"../../Data/Processed/val/nohem",    
    ]
    # Do not replace any label
    test_data = Data(test, maximum_per_folder = 50,  tl_model = "alexnet", in_channels=3)
    # Batch size of 1 to simplify
    test_data_loader = torch.utils.data.DataLoader(test_data, batch_size=1)
    threshold = 0.5
    # Iterate through test_data
    correct = 0
    total = 0
    for img, label in test_data_loader:
        #To Enable GPU Usage
        img = img.cuda()
        label = label.cuda()
        #############################################
        hem_detected = float(model_detect(img))
        print("Model:", hem_detected)
        if hem_detected >= threshold:
            predictions = {}
            for model, pred_label in hem_types.items():
                fwd_pass = float(model(img))

                predictions[pred_label] = float(fwd_pass)
                #predictions[pred_label] = float(torch.nn.functional.sigmoid(fwd_pass))
            
            # Get probability of Epidural
            #epidural_prob = 0.0
            #for _, prob in predictions.items():
             #   epidural_prob += prob
            #predictions["epidural"] = 1 - epidural_prob
            
            # Get maximum probability
            print(predictions)
            predicted_label = max(predictions, key=predictions.get)

        else:
            predicted_label = "nohem"
        
        print("Predicted {0} when it was {1}".format( predicted_label,test_data._label_dict[float(label)]))
        print("-------------------------------")
        if predicted_label == test_data._label_dict[float(label)]: 
            correct += 1
        total += 1
    
    print("Test Accuracy is : " + str(correct/total))
Example #18
                        help="dropout rate")
    parser.add_argument('--padding',
                        type=int,
                        default=50,
                        help="padding length")
    parser.add_argument('--model_path', type=str, \
                        default='../data/model/lr:0.001-batch_size:128-epochs:10-embedding_dim:256-head_num:8-bleu:0.17267120509754869-date:2020-12-06-01-02-translate_params.pkl', help="model path")
    args = parser.parse_args()

    # select the device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # set which GPU to run on
    torch.cuda.set_device(args.gpu)

    # prepare the datasets
    trainData = Data(padding=args.padding, mode='train')
    validData = Data(padding=args.padding, mode='valid')
    testData = Data(padding=args.padding, mode='test')

    # prepare the dataloaders (wrapping with tqdm prints a progress bar)
    trainDataLoader = DataLoader(trainData,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=32,
                                 drop_last=True)
    validDataLoader = DataLoader(validData,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=32,
                                 drop_last=True)
    testDataLoader = DataLoader(testData,
Example #19
print("Import Train Data...")

img_size = 256

training_folders = [
    "../../Data/Processed/train/epidural",
    "../../Data/Processed/train/intraparenchymal",
    "../../Data/Processed/train/subarachnoid",
    "../../Data/Processed/train/intraventricular",
    "../../Data/Processed/train/subdural",
    #     "../../Data/Processed/train/nohem",
]

train_data = Data(
    training_folders,
    maximum_per_folder=50,  #5000
    multi_pool=False,
    size=img_size)

val_folders = [
    "../../Data/Processed/val/epidural",
    "../../Data/Processed/val/intraparenchymal",
    "../../Data/Processed/val/subarachnoid",
    "../../Data/Processed/val/intraventricular",
    "../../Data/Processed/val/subdural",
    #     "../../Data/Processed/val/nohem",
]

val_data = Data(
    val_folders,
    maximum_per_folder=10,  #1500
Example #20
def main():
    import sys
    sys.path.append("../")
    import datetime
    from torch.utils.data import DataLoader

    from dataloader import Data

    ds = Data()
    train, test, valid = ds.get_data()
    """
    n: # users
    m: # items
    k: latent vec dim
    """
    n = len(ds.usrset)
    m = len(ds.itemset)
    k = 32

    gamma = [1, 1, 1, 1]
    alpha = 0.0

    lr = 0.0001
    momentum = 0
    weight_decay = 0.01

    epochs = 21
    neg = 0

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = ABFM(n, m, k, gamma, alpha).to(device=device)

    # \alpha*||w||_2 is L2 regularization
    # the weight_decay option implements this regularization
    # the weight_decay value is \alpha
    optimizer = optim.SGD(model.parameters(), \
                          lr=lr, \
                          momentum=momentum, \
                          weight_decay=weight_decay)

    # Saved directory
    today = datetime.date.today()
    c_time = datetime.datetime.now().strftime("%H-%M-%S")
    save_dir = f"../trained/abfm/{today}/{c_time}"
    os.makedirs(save_dir, exist_ok=True)
    model_name = "ABFM"

    # Load trained parameters
    loaded = False
    if loaded:
        model_path = "../trained/2019-11-08/ABFM_4.pt"
        model.load_state_dict(torch.load(model_path))
        epochs = 5

    # Print Information
    print("{:-^60}".format("Data stat"))
    print(f"# User        : {n}\n" \
          f"# Item        : {m}\n" \
          f"Neg sample    : {neg}\n")
    print("{:-^60}".format("Optim status"))
    print(f"Optimizer     : {optimizer}\n" \
          f"Learning rate : {lr}\n" \
          f"Momentum      : {momentum}\n" \
          f"Weight decay  : {weight_decay}")
    print("{:-^60}".format("Model/Learning status"))
    print(f"Model name    : {model_name}\n" \
          f"Mid dim       : {k}\n" \
          f"Gamma         : {gamma}\n" \
          f"Alpha         : {alpha}\n" \
          f"Epochs        : {epochs}\n" \
          f"Loaded        : {loaded}")
    if loaded:
        print(f"Learned model : {model_path}")
    print("{:-^60}".format("Description"))
    print("Without nagative samples")
    print("{:-^60}".format(""))

    for e in range(epochs):
        print("{:-^60}".format(f"epoch {e}"))
        cnt = 0
        ave_loss = 0
        train, _, _ = ds.get_data(neg=neg)
        for x in train:
            optimizer.zero_grad()
            x, label = x[0].to(device), x[1].to(device)
            loss = model(x, delta=label, pmi=1)
            loss.backward()
            optimizer.step()

            with torch.no_grad():
                ave_loss += loss.item()
            cnt += 1
            if cnt % 2500 == 0:
                print(f"Last loss : {loss.item():>9.6f} at {cnt:6d}, " \
                      f"Label : {label.item():2.0f},   " \
                      f"# basket item : {x.sum().item()-2:3.0f},   " \
                      f"Average loss so far : {ave_loss/cnt:>9.6f}")
        # print(cnt) # => 957264
        torch.save(model.state_dict(), f"{save_dir}/{model_name}_{e}.pt")
        print("{:-^60}".format("end"))
Example #21
training_folders = [
    "../../Data/Processed/train/epidural",
    "../../Data/Processed/train/intraparenchymal",
    "../../Data/Processed/train/subarachnoid",
    "../../Data/Processed/train/intraventricular",
    "../../Data/Processed/train/subdural",
    #    "../../Data/Processed/train/nohem",
]

train_data = Data(
    training_folders,
    #           {
    #              "epidural":"any",
    #             "intraparenchymal":"any",
    #            "subarachnoid":"any",
    #           "intraventricular":"any",
    #          "subdural":"any",
    #     },
    maximum_per_folder=1000,  #5000
    size=img_size,
    tl_model=None,
)

print("Import Val Data...")
val_folders = [
    "../../Data/Processed/val/epidural",
    "../../Data/Processed/val/intraparenchymal",
    "../../Data/Processed/val/subarachnoid",
    "../../Data/Processed/val/intraventricular",
    "../../Data/Processed/val/subdural",
    #"../../Data/Processed/val/nohem",
Example #22
def eval_iqa_all(metrics=['brisque']):
    # print('Evaluating IQA results on all datasets:')
    eng = matlab.engine.start_matlab()

    data = []
    names = ['ADNI', 'NACC', 'AIBL']

    sources = [
        "/data/datasets/ADNI_NoBack/", "/data/datasets/NACC_NoBack/",
        "/data/datasets/AIBL_NoBack/"
    ]
    data += [
        Data(sources[0],
             class1='ADNI_1.5T_NL',
             class2='ADNI_1.5T_AD',
             stage='test',
             shuffle=False)
    ]
    data += [
        Data(sources[1],
             class1='NACC_1.5T_NL',
             class2='NACC_1.5T_AD',
             stage='all',
             shuffle=False)
    ]
    data += [
        Data(sources[2],
             class1='AIBL_1.5T_NL',
             class2='AIBL_1.5T_AD',
             stage='all',
             shuffle=False)
    ]
    dataloaders = [DataLoader(d, batch_size=1, shuffle=False) for d in data]

    data = []
    # targets = ["/home/sq/gan2020/ADNIP_NoBack/", "/home/sq/gan2020/NACCP_NoBack/", "/home/sq/gan2020/AIBLP_NoBack/"]
    targets = ["./ADNIP_NoBack/", "./NACCP_NoBack/", "./AIBLP_NoBack/"]
    data += [
        Data(targets[0],
             class1='ADNI_1.5T_NL',
             class2='ADNI_1.5T_AD',
             stage='test',
             shuffle=False)
    ]
    data += [
        Data(targets[1],
             class1='NACC_1.5T_NL',
             class2='NACC_1.5T_AD',
             stage='all',
             shuffle=False)
    ]
    data += [
        Data(targets[2],
             class1='AIBL_1.5T_NL',
             class2='AIBL_1.5T_AD',
             stage='all',
             shuffle=False)
    ]
    dataloaders_p = [DataLoader(d, batch_size=1, shuffle=False) for d in data]

    Data_lists = [d.Data_list for d in data]

    out_dir = './iqa/'
    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.mkdir(out_dir)

    out_str = ''
    for m in metrics:
        for id in range(len(names)):
            name = names[id]
            Data_list = Data_lists[id]
            dataloader = dataloaders[id]
            dataloader_p = dataloaders_p[id]
            iqa_oris = []
            iqa_gens = []
            for i, (input, _) in enumerate(dataloader):
                input = input.squeeze().numpy()
                iqa_oris += [iqa_tensor(input, eng, Data_list[i], m, out_dir)]
            for j, (input_p, _) in enumerate(dataloader_p):
                input_p = input_p.squeeze().numpy()
                iqa_gens += [
                    iqa_tensor(input_p, eng, Data_list[j], m, out_dir)
                ]
            o = 'Average ' + m + ' on ' + name + ' is:'
            print(o)
            out_str += o + '\n'
            iqa_oris = np.asarray(iqa_oris)
            iqa_gens = np.asarray(iqa_gens)
            o = '\t1.5 : ' + str(np.mean(iqa_oris)) + ' ' + str(np.std(iqa_oris))
            print(o)
            out_str += o + '\n'
            o = '\t1.5* : ' + str(np.mean(iqa_gens)) + ' ' + str(np.std(iqa_gens))
            print(o)
            out_str += o + '\n'
            o = '\tp_value (1.5 & 1.5+): ' + str(p_val(iqa_oris, iqa_gens))
            print(o)
            out_str += o + '\n'
    with open(out_dir + 'iqa.txt', 'w') as f:
        f.write(out_str)
    eng.quit()
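`p_val` above comes from elsewhere in this repo; a minimal hedged stand-in using an independent two-sample t-test (the original may use a paired test instead):

from scipy.stats import ttest_ind

def p_val(a, b):
    # two-sided p-value comparing original and generated IQA scores
    return ttest_ind(a, b, equal_var=False).pvalue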
Example #23
def main():
    #######################################1.data loader###########################################
    train_transforms = transforms.Compose([
        transforms.Scale(
            256),  # rescale the image keeping the original aspect ratio
        transforms.CenterCrop(256),  # we get only the center of that rescaled
        transforms.RandomCrop(224),  # random crop within the center crop 
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    val_transforms = transforms.Compose([
        transforms.Scale(
            256),  # rescale the image keeping the original aspect ratio
        transforms.CenterCrop(224),  # we get only the center of that rescaled
        transforms.ToTensor(),
    ])
    test_transforms = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(256),
        transforms.ToTensor(),
    ])

    traindata = Data(opts.train_list_str, None, None, train_transforms, None,
                     None, opts.data_path, opts.num_classes)
    valdata = Data(None, None, opts.val_list_str, None, val_transforms, None,
                   opts.data_path, opts.num_classes)
    testdata = Data(None, opts.test_list_str, None, None, None,
                    test_transforms, opts.data_path, opts.num_classes)

    train_loader = DataLoader(traindata,
                              batch_size=opts.batch_size,
                              shuffle=True,
                              num_workers=opts.workers,
                              pin_memory=True)
    print('Training loader prepared')

    val_loader = DataLoader(valdata,
                            batch_size=opts.batch_size,
                            shuffle=False,
                            num_workers=opts.workers,
                            pin_memory=True)
    print('Validation loader prepared')

    #testloader=Dataloader(testdata,testloader,)

    ##########################################2.model#################################################
    model = imvstxt()
    model.visionMLP = torch.nn.DataParallel(model.visionMLP, device_ids=[0, 1])
    if opts.cuda:
        model.cuda()

    ########################################3.train && optimer###########################################

    # define the loss function (criterion) and optimizer
    #cosine similarity between embeddings ->input1 ,input2,target
    if opts.cuda:
        cosine_crit = nn.CosineEmbeddingLoss(0.1).cuda()
    else:
        cosine_crit = nn.CosineEmbeddingLoss(0.1)

    if opts.semantic_reg:
        weights_class = torch.Tensor(opts.numClasses).fill_(1)
        weights_class[0] = 0  # the background class is set to 0, i.e. ignore
        # CrossEntropyLoss combines LogSoftMax and NLLLoss in one single class
        class_crit = nn.CrossEntropyLoss(weight=weights_class).cuda()
        # we will use two different criterion
        criterion = [cosine_crit, class_crit]
    else:
        criterion = cosine_crit

    ##creating different parameter groups
    vision_params = list(map(id, model.visionMLP.parameters()))
    base_params = filter(lambda p: id(p) not in vision_params,
                         model.parameters())

    # keep the name distinct from the imported `optim` module
    optimizer = optim.Adam([{
        'params': base_params
    }, {
        'params': model.visionMLP.parameters(),
        'lr': opts.lr * opts.freeVision
    }],
                           lr=opts.lr * opts.freeText)

    # resume from a checkpoint if one exists
    if opts.resume:
        if os.path.isfile(opts.resume):
            print("=> loading checkpoint '{}'".format(opts.resume))
            checkpoint = torch.load(opts.resume)
            opts.start_epoch = checkpoint['epoch']
            best_val = checkpoint['best_val']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                opts.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(opts.resume))
            best_val = float('inf')
    else:
        best_val = float('inf')

    # build the trainer (renamed so it does not shadow the imported `trainer` module)
    model_trainer = trainer.Trainer(cuda=opts.cuda,
                                    model=model,
                                    optimizer=optimizer,
                                    criterion=criterion,
                                    train_loader=train_loader,
                                    val_loader=val_loader,
                                    max_iter=opts.max_iter)
    model_trainer.train(best_val)
Example #24
training_folders = [
    "../../Data/Processed/train/epidural",
    "../../Data/Processed/train/intraparenchymal",
    "../../Data/Processed/train/subarachnoid",
    "../../Data/Processed/train/intraventricular",
    "../../Data/Processed/train/subdural",
    "../../Data/Processed/train/nohem",
]

train_data = Data(
    training_folders,
    {
        "epidural": "any",
        "intraparenchymal": "any",
        "subarachnoid": "any",
        "intraventricular": "any",
        "subdural": "any",
    },
    maximum_per_folder=5000,  #5000
    size=img_size,
    in_channels=3,
)

#print(train_data._label_dict)
print("Import Val Data...")
val_folders = [
    "../../Data/Processed/train/epidural",
    "../../Data/Processed/val/intraparenchymal",
    "../../Data/Processed/val/subarachnoid",
    "../../Data/Processed/val/intraventricular",
    "../../Data/Processed/val/subdural",
Example #25
def load_model(filename, device, model_name="FABFM", choice="test"):
    if os.path.isfile(filename):
        from dataloader import Data
        # Load file
        checkpoint = torch.load(filename)

        # Load dataset setting
        if choice == "test":
            neg = 1
            test_neg = True
        else:
            # neg = checkpoint["neg"]
            # For experiment
            neg = 1
            test_neg = False
        ds = Data(root_dir="./data/ta_feng/")
        train, test, _ = ds.get_data(neg=neg, test_neg=test_neg)
        n_usr = len(ds.usrset)
        n_itm = len(ds.itemset)

        # Load network
        model_name = checkpoint["name"]
        optimizer = checkpoint["optimizer"]
        k = checkpoint["k"]
        gamma = checkpoint["gamma"]
        alpha = checkpoint["alpha"]
        norm = checkpoint["norm"]
        if model_name == "FABFM":
            d = checkpoint["d"]
            h = checkpoint["h"]
            from models.fixed_abfm import FABFM
            # model = FABFM(n_usr, n_itm, k, d, h, gamma, alpha).to(device=device)
            model = FABFM(n_usr, n_itm, k, d, h, gamma, alpha,
                          norm=norm).to(device=device)
        elif model_name == "ABFM":
            from models.abfm import ABFM
            model = ABFM(n_usr, n_itm, k, gamma, alpha).to(device=device)
        elif model_name == "BFM":
            from models.bfm import BFM
            model = BFM(n_usr, n_itm, k, gamma, alpha).to(device=device)
        model.load_state_dict(checkpoint["state_dict"])

        print("{:-^60}".format("Data stat"))
        print(f"# User        : {n_usr}\n" \
              f"# Item        : {n_itm}\n" \
              f"Neg sample    : {neg}")
        print("{:-^60}".format("Optim status"))
        # print(f"lr            : {optimizer['param_groups'][0]['lr']}\n"\
        #       f"Momentum      : {optimizer['param_groups'][0]['momentum']}\n"\
        #       f"Dampening     : {optimizer['param_groups'][0]['dampening']}\n"\
        #       f"Weight_decay  : {optimizer['param_groups'][0]['weight_decay']}\n"\
        #       f"Nesterov      : {optimizer['param_groups'][0]['nesterov']}")
        print("{:-^60}".format("Model/Learning status"))
        print(f"Mid dim       : {k}\n" \
              f"Gamma         : {gamma}\n" \
              f"Alpha         : {alpha}")
        print("{:-^60}".format(""), flush=True)

        return model, train, test, ds.n_train, ds.n_test, n_usr, n_itm
    else:
        print(f"There is not {filename}")
        return None
Example #26
import os
import numpy as np
import PIL

import cv2
import IPython.display
from IPython.display import clear_output
from datetime import datetime
from dataloader import Data, TestData
import urllib.request
# import matplotlib.pyplot as plt

# Initialize dataloader
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
data = Data()
test_data = Data()

# Save the model every N minutes
TIME_INTERVALS = 2
SHOW_SUMMARY = True

INPUT_SHAPE = (256, 256, 3)
EPOCHS = 500
BATCH = 1

# 25%, i.e. a 64-pixel-wide strip, will be masked on each side
MASK_PERCENTAGE = .25

EPSILON = 1e-9
ALPHA = 0.0004
Example #27
                    default=1e-3,
                    help='epsilon to clip output heatmap')
parser.add_argument('--tfboard_path',
                    type=str,
                    default='tfboard/loss',
                    help='path to save tensorboard log')
opt = parser.parse_args()

if os.path.exists(opt.tfboard_path):
    for i in os.listdir(opt.tfboard_path):
        os.remove(os.path.join(opt.tfboard_path, i))

device = opt.device
eps = opt.eps

traindata = Data(opt.train_path)
train_loader = DataLoader(traindata,
                          batch_size=opt.batch,
                          shuffle=True,
                          num_workers=2,
                          pin_memory=True,
                          drop_last=True)
valdata = Data(opt.eval_path)
val_loader = DataLoader(valdata,
                        batch_size=1,
                        shuffle=True,
                        num_workers=2,
                        pin_memory=True,
                        drop_last=True)

if len(opt.pretrained) == 0:
    "../../Data/Processed/train/epidural",
    "../../Data/Processed/train/intraparenchymal",
    "../../Data/Processed/train/subarachnoid",
    "../../Data/Processed/train/intraventricular",
    "../../Data/Processed/train/subdural",
    "../../Data/Processed/train/nohem",
]

train_data = Data(
    training_folders,
    {
        "epidural": "any",
        "intraparenchymal": "any",
        "subarachnoid": "any",
        "intraventricular": "any",
        "subdural": "any",
    },
    maximum_per_folder=10,  #5000
    multi_pool=False,
    size=img_size,
    tl_model="alexnet",
    in_channels=1,
)

print("Import Val Data...")

val_folders = [
    "../../Data/Processed/val/epidural",
    "../../Data/Processed/val/intraparenchymal",
    "../../Data/Processed/val/subarachnoid",
    "../../Data/Processed/val/intraventricular",
Example #29
    parser.add_argument('--batch_size',
                        type=int,
                        default=1000,
                        help='The batch size used for training')
    parser.add_argument(
        '--num_workers',
        type=int,
        default=8,
        help='The numbers of processors used for loading the data')

    args = parser.parse_args()

    #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    os.makedirs('saved_models', exist_ok=True)

    data = DataLoader(Data("./data/training"),
                      batch_size=args.batch_size,
                      shuffle=True,
                      num_workers=args.num_workers)

    model = ANN()
    criterion = torch.nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.0002,
                                 betas=(0.9, 0.999))

    if args.epoch > 0:
        model.load_state_dict(
            torch.load('saved_models/model_%d.pth' % args.epoch))

    for epoch in range(args.epoch, args.num_epochs):