Example #1
0
def test_3rd_snapshot():
    """Evaluate the 3rd snapshot (SmallerNetwork) on the test split.

    Loads the saved weights, runs the whole test set without augmentation,
    and returns the mean per-sample accuracy over all samples that could
    be scored (samples where `accuracy` raised are skipped and counted).
    """
    te_set = PosterSet(POSTER_PATH, split, 'test', gen_d=gen_d, augment=False, resize=None, ten_crop=None)
    te_load = DataLoader(te_set, batch_size=64, shuffle=False, num_workers=3, drop_last=True)

    model = SmallerNetwork(INP_SIZE, 23)
    state = torch.load(SNAP_PATH + "snap3rd.nn")
    model.load_state_dict(state['state_dict'])
    if CUDA_ON: model.cuda()
    model.eval()  # evaluation mode: freeze dropout / batch-norm statistics

    loss = 0
    skipped = 0

    for X, y in tqdm(te_load, desc='3rd'):
        # volatile=True: no autograd graph is built (pre-0.4 PyTorch API)
        X, y = Var(X, volatile=True), Var(y)
        if CUDA_ON:
            X, y = X.cuda(), y.cuda()

        out = model(X)

        for i in range(out.size(0)):
            try:
                loss += accuracy(out.data[i], y.data[i])
            except Exception:  # was bare `except:` — don't swallow KeyboardInterrupt/SystemExit
                skipped += 1

    # NOTE(review): drop_last=True means up to batch_size-1 samples are never
    # scored, but the denominator still uses len(te_set) - skipped, which
    # slightly biases the reported mean — confirm whether this is intended.
    return loss / (len(te_set) - skipped)
Example #2
0
def test_5th_snapshot():
    """Evaluate the 5th snapshot (MidrangeNetwork) with ten-crop testing.

    Each test image yields `ncrops` crops; the model scores every crop and
    the per-crop outputs are averaged back into one prediction per image.
    Returns the mean per-sample accuracy over all samples that could be
    scored (samples where `accuracy` raised are skipped and counted).
    """
    te_set = PosterSet(POSTER_PATH, split, 'test', gen_d=gen_d, augment=False, resize=None, ten_crop=CROP_SIZE)
    te_load = DataLoader(te_set, batch_size=64, shuffle=False, num_workers=3, drop_last=True)

    model = MidrangeNetwork(CROP_SIZE, 23)
    state = torch.load(SNAP_PATH + "snap5th.nn")
    model.load_state_dict(state['state_dict'])
    if CUDA_ON: model.cuda()
    model.eval()  # evaluation mode: freeze dropout / batch-norm statistics

    loss = 0
    skipped = 0

    for X, y in tqdm(te_load, desc='5th'):
        # volatile=True: no autograd graph is built (pre-0.4 PyTorch API)
        X, y = Var(X, volatile=True), Var(y)

        # ten-crop input arrives as (batch, ncrops, C, H, W)
        bs, ncrops, c, h, w = X.size()

        if CUDA_ON:
            X, y = X.cuda(), y.cuda()

        # flatten crops into the batch dimension, then average the crop scores
        out = model(X.view(-1, c, h, w))
        out = out.view(bs, ncrops, -1).mean(1)

        for i in range(out.size(0)):
            try:
                loss += accuracy(out.data[i], y.data[i])
            except Exception:  # was bare `except:` — don't swallow KeyboardInterrupt/SystemExit
                skipped += 1

    # NOTE(review): drop_last=True leaves up to batch_size-1 samples unscored,
    # yet the denominator uses len(te_set) - skipped — confirm this is intended.
    return loss / (len(te_set) - skipped)
Example #3
0
def test_distribution():
    """Baseline: score a fixed genre-frequency prediction on the test split.

    Uses one constant prediction vector (apparently per-genre frequencies
    scaled so the max is 1.0 — TODO confirm against how it was derived) for
    every sample, giving a floor that trained models should beat. Returns
    the mean accuracy over all samples that could be scored.
    """
    te_set = PosterSet(POSTER_PATH, split, 'test', gen_d=gen_d, augment=False, resize=None, ten_crop=None)
    te_load = DataLoader(te_set, batch_size=64, shuffle=False, num_workers=3, drop_last=True)

    # constant 23-way prediction used for every sample
    pred = torch.Tensor([0.28188283, 0.23829031, 0.27430824, 0.39426496, 0.38900912, 0.22754676, 0.24563302, 0.11153192, 0.29865512, 0.14260318, 0.17011903, 0.0307621 , 0.20026279, 0.2485701 , 0.24037718, 0.17645695, 1. , 0.42100788, 0.11593755, 0.31264492, 0.62699026, 0.1946205 , 0.27446282])

    loss = 0
    skipped = 0
    for X, y in tqdm(te_load, desc='dis'):
        for i in range(X.size(0)):
            try:
                loss += accuracy(pred, y[i])
            except Exception:  # was bare `except:` — don't swallow KeyboardInterrupt/SystemExit
                skipped += 1

    return loss / (len(te_set) - skipped)
DEBUG_MODE = False

# --- training hyper-parameters ---
num_epochs = 101
batch_s = 128
log_percent = 0.25   # log once per this fraction of an epoch
s_factor = 0.5
learn_r = 0.0001
input_size = (268, 182)  #posters are all 182 width, 268 heigth

# Load the dataset split and the genre dictionary. Context managers close
# the file handles promptly (the originals via pickle.load(open(...)) leaked them).
with open(DATA_PATH, 'rb') as f:
    p = pickle.load(f)
with open(DICT_PATH, 'rb') as f:
    gen_d = pickle.load(f)

train_set = PosterSet(POSTER_PATH,
                      p,
                      'train',
                      gen_d=gen_d,
                      tv_norm=True,
                      augment=True,
                      resize=input_size,
                      debug=DEBUG_MODE)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=batch_s,
                                           shuffle=True,
                                           num_workers=4)
# number of batches between log outputs (log_percent of one epoch)
log_interval = np.ceil((len(train_loader.dataset) * log_percent) / batch_s)

val_set = PosterSet(POSTER_PATH,
                    p,
                    'val',
                    gen_d=gen_d,
                    tv_norm=True,
                    augment=True,
Example #5
0
import sys, os

from extractors import *
from core import PosterSet

# --- paths and settings for feature extraction ---
DATASET_PATH = "../sets/set_splits.p"
POSTER_PATH = "../posters/"
GENRE_DICT_PATH = "../sets/gen_d.p"
SETS_PATH = "../sets/"
FEATURE_PATH = "../sets/features_all.h5"
CUDA_ON = True

# Load split and genre dictionary. Context managers close the file handles
# promptly (the originals via pickle.load(open(...)) leaked them).
with open(DATASET_PATH, 'rb') as f:
    p = pickle.load(f)
with open(GENRE_DICT_PATH, 'rb') as f:
    gen_d = pickle.load(f)

# h5 stays open: datasets are appended below (and presumably later in the file)
h5 = h5py.File(FEATURE_PATH, 'a')
dataset = PosterSet(POSTER_PATH, p, 'all', gen_d=gen_d, normalize=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=32,
                                         shuffle=False,
                                         num_workers=1)

# store all labels and ids once up front
h5.create_dataset("labels", data=np.stack([y for __, y in tqdm(dataset)]))
h5.create_dataset("ids",
                  data=np.array([s.encode('utf8') for s in dataset.ids]))

# optional CLI argument selects the CUDA device; -1 means "run on CPU"
cuda_device = int(sys.argv[1]) if len(sys.argv) > 1 else 0
if cuda_device == -1:
    cuda_device = 0
    CUDA_ON = False

with torch.cuda.device(cuda_device):
Example #6
0
    print(TERM['y'])
    print("mean: ", m.running_mean)
    print("var: ", m.running_var)
    print("bias:", m.bias.data)
    print("weight:", m.weight.data)
    print(TERM['clr'])


# prepare data: genre dictionary and train/val split.
# Context managers close the pickle file handles promptly (the originals
# via pickle.load(open(...)) leaked them).
with open(GENRE_DICT_PATH, 'rb') as f:
    gen_d = pickle.load(f)
with open(DATASET_PATH, 'rb') as f:
    split = pickle.load(f)

# training set: augmented, random-cropped to CROP_SIZE
tr_set = PosterSet(POSTER_PATH,
                   split,
                   'train',
                   gen_d=gen_d,
                   augment=True,
                   resize=None,
                   rnd_crop=CROP_SIZE)  #, debug=True)
tr_load = DataLoader(tr_set,
                     batch_size=128,
                     shuffle=True,
                     num_workers=3,
                     drop_last=True)

va_set = PosterSet(POSTER_PATH,
                   split,
                   'val',
                   gen_d=gen_d,
                   augment=False,
                   resize=None,