Example #1
    def init(cls):
        try:
            path = download_file('https://download.pytorch.org/test_data/legacy_modules.t7')
        except unittest.SkipTest:
            return
        tests = load_lua(path)
        for name, test in tests['modules'].items():
            test_name = 'test_' + name.replace('nn.', '')
            setattr(cls, test_name, cls._module_test(name, test))
        for name, test in tests['criterions'].items():
            test_name = 'test_' + name.replace('nn.', '')
            setattr(cls, test_name, cls._criterion_test(name, test))
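The snippet above attaches dynamically generated test methods to a class with setattr. A generic, self-contained sketch of the same pattern (all names below are illustrative, not taken from the snippet):

import unittest

def _make_case(name):
    def test(self):
        self.assertEqual(name, name)  # stand-in for a real per-module check
    return test

class GeneratedTests(unittest.TestCase):
    pass

for name in ['ReLU', 'Tanh']:
    setattr(GeneratedTests, 'test_' + name, _make_case(name))

if __name__ == '__main__':
    unittest.main()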
Example #2
    def __init__(self, model_file_name, input_shape):
        super(TorchParser, self).__init__()
        if not os.path.exists(model_file_name):
            raise ValueError("Torch7 model file [{}] is not found.".format(model_file_name))
        model = load_lua(model_file_name)
        if type(model).__name__ == 'hashable_uniq_dict':
            model = model.model
        model.evaluate()
        self.weight_loaded = True

        # Build network graph
        self.torch_graph = TorchGraph(model)
        self.torch_graph.build([[1] + input_shape])
Example #3
File: utils.py Project: phonx/MUNIT
def load_vgg16(model_dir):
    """ Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py """
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):
        if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):
            os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir, 'vgg16.t7'))
        vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))
        vgg = Vgg16()
        for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
            dst.data[:] = src
        torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))
    vgg = Vgg16()
    vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
    return vgg
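A minimal usage sketch for load_vgg16, assuming the Vgg16 class from fast-neural-style is importable and the target directory is writable ('models' is a placeholder path):

vgg = load_vgg16('models')  # downloads and converts vgg16.t7 on first call
vgg.eval()  # inference mode for feature extraction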
Example #4
def torch_to_pytorch(t7_filename, outputname=None):
    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict':
        model = model.model
    model.gradInput = None
    slist = lua_recursive_source(lnn.Sequential().add(model))
    s = simplify_source(slist)
    header = '''
import torch
import torch.nn as nn
import torch.legacy.nn as lnn

from functools import reduce
from torch.autograd import Variable

class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        output = []
        for module in self._modules.values():
            output.append(module(input))
        return output if output else input

class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))

class LambdaMap(LambdaBase):
    def forward(self, input):
        return list(map(self.lambda_func,self.forward_prepare(input)))

class LambdaReduce(LambdaBase):
    def forward(self, input):
        return reduce(self.lambda_func,self.forward_prepare(input))
'''
    varname = t7_filename.replace('.t7','').replace('.','_').replace('-','_')
    s = '{}\n\n{} = {}'.format(header,varname,s[:-2])

    if outputname is None: outputname=varname
    with open(outputname+'.py', "w") as pyfile:
        pyfile.write(s)

    n = nn.Sequential()
    lua_recursive_model(model,n)
    torch.save(n.state_dict(),outputname+'.pth')
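A hypothetical invocation of the converter above (the file name is a placeholder); it emits both a generated Python module and a matching state dict:

torch_to_pytorch('vgg16.t7', outputname='vgg16_converted')
# -> writes vgg16_converted.py (module source) and vgg16_converted.pth (weights)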
Example #5
    def init(cls):
        try:
            path = download_file('https://download.pytorch.org/test_data/legacy_modules.t7')
        except unittest.SkipTest:
            return
        long_size = 8 if sys.platform == 'win32' else None
        tests = load_lua(path, long_size=long_size)
        for name, test in tests['modules'].items():
            if name == "HardShrink":
                continue
            test_name = 'test_' + name.replace('nn.', '')
            setattr(cls, test_name, cls._module_test(name, test))
        for name, test in tests['criterions'].items():
            if name == "HardShrink":
                continue
            test_name = 'test_' + name.replace('nn.', '')
            setattr(cls, test_name, cls._criterion_test(name, test))
Example #6
    def generateSampleFace(self, idx):
        sf = self.scale_factor
        rf = self.rot_factor

        main_pts = load_lua(self.anno[idx])
        pts = main_pts
        mins_ = torch.min(pts, 0)[0].view(2)  # min vals
        maxs_ = torch.max(pts, 0)[0].view(2)  # max vals
        c = torch.FloatTensor((maxs_[0] - (maxs_[0] - mins_[0]) / 2,
                               maxs_[1] - (maxs_[1] - mins_[1]) / 2))
        # c[0] -= ((maxs_[0] - mins_[0]) * 0.12)
        c[1] -= ((maxs_[1] - mins_[1]) * 0.12)
        s = (maxs_[0] - mins_[0] + maxs_[1] - mins_[1]) / 195

        img = load_image(self.anno[idx][:-3] + '.jpg')

        r = 0
        if self.is_train:
            # scale
            s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
            # rotation
            r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0
            # flip
            if random.random() <= 0.5:
                img = torch.from_numpy(fliplr(img.numpy())).float()
                pts = shufflelr(pts, width=img.size(2), dataset='w300lp')
                c[0] = img.size(2) - c[0]
            # RGB
            img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
            img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
            img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)

        inp = crop(img, c, s, [256, 256], rot=r)
        # inp = color_normalize(inp, self.mean, self.std)

        tpts = pts.clone()
        out = torch.zeros(self.nParts, 64, 64)
        for i in range(self.nParts):
            if tpts[i, 0] > 0:
                tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [64, 64], rot=r))
                out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)

        return inp, out, pts, c, s
Example #7
    def generateSampleFace(self, idx):
        sf = self.scale_factor
        rf = self.rot_factor

        main_pts = load_lua(
            os.path.join(self.img_folder, 'landmarks', self.anno[idx].split('_')[0],
                         self.anno[idx][:-4] + '.t7'))
        pts = main_pts[0] if self.pointType == '2D' else main_pts[1]
        c = torch.Tensor((450 / 2, 450 / 2 + 50))
        s = 1.8

        img = load_image(
            os.path.join(self.img_folder, self.anno[idx].split('_')[0], self.anno[idx][:-8] +
                         '.jpg'))

        r = 0
        if self.is_train:
            s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
            r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0

            if random.random() <= 0.5:
                img = torch.from_numpy(fliplr(img.numpy())).float()
                pts = shufflelr(pts, width=img.size(2), dataset='w300lp')
                c[0] = img.size(2) - c[0]

            img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
            img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
            img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)

        inp = crop(img, c, s, [256, 256], rot=r)
        inp = color_normalize(inp, self.mean, self.std)

        tpts = pts.clone()
        out = torch.zeros(self.nParts, 64, 64)
        for i in range(self.nParts):
            if tpts[i, 0] > 0:
                tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [64, 64], rot=r))
                out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)

        return inp, out, pts, c, s
Example #8
def loadgts(datapath, pointType='2D'):
    if datapath.endswith('300W_LP'):
        base_dir = os.path.join(datapath, 'landmarks')
        dirs = os.listdir(base_dir)
        lines = []
        for d in dirs:
            files = [
                f for f in os.listdir(osp.join(base_dir, d))
                if f.endswith('mat') and f.find('test') != -1
            ]
            lines.extend(files)
        all_gts = torch.zeros((len(lines), 68, 2))
        for i, f in enumerate(lines):
            if pointType == '2D':
                pts = load_lua(osp.join(base_dir, f.split('_')[0], f[:-4] + '.t7'))[0]
            else:
                pts = load_lua(osp.join(base_dir, f.split('_')[0], f[:-4] + '.t7'))[1]
            all_gts[i, :, :] = pts
        return all_gts, lines

    elif datapath.find('300VW-3D') != -1:
        lines = []
        for split in ['CatA', 'CatB', 'CatC']:
            base_dir = os.path.join(datapath, split)
            dirs = os.listdir(base_dir)
            for d in dirs:
                files = [
                    osp.join(base_dir, d, f) for f in os.listdir(osp.join(base_dir, d))
                    if f.endswith('t7')
                ]
                lines.extend(files)
    elif datapath.endswith('LS3D-W'):
        base_dir = osp.join(datapath, 'new_dataset')
        lines, E, M, H = [], [], [], []
        files = [f for f in os.listdir(base_dir) if f.endswith('.t7')]
        for f in files:
            num_of_file = int(f.split('.')[0])
            if num_of_file % 3 == 1:  # 0-30
                E.append(os.path.join(base_dir, f))
            elif num_of_file % 3 == 2:  # 30-60
                M.append(os.path.join(base_dir, f))
            else:  # 60-90
                H.append(os.path.join(base_dir, f))
        lines.extend(E)
        lines.extend(M)
        lines.extend(H)

    all_gts = torch.zeros((len(lines), 68, 2))
    for i, f in enumerate(lines):
        if pointType == '2D':
            if datapath.endswith('300W_LP'):
                pts = load_lua(osp.join(base_dir, f.split('_')[0], f[:-4] + '.t7'))[0]
            else:
                print("Given data set do not have 3D annotations.")
                exit()
        else:
            pts = load_lua(f)
        all_gts[i, :, :] = pts
    print('Loaded {} samples from {}'.format(len(lines), base_dir))

    return all_gts, lines
Example #9
    p_wct.d2.load_state_dict(
        torch.load('pth_models/feature_invertor_conv2.pth'))
    p_wct.e3.load_state_dict(torch.load('pth_models/vgg_normalised_conv3.pth'))
    p_wct.d3.load_state_dict(
        torch.load('pth_models/feature_invertor_conv3.pth'))
    p_wct.e4.load_state_dict(torch.load('pth_models/vgg_normalised_conv4.pth'))
    p_wct.d4.load_state_dict(
        torch.load('pth_models/feature_invertor_conv4.pth'))


if __name__ == '__main__':
    if not os.path.exists('pth_models'):
        os.mkdir('pth_models')

    ## VGGEncoder1
    vgg1 = load_lua('models/vgg_normalised_conv1_1_mask.t7')
    e1 = VGGEncoder(1)
    weight_assign(vgg1, e1, {
        'conv0': 0,
        'conv1_1': 2,
    })
    torch.save(e1.state_dict(), 'pth_models/vgg_normalised_conv1.pth')

    ## VGGDecoder1
    inv1 = load_lua('models/feature_invertor_conv1_1_mask.t7')
    d1 = VGGDecoder(1)
    weight_assign(inv1, d1, {
        'conv1_1': 1,
    })
    torch.save(d1.state_dict(), 'pth_models/feature_invertor_conv1.pth')
Example #10
all_networks = {}

canonical_ordering = []

for language, version in tqdm(p(languages, versions),
                              desc='loading',
                              total=len(languages) * len(versions)):

    # Standard network name
    network_name = 'en-%s-%d' % (language, version)

    canonical_ordering.append(network_name)

    # Load as 4000x(sentence_length)x500 matrix
    all_networks[network_name] = load_lua('../descriptions/%s.desc.t7' %
                                          network_name)

means = {}
variances = {}

# Eigenvectors and values
e, v = torch.load('eigenvalues_and_vectors.pkl')

# transforms
cca_transforms = torch.load('svcca-99.pkl')

# Sort
e, indices = torch.abs(e[:, 0]).sort(descending=True)
v = v[:, indices]

pca_list = []
Example #11
if __name__ == '__main__':
    opt = parser.parse_args()
    impath = opt.data_dir
    match_data = {name: read_matches_file(os.path.join(impath, name))
                  for name in os.listdir(impath) if name.startswith('m50_')}

    info = np.loadtxt(os.path.join(impath, 'info.txt'), dtype=np.uint64)[:,0]
    
    image_list = filter(lambda x: x.endswith('.bmp'), os.listdir(impath))

    patches = []
    for name in tqdm(sorted(image_list)):
        im = cv2.imread(os.path.join(impath, name), cv2.IMREAD_GRAYSCALE)
        patches.append(im.reshape(16,64,16,64).transpose(0,2,1,3).reshape(-1,64,64))
    patches = np.concatenate(patches)[:info.size]
    mean = np.mean(patches, axis=(1,2))

    np.save(arr={'patches': patches,
                 'mean': mean,
                 'info': info,
                 'match_data': match_data},
            file=open(os.path.join(impath, 'data.npy'), 'wb'))

    if opt.test:
        import torch
        from torch.utils.serialization import load_lua
        data_torch = load_lua(os.path.join(impath, 'data.t7'))

        print((torch.from_numpy(patches).float() - data_torch['patches'].float()).abs().max())
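The reshape/transpose chain above cuts a 1024x1024 bitmap laid out as a 16x16 grid of 64x64 tiles into individual patches. A small standalone check of that tiling logic on synthetic data:

import numpy as np

im = np.arange(1024 * 1024).reshape(1024, 1024)
patches = im.reshape(16, 64, 16, 64).transpose(0, 2, 1, 3).reshape(-1, 64, 64)
# patch k sits at grid position (k // 16, k % 16); verify one of them
assert np.array_equal(patches[17], im[64:128, 64:128])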
Example #12
in cuDNN.
"""

parser = argparse.ArgumentParser()
parser.add_argument('--t7_file', required=True)
parser.add_argument('--pth_file', required=True)
args = parser.parse_args()

model_name = os.path.splitext(args.pth_file)[0].split('-')[0]

model = getattr(models, model_name)()
model.load_state_dict(torch.load(args.pth_file))
model.float()
model.eval()

test_cases = load_lua(args.t7_file)['tests']
for i, test_case in enumerate(test_cases):
    print('Running test case %d / %d' % (i + 1, len(test_cases)))
    x = Variable(test_case['input'].float(), requires_grad=True)
    expected_y = test_case['output'].float()
    grad_y = test_case['grad_output'].float()
    expected_grad_x = test_case['grad_input'].float()

    y = model(x)
    y_diff = torch.abs(y.data - expected_y).sum()
    assert y_diff == 0, 'y_diff = %f' % y_diff
    y.backward(grad_y)
    grad_x_diff = torch.abs(x.grad.data - expected_grad_x).sum()
    assert grad_x_diff == 0, 'grad_x_diff = %f' % grad_x_diff

print('All tests pass!')
Example #13
import torch
import torchvision
from torch.utils.serialization import load_lua

from train_res3d.resnet3d import resnet18

torchvision.models.resnet18()
model = resnet18()
model_t7 = load_lua('resnet-18-kinetics-cpu.t7')
# dummy inputs for the example
print('load success')


def copy_param(n, m):
    if m.weight is not None: n.weight.data.copy_(m.weight)
    if m.bias is not None: n.bias.data.copy_(m.bias)
    if hasattr(n, 'running_mean'): n.running_mean.copy_(m.running_mean)
    if hasattr(n, 'running_var'): n.running_var.copy_(m.running_var)


copy_param(model.conv1, model_t7.modules[0].modules[0])
copy_param(model.bn1, model_t7.modules[0].modules[1])
# layer1
copy_param(
    model.layer1[0].conv1,
    model_t7.modules[0].modules[4].modules[0].modules[0].modules[0].modules[0])
copy_param(
    model.layer1[0].bn1,
    model_t7.modules[0].modules[4].modules[0].modules[0].modules[0].modules[1])
copy_param(
    model.layer1[0].conv2,
Example #14
import tqdm
import cv2
from network import Network
from network import model_path
import load

IMAGE_SIZE = 256
LOCAL_SIZE = 128
HOLE_MIN = 32
HOLE_MAX = 127
LEARNING_RATE = 1e-3
BATCH_SIZE = 8
PRETRAIN_EPOCH = 100

from torch.utils.serialization import load_lua
datamean = load_lua(model_path).mean
datamean = np.array(datamean)


def train():
    x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])
    mask = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 1])
    local_x = tf.placeholder(tf.float32,
                             [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])
    global_completion = tf.placeholder(tf.float32,
                                       [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])
    local_completion = tf.placeholder(tf.float32,
                                      [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])
    is_training = tf.placeholder(tf.bool, [])

    model = Network(x,
Example #15
                    action="store",
                    dest="inputsize",
                    type=int,
                    default=2000,
                    help="The input sequence window size for neural network")
parser.add_argument('--batchsize',
                    action="store",
                    dest="batchsize",
                    type=int,
                    default=32,
                    help="Batch size for neural network predictions.")
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()

genome = pyfasta.Fasta('./resources/hg19.fa')
model = load_lua('./resources/deepsea.beluga.2002.cpu')
model.evaluate()
if args.cuda:
    model.cuda()

CHRS = [
    'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9',
    'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17',
    'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX', 'chrY'
]

inputfile = args.inputfile
maxshift = args.maxshift
inputsize = args.inputsize
batchSize = args.batchsize
windowsize = inputsize + 100
Example #16
import torch
import torch.nn as nn
from torch.legacy import nn as old_nn
from torch.legacy.nn.Sequential import Sequential as old_Sequential
from torch.utils.serialization import load_lua
from glcic import glcic

A = load_lua('completionnet_places2.t7', long_size=8).model
B = glcic(in_ch=4, out_ch=3, ch=64)

A_layers = list(
    m for m in A.modules
    if isinstance(m, (old_nn.SpatialConvolution, old_nn.SpatialFullConvolution,
                      old_nn.SpatialDilatedConvolution,
                      old_nn.SpatialBatchNormalization)))
B_layers = list(
    m for m in B.modules()
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d)))

for m1, m2 in zip(A_layers, B_layers):
    # m1, m2 = A_layers[0], B_layers[0]
    if isinstance(m2, (nn.Conv2d, nn.ConvTranspose2d)):
        m2.weight.data.copy_(m1.weight)
        m2.bias.data.copy_(m1.bias)
    elif isinstance(m2, nn.BatchNorm2d):
        m2.running_var.copy_(m1.running_var)
        m2.running_mean.copy_(m1.running_mean)
        m2.weight.data.copy_(m1.weight)
        m2.bias.data.copy_(m1.bias)

torch.save({'model': B.state_dict()}, 'completionnet_places2.pth')
Example #17
opt = parser.parse_args()
print(opt)

log_file = opt.experiment + '/' + opt.log

if not os.path.isdir(opt.experiment):
    os.system('mkdir {0}'.format(opt.experiment))

noise = torch.FloatTensor(opt.batchSize, 100, 1, 1)
noise_int = torch.FloatTensor(opt.batchSize * 3 // 2, 100, 1, 1)
# for test
fixed_noise = torch.FloatTensor(opt.batchSize, 100, 1, 1).normal_(0, 1)
fixed_noise = fixed_noise.cuda()
fixed_noise_v = Variable(fixed_noise)
fixed_txt = load_lua(
    './txt1024.t7')  #this flower has white petals and a yellow stamen
fixed_txt = fixed_txt.view(1, 1024, 1, 1)
fixed_txt = fixed_txt.expand(opt.batchSize, 1024, 1, 1)
fixed_txt = fixed_txt.cuda()
fixed_txt_v = Variable(fixed_txt)

input_img = torch.FloatTensor(opt.batchSize, opt.nc, opt.imSize, opt.imSize)
input_txt = torch.FloatTensor(opt.batchSize, 1024, 1, 1)
input_txt_int = torch.FloatTensor(opt.batchSize * 3 // 2, 1024, 1, 1)
input_txt_int_cpu = torch.FloatTensor(opt.batchSize * 3 // 2, 1024, 1, 1)
fake_img = torch.FloatTensor(opt.batchSize, 3, opt.imSize, opt.imSize)
fake_img_int = torch.FloatTensor(opt.batchSize * 3 // 2, 3, opt.imSize,
                                 opt.imSize)
wrong_img = torch.FloatTensor(opt.batchSize, 3, opt.imSize, opt.imSize)
target_img = torch.FloatTensor(opt.batchSize, 3, opt.imSize, opt.imSize)
noise, noise_int = noise.cuda(), noise_int.cuda()
Example #18
## Weights init function
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)

## Initialization
TNet = Image_transform_net()
if not param.trained_model:
    VGG16 = VGG16()
    # VGG-16 working on [0,255] scale
    # Can't use PyTorch trained model (https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py) because we need access to in-between steps
    # Need to use Justin Johnson Lua trained model and convert it
    os.chdir(param.VGG16_folder)
    os.system('wget -nc http://cs.stanford.edu/people/jcjohns/fast-neural-style/models/vgg16.t7')
    VGG16_Lua = load_lua('vgg16.t7')
    for (src, dst) in zip(VGG16_Lua.parameters()[0], VGG16.parameters()):
        # dst[:].data = src[:]
        dst.data[:] = src
    torch.save(VGG16.state_dict(), f"{base_dir}/VGG16.pth")
# Initialize weights
TNet.apply(weights_init)

# Load existing models
if param.model_load != '':
    TNet.load_state_dict(torch.load(param.model_load))

print(TNet)
print(TNet, file=log_output)
if not param.trained_model:
    print(VGG16)
Example #19
def main():
    # configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    torch.manual_seed(args.seed)
    if args.cuda:
        print('using CUDA with gpu_id:')
        print(args.gpu_id)
        torch.cuda.manual_seed(args.seed)
    # dataset
    unlabeledset = load_lua(
        '/home/hankuan1993/dataset/stl10/stl10-unlabeled-scaled-tensor.t7')
    unlabeledsetnp = unlabeledset.numpy()
    trainset = load_lua(
        '/home/hankuan1993/dataset/stl10/stl10-train-scaled-tensor.t7')
    trainsetnp = trainset.numpy()
    testset = load_lua(
        '/home/hankuan1993/dataset/stl10/stl10-test-scaled-tensor.t7')
    testsetnp = testset.numpy()
    trainsetnp = np.concatenate((unlabeledsetnp, trainsetnp), axis=0)
    trainlen = len(trainsetnp)
    testlen = len(testsetnp)
    # model
    model = CVAE()
    if args.loadPrev:
        print('====> loading previously saved model ...')
        model.load_state_dict(torch.load('./cvaeCheckPoint.pth'))
    if args.cuda:
        print('====> loading model to gpu ...')
        model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    # train and evaluate
    for epoch in range(1, args.epochs + 1):
        # train
        model.train()
        train_loss = 0
        traintime = math.ceil(trainlen / args.batch_size)
        shuffleidx = np.random.permutation(trainsetnp.shape[0])
        for batch_idx in range(traintime):
            datanp = trainsetnp[shuffleidx[batch_idx *
                                           args.batch_size:(batch_idx + 1) *
                                           args.batch_size], :, :, :].astype(
                                               np.float32) / 255.0
            data = torch.from_numpy(datanp)
            data = Variable(data)
            if args.cuda:
                data = data.cuda()
            optimizer.zero_grad()
            recon_batch, mu, logvar = model(data)
            loss = loss_function(recon_batch, data, mu, logvar)
            loss.backward()
            train_loss += loss.data[0]
            optimizer.step()


#            if batch_idx % args.log_interval == 0:
#                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
#                    epoch, batch_idx * len(data), len(train_loader.dataset),
#                    100. * batch_idx / len(train_loader),
#                    loss.data[0] / len(data)))
        print('====> Epoch: {} Train Average loss: {:.4f}'.format(
            epoch, train_loss / traintime / args.batch_size))

        # evaluate
        model.eval()
        test_loss = 0
        for test_idx in range(testlen):
            datanp = testsetnp[test_idx, :, :, :].astype(np.float32) / 255.0
            data = torch.from_numpy(datanp)
            if args.cuda:
                data = data.cuda()
            data = Variable(data, volatile=True)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).data[0]

        test_loss /= testlen
        print('====> Test set loss: {:.4f}'.format(test_loss))
    # Since training is finished, save it! :)
    torch.save(model.state_dict(), './cvaeCheckPoint.pth')
Example #20
def load_activations(activations_path, num_neurons_per_layer=None, is_brnn=False):
    """Load extracted activations.

    Parameters
    ----------
    activations_path : str
        Path to the activations file. Can be of type t7, pt, acts, json or hdf5
    num_neurons_per_layer : int, optional
        Number of neurons per layer - used to compute total number of layers.
        This is only necessary in the case of t7/pt/acts activations.
    is_brnn : bool, optional
        If the model used to extract activations was bidirectional (default: False)

    Returns
    -------
    activations : list of numpy.ndarray
        List of *sentence representations*, where each *sentence representation*
        is a numpy matrix of shape ``[num tokens in sentence x concatenated representation size]``
    num_layers : int
        Number of layers. This is usually representation_size/num_neurons_per_layer.
        Divide again by 2 if model was bidirectional

    """
    file_ext = activations_path.split(".")[-1]

    activations = None
    num_layers = None

    # Load activations based on type
    # Also ensure everything is on the CPU
    #   as activations may have been saved as CUDA variables
    if file_ext == "t7":
        # t7 loading requires torch < 1.0
        print("Loading seq2seq-attn activations from %s..." % (activations_path))
        assert (
            num_neurons_per_layer is not None
        ), "t7 activations require num_neurons_per_layer"
        from torch.utils.serialization import load_lua

        activations = load_lua(activations_path)["encodings"]
        activations = [a.cpu() for a in activations]
        num_layers = len(activations[0][0]) / num_neurons_per_layer
        if is_brnn:
            num_layers /= 2
    elif file_ext == "pt":
        print("Loading OpenNMT-py activations from %s..." % (activations_path))
        assert (
            num_neurons_per_layer is not None
        ), "pt activations require num_neurons_per_layer"
        activations = torch.load(activations_path)
        activations = [
            torch.stack([torch.cat(token) for token in sentence]).cpu()
            for sentence in activations
        ]
        num_layers = len(activations[0][0]) / num_neurons_per_layer
    elif file_ext == "acts":
        print("Loading generic activations from %s..." % (activations_path))
        assert (
            num_neurons_per_layer is not None
        ), "acts activations require num_neurons_per_layer"
        with open(activations_path, "rb") as activations_file:
            activations = pickle.load(activations_file)

        # Combine all layers sequentially
        print("Combining layers " + str([a[0] for a in activations]))
        activations = [a[1] for a in activations]
        num_layers = len(activations)
        num_sentences = len(activations[0])
        concatenated_activations = []
        for sentence_idx in range(num_sentences):
            sentence_acts = []
            for layer_idx in range(num_layers):
                sentence_acts.append(np.vstack(activations[layer_idx][sentence_idx]))
            concatenated_activations.append(np.concatenate(sentence_acts, axis=1))
        activations = concatenated_activations
    elif file_ext == "hdf5":
        print("Loading hdf5 activations from %s..." % (activations_path))
        representations = h5py.File(activations_path, "r")
        sentence_to_index = json.loads(representations.get("sentence_to_index")[0])
        activations = []
        # TODO: Check order
        for _, value in sentence_to_index.items():
            sentence_acts = torch.FloatTensor(representations[value])
            num_layers, sentence_length, embedding_size = (
                sentence_acts.shape[0],
                sentence_acts.shape[1],
                sentence_acts.shape[2],
            )
            num_neurons_per_layer = embedding_size
            sentence_acts = np.swapaxes(sentence_acts, 0, 1)
            sentence_acts = sentence_acts.reshape(
                sentence_length, num_layers * embedding_size
            )
            activations.append(sentence_acts.numpy())
        num_layers = len(activations[0][0]) / num_neurons_per_layer
    elif file_ext == "json":
        print("Loading json activations from %s..." % (activations_path))
        activations = []
        with open(activations_path) as fp:
            for line in fp:
                token_acts = []
                sentence_activations = json.loads(line)["features"]
                for act in sentence_activations:
                    num_neurons_per_layer = len(act["layers"][0]["values"])
                    token_acts.append(
                        np.concatenate([l["values"] for l in act["layers"]])
                    )
                activations.append(np.vstack(token_acts))

        num_layers = activations[0].shape[1] / num_neurons_per_layer
        print(len(activations), num_layers)
    else:
        assert False, "Activations must be of type t7, pt, acts, json or hdf5"

    return activations, int(num_layers)
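A hypothetical call for the t7 branch of load_activations (the path and neuron count are illustrative; as noted above, t7 loading requires torch < 1.0):

activations, num_layers = load_activations(
    'encodings.t7', num_neurons_per_layer=500, is_brnn=False)
print(len(activations), activations[0].shape, num_layers)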
Example #21
    print("Processing sentences - Building SID Tensor")
    num_sentences = len(sentences)
    data = np.empty((num_sentences, 2), dtype=np.int32)
    for idx, item in enumerate(sentences.items()):
        if (idx % 100000) == 0:
            print(idx, num_sentences)

        key, value = item
        start_idx, length = value
        data[idx, 0] = start_idx
        data[idx, 1] = length
    return data


assert (len(sys.argv) == 3)
path = sys.argv[1]
filename = sys.argv[2]
print("Filepath:", path)
print("Output:", filename)

word_freq = load_lua(os.path.join(path, 'word_freq.th7')).numpy()
print("Loaded Tensor")

data = build(os.path.join(path, 'train_data.th7'))
print("Build Sentence ID Tensor")

with open(filename, 'wb') as f:
    np.savez(f, item=data)
print("Saved Sentence ID Tensor")
Example #22
##################################################################
# Prepare our data: we first build a test dataset similar to
# the MIT ECG data; its shape is <5x1x400>.

import torch
import time
import math
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.serialization import load_lua

N, T, D = 50, 5, 400  # opt.batch_size, opt.seq_length, word_dim

train_temp = load_lua('/home/lu/code/D1Train.t7')
trainset = train_temp.view(50, -1, 2000).transpose(0, 1).clone()
data_len = trainset.size()[0]

test_temp = load_lua('/home/lu/code/D1Test.t7')
testset = test_temp.view(-1, 2000)
test_len = testset.size()[0]
print(data_len, test_len)

count = 0


def timeSince(since):
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)
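A quick check of the timeSince helper (the 125-second offset is illustrative):

print(timeSince(time.time() - 125))  # -> '2m 5s'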
Example #23
def convert(model,
            input_shapes,
            output_shapes=None,
            input_names=['input'],
            output_names=['output'],
            mode=None,
            image_input_names=[],
            preprocessing_args={},
            image_output_names=[],
            deprocessing_args={},
            class_labels=None,
            predicted_feature_name='classLabel',
            unknown_layer_converter_fn=None):
    """
    Convert Torch7 model to CoreML.

    Parameters
    ----------
    model: Torch7 model (loaded with PyTorch) | str
        A trained Torch7 model loaded in python using PyTorch or path to file
        with model (*.t7).

    input_shapes: list of tuples
        Shapes of the input tensors.

    mode: str ('classifier', 'regressor' or None)
        Mode of the converted coreml model:
        'classifier', a NeuralNetworkClassifier spec will be constructed.
        'regressor', a NeuralNetworkRegressor spec will be constructed.

    preprocessing_args: dict
        'is_bgr', 'red_bias', 'green_bias', 'blue_bias', 'gray_bias',
        'image_scale' keys with the same meaning as
        https://apple.github.io/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters

    deprocessing_args: dict
        Same as 'preprocessing_args' but for deprocessing.

    class_labels: A string or list of strings.
        As a string it represents the name of the file which contains
        the classification labels (one per line).
        As a list of strings it represents a list of categories that map
        the index of the output of a neural network to labels in a classifier.

    predicted_feature_name: str
        Name of the output feature for the class labels exposed in the Core ML
        model (applies to classifiers only). Defaults to 'classLabel'

    unknown_layer_converter_fn: function with signature:
        (builder, name, layer, input_names, output_names)
            builder: object - instance of NeuralNetworkBuilder class
            name: str - generated layer name
            layer: object - pytorch object for corresponding layer
            input_names: list of strings
            output_names: list of strings
            Returns: list of strings for layer output names
        Callback function to handle unknown for torch2coreml layers


    Returns
    -------
    model: A coreml model.
    """
    _gen_layer_name.called = 0
    _get_layer_converter_fn.unknown_converter_fn = unknown_layer_converter_fn

    if isinstance(model, basestring):
        torch_model = load_lua(model, unknown_classes=True)
    elif isinstance(model, torch.legacy.nn.Sequential):
        torch_model = model
    else:
        raise TypeError(
            "Model must be file path to .t7 file or pytorch loaded model \
            with torch.legacy.nn.Sequential module as root"
        )

    #torch_model.evaluate()

    if not isinstance(input_shapes, list):
        raise TypeError("Input shapes should be a list of tuples.")

    for shape in input_shapes:
        if not isinstance(shape, tuple):
            raise TypeError("Input shape should be a tuple.")

    if len(input_names) != len(input_shapes):
        raise ValueError(
            "Input names count must be equal to input shapes count"
        )

    if output_shapes is None:
        output_shapes = _infer_torch_output_shapes(
            torch_model,
            input_shapes
        )

    if len(output_shapes) != len(output_names):
        raise ValueError(
            "Model has {} outputs, but you set output_names for {}."
            .format(len(output_shapes), len(output_names))
        )

    # create input/output features
    input_features = []
    for i in range(len(input_names)):
        input_features.append(
            (input_names[i], datatypes.Array(*input_shapes[i]))
        )
    output_features = []
    for i in range(len(output_names)):
        output_features.append(
            (output_names[i], datatypes.Array(*output_shapes[i]))
        )

    builder = NeuralNetworkBuilder(input_features, output_features, mode)

    # build model
    layer_name = _gen_layer_name(torch_model)
    _output_names = output_names[:]
    if len(image_output_names) > 0:
        for i in range(len(_output_names)):
            if _output_names[i] in image_output_names:
                _output_names[i] = _gen_layer_name(_DEPROCESS_LAYER_NAME)

    model_output_names = _layers._convert_layer(
        builder, layer_name, torch_model, input_names, _output_names
    )

    # set preprocessing parameters
    if len(image_input_names) > 0:
        builder.set_pre_processing_parameters(
            image_input_names=image_input_names,
            is_bgr=preprocessing_args.get('is_bgr', False),
            red_bias=preprocessing_args.get('red_bias', 0.0),
            green_bias=preprocessing_args.get('green_bias', 0.0),
            blue_bias=preprocessing_args.get('blue_bias', 0.0),
            gray_bias=preprocessing_args.get('gray_bias', 0.0),
            image_scale=preprocessing_args.get('image_scale', 1.0)
        )

    # set deprocessing parameters
    if len(image_output_names) > 0:
        for i in range(len(output_names)):
            output_name = output_names[i]
            if output_name in image_output_names:
                output_shape = output_shapes[i]
                if len(output_shape) == 2 or output_shape[0] == 1:
                    is_grayscale = True
                elif output_shape[0] == 3:
                    is_grayscale = False
                else:
                    raise ValueError('Output must be RGB image or Grayscale')
                _set_deprocessing(
                    is_grayscale,
                    builder,
                    deprocessing_args,
                    model_output_names[i],
                    output_name
                )

    if class_labels is not None:
        if type(class_labels) is str:
            labels = [l.strip() for l in open(class_labels).readlines()]
        elif type(class_labels) is list:
            labels = class_labels
        else:
            raise TypeError(
                "synset variable of unknown type. Type found: {}. \
                Expected either string or list of strings."
                .format(type(class_labels),))

        builder.set_class_labels(
            class_labels=labels,
            predicted_feature_name=predicted_feature_name
        )

    return MLModel(builder.spec)
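A hypothetical classifier conversion using the API documented above, in the style of torch2coreml (file names and shapes are placeholders):

coreml_model = convert(
    'resnet.t7',
    input_shapes=[(3, 224, 224)],
    input_names=['image'],
    image_input_names=['image'],
    mode='classifier',
    class_labels='labels.txt',
)
coreml_model.save('resnet.mlmodel')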
Example #24
# load_lua is supported in pytorch 0.4.1 but not 1.0.0
import torch
import collections
from torch.utils.serialization import load_lua

multiGPU = False
prefix = 'module.' if multiGPU else ''

model1 = load_lua('vgg_SCNN_DULR_w9.t7', unknown_classes=True)
model2 = collections.OrderedDict()

model2[prefix + 'conv1_1.weight'] = model1.modules[0].weight
model2[prefix + 'bn1_1.weight'] = model1.modules[1].weight
model2[prefix + 'bn1_1.bias'] = model1.modules[1].bias
model2[prefix + 'bn1_1.running_mean'] = model1.modules[1].running_mean
model2[prefix + 'bn1_1.running_var'] = model1.modules[1].running_var
model2[prefix + 'conv1_2.weight'] = model1.modules[3].weight
model2[prefix + 'bn1_2.weight'] = model1.modules[4].weight
model2[prefix + 'bn1_2.bias'] = model1.modules[4].bias
model2[prefix + 'bn1_2.running_mean'] = model1.modules[4].running_mean
model2[prefix + 'bn1_2.running_var'] = model1.modules[4].running_var

model2[prefix + 'conv2_1.weight'] = model1.modules[7].weight
model2[prefix + 'bn2_1.weight'] = model1.modules[8].weight
model2[prefix + 'bn2_1.bias'] = model1.modules[8].bias
model2[prefix + 'bn2_1.running_mean'] = model1.modules[8].running_mean
model2[prefix + 'bn2_1.running_var'] = model1.modules[8].running_var
model2[prefix + 'conv2_2.weight'] = model1.modules[10].weight
model2[prefix + 'bn2_2.weight'] = model1.modules[11].weight
model2[prefix + 'bn2_2.bias'] = model1.modules[11].bias
model2[prefix + 'bn2_2.running_mean'] = model1.modules[11].running_mean
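As the comment at the top of this example notes, load_lua was removed after PyTorch 0.4.1. One possible fallback is the standalone torchfile package, sketched here under the assumption that it can parse this file and that its TorchObject exposes fields via attribute access (tensors come back as numpy arrays):

import torch
import torchfile

t7 = torchfile.load('vgg_SCNN_DULR_w9.t7')  # same file as above
conv1_1_weight = torch.from_numpy(t7.modules[0].weight)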
Example #25
import torch
import math
import torch.nn as nn
from torch.autograd import Variable

from torch.utils.serialization import load_lua

N, T, D = 50, 5, 400  # opt.batch_size, opt.seq_length, word_dim

train_temp = load_lua('D1Train.t7')
trainset = train_temp.view(50, -1, 2000).transpose(0, 1).clone().cuda()
data_len = trainset.size()[0]

test_temp = load_lua('D1Test.t7')
testset = test_temp.view(-1, 2000).cuda()
test_len = testset.size()[0]

count = 0


def read_data():
    global count, trainset

    x = trainset[count].view(N, T, D).transpose(0, 1).clone()
    x = x.type('torch.FloatTensor')

    count += 1
    if (count == data_len):
        count = 0

    y = torch.LongTensor(50)
Example #26
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model


import torchfile
from torch.utils.serialization import load_lua
import torch

netparams = torchfile.load('data/resnet152/netparams.t7')
#netparams2 = load_lua('data/resnet152/netparams.t7')
#import ipdb; ipdb.set_trace()
netoutputs = []
for i in range(1, 12):
    path = 'data/resnet152/output{}.t7'.format(i)
    out = load_lua(path)
    #print(out.size())
    if out.dim() == 4:
        pass  # out.transpose_(2, 3)
    netoutputs.append(out)

net = resnet152()
state_dict = net.state_dict()

import collections

s = collections.OrderedDict()

i = 0
for key in state_dict.keys():
    new = torch.from_numpy(netparams[i])
Example #27
    ids.append(vocab('<end>'))
    return ids

for _class in sorted(os.listdir(embedding_path)):
    split = ''
    if _class in train_classes:
        split = train
    elif _class in val_classes:
        split = valid
    elif _class in test_classes:
        split = test

    data_path = os.path.join(embedding_path, _class)
    txt_path = os.path.join(text_path, _class)
    for example, txt_file in zip(sorted(glob(data_path + "/*.t7")), sorted(glob(txt_path + "/*.txt"))):
        example_data = load_lua(example)
        img_path = example_data['img']
        embeddings = example_data['txt'].numpy()
        example_name = img_path.split('/')[-1][:-4]

        f = open(txt_file, "r")
        txt = f.readlines()
        f.close()

        img_path = os.path.join(images_path, img_path)
        img = open(img_path, 'rb').read()

        txt_choice = np.random.choice(range(10), 5)

        embeddings = embeddings[txt_choice]
        
Example #28
parser.add_argument('--save',
                    type=str,
                    default='model.pt',
                    help='path to save the final model')
args = parser.parse_args()

# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

###############################################################################
# Load data
###############################################################################

# Torch
word_freq = load_lua(os.path.join(args.data, 'word_freq.th7')).numpy()
mapto = torch.from_numpy(util.reverse(np.argsort(-word_freq))).long()
print("load word frequency mapping - complete")

ntokens = len(word_freq)
nsampled = 8192

train_corpus = FastGBWDataset(args.data, 'train_data.th7', 'train_data.sid',
                              mapto)
print("load train data - complete")

test_corpus = GBWDataset(args.data, 'test_data.th7', mapto)
print("load test data - complete")

# Streaming
'''
Example #29
    def __init__(self, model_file):
        self.net = load_lua(model_file)
Example #30
import h5py

parser = argparse.ArgumentParser(description='Predict variant chromatin effects')
parser.add_argument('inputfile', type=str, help='Input file in vcf format')
parser.add_argument('--maxshift', action="store",
                    dest="maxshift", type=int, default=800,
                    help='Maximum shift distance for computing nearby effects')
parser.add_argument('--inputsize', action="store", dest="inputsize", type=int,
                    default=2000, help="The input sequence window size for neural network")
parser.add_argument('--batchsize', action="store", dest="batchsize",
                    type=int, default=32, help="Batch size for neural network predictions.")
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()

genome = pyfasta.Fasta('./resources/hg19.fa')
model = load_lua('./resources/deepsea.beluga.2002.cpu')
model.evaluate()
if args.cuda:
    model.cuda()

CHRS = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9',
        'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17',
        'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX','chrY']


inputfile = args.inputfile
maxshift = args.maxshift
inputsize = args.inputsize
batchSize = args.batchsize
windowsize = inputsize + 100
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model

import torchfile
from torch.utils.serialization import load_lua
import torch
netparams = torchfile.load('data/resnet152/netparams.t7')
#netparams2 = load_lua('data/resnet152/netparams.t7')
#import ipdb; ipdb.set_trace()
netoutputs = []
for i in range(1, 12):
    path = 'data/resnet152/output{}.t7'.format(i)
    out = load_lua(path)
    #print(out.size())
    if out.dim() == 4:
        pass  # out.transpose_(2, 3)
    netoutputs.append(out)

net = resnet152()
state_dict = net.state_dict()

import collections
s = collections.OrderedDict()


i = 0
for key in state_dict.keys():
    new = torch.from_numpy(netparams[i])
Example #32
def main(args):
    opt = parser.parse_args(args)
    print('parsed options:', vars(opt))

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id
    if torch.cuda.is_available():
        # to prevent opencv from initializing CUDA in workers
        torch.randn(8).cuda()
        os.environ['CUDA_VISIBLE_DEVICES'] = ''

    def load_provider():
        print('Loading test data')

        p = np.load(opt.test_set)[()]

        for i, t in enumerate(['matches', 'nonmatches']):
            p[t] = p['match_data'][opt.test_matches][i]

        return p

    test_iter = get_iterator(load_provider(), opt.batch_size, opt.nthread)

    def cast(t):
        return t.cuda() if torch.cuda.is_available() else t

    f = models[opt.model]
    net = load_lua(opt.lua_model)

    if opt.model == '2ch':
        params = {}
        for j, i in enumerate([0, 3, 6]):
            params['conv%d.weight' % j] = net.get(i).weight
            params['conv%d.bias' % j] = net.get(i).bias
        params['fc.weight'] = net.get(9).weight
        params['fc.bias'] = net.get(9).bias
    elif opt.model == '2ch2stream':
        params = {}
        for j, branch in enumerate(['fovea', 'retina']):
            for k, layer in enumerate(map(net.get(0).get(j).get(1).get, [1, 4, 7, 9])):
                params['%s.conv%d.weight' % (branch, k)] = layer.weight
                params['%s.conv%d.bias' % (branch, k)] = layer.bias
        for k, layer in enumerate(map(net.get, [1, 3])):
            params['fc%d.weight' % k] = layer.weight
            params['fc%d.bias' % k] = layer.bias
    elif opt.model == 'siam' or opt.model == 'siam_l2':
        params = {}
        for k, layer in enumerate(map(net.get(0).get(0).get, [1, 4, 7])):
            params['conv%d.weight' % k] = layer.weight
            params['conv%d.bias' % k] = layer.bias
        for k, layer in enumerate(map(net.get, [1, 3])):
            params['fc%d.weight' % k] = layer.weight
            params['fc%d.bias' % k] = layer.bias
    elif opt.model == 'siam2stream' or opt.model == 'siam2stream_l2':
        params = {}
        for stream, name in zip(net.get(0).get(0).modules, ['retina', 'fovea']):
            for k, layer in enumerate(map(stream.get, [2, 5, 7, 9])):
                params['%s.conv%d.weight' % (name, k)] = layer.weight
                params['%s.conv%d.bias' % (name, k)] = layer.bias
        for k, layer in enumerate(map(net.get, [1, 3])):
            params['fc%d.weight' % k] = layer.weight
            params['fc%d.bias' % k] = layer.bias

    params = {k: Variable(cast(v)) for k, v in params.items()}

    def create_variables(sample):
        inputs = Variable(cast(sample['input'].float().view(-1, 2, 64, 64)))
        targets = Variable(cast(sample['target'].float().view(-1)))
        return inputs, targets

    test_outputs, test_targets = [], []
    for sample in tqdm(test_iter, dynamic_ncols=True):
        inputs, targets = create_variables(sample)
        y = f(inputs, params)
        test_targets.append(sample['target'].view(-1))
        test_outputs.append(y.data.cpu().view(-1))

    fpr, tpr, thresholds = metrics.roc_curve(torch.cat(test_targets).numpy(),
                                             torch.cat(test_outputs).numpy(), pos_label=1)
    fpr95 = float(interpolate.interp1d(tpr, fpr)(0.95))

    print('FPR95:', fpr95)

    return fpr95
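The FPR95 line above reads the false-positive rate off the ROC curve at 95% true-positive rate by inverting the curve with interp1d. A tiny standalone illustration with made-up values:

from scipy import interpolate

tpr = [0.0, 0.5, 0.9, 1.0]
fpr = [0.0, 0.01, 0.05, 1.0]
print(float(interpolate.interp1d(tpr, fpr)(0.95)))  # -> 0.525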
Example #33
    def __init__(self, model_file):
        self.net = load_lua(model_file)
Example #34
import argparse
import os
import torch
import torchvision.models as models
from torch.utils.serialization import load_lua
"""
Read a .t7 file written by caffemodel_to_t7.lua and convert it to a PyTorch .pth
file containing a state dict for a VGG model.
"""

parser = argparse.ArgumentParser()
parser.add_argument('--input_t7', required=True)
parser.add_argument('--model_name', required=True)
args = parser.parse_args()

t7_model = load_lua(args.input_t7)['model']

pytorch_model = getattr(models, args.model_name)()
feature_modules = list(pytorch_model.features.modules())
classifier_modules = list(pytorch_model.classifier.modules())
pytorch_modules = feature_modules + classifier_modules

next_pytorch_idx = 0
for i, t7_module in enumerate(t7_model.modules):
    if not hasattr(t7_module, 'weight'):
        continue
    assert hasattr(t7_module, 'bias')
    while not hasattr(pytorch_modules[next_pytorch_idx], 'weight'):
        next_pytorch_idx += 1
    pytorch_module = pytorch_modules[next_pytorch_idx]
    next_pytorch_idx += 1
Example #35
    copy_group_conv(block.conv_group_2, parameters[2])
    copy_bn_relu(block.bn_relu_3, parameters[3])
    copy_conv_bn(block.conv_bn_4, [parameters[4], parameters[5]])
    if skip_connection is not None:
        copy_conv_bn(skip_connection, [parameters[6], parameters[7]])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='convert torch resnext model to chainer resnext model')
    parser.add_argument('--t7', '-t', type=str, help='t7 file')
    args = parser.parse_args().__dict__
    t7 = args['t7']
    print(args)
    print('start converting')
    trained_model = load_lua(t7, unknown_classes=True)
    components = []

    def dfs(modules):
        try:
            for module in modules.modules:
                dfs(module)
        except AttributeError:
            components.append(modules)

    dfs(trained_model)

    parameters = []
    for c in components:
        for judge, extract in ((judge_bn, extract_bn),
                               (judge_linear, extract_linear), (judge_conv,
Example #36
# # Up_conv2
# py_model['model.6.weight'] = net.modules[7].weight
# py_model['model.6.bias'] = net.modules[7].bias
# # HR_conv1
# py_model['model.8.weight'] = net.modules[9].weight
# py_model['model.8.bias'] = net.modules[9].bias
# # final_conv
# py_model['model.10.weight'] = net.modules[11].weight
# py_model['model.10.bias'] = net.modules[11].bias

# assert ori_keys == len(py_model), 'the keys mismatch! Please check.'

# torch.save(py_model, 'SRResNet_noBN_torch.pth')

# SRResNet w/ BN
net = load_lua('torch_models/01_SRResNet_nnCPU.t7')
py_model = torch.load('pytorch_models/SRResNet.pth')

for k, v in py_model.items():
    print(k)
ori_keys = len(py_model)
# first conv
py_model['model.0.weight'] = net.modules[0].weight
py_model['model.0.bias'] = net.modules[0].bias
# 16 Residual Blocks
for i in range(16):
    t_bn1_w = net.modules[1].modules[0].modules[i].modules[0].modules[
        0].modules[0].weight
    t_bn1_b = net.modules[1].modules[0].modules[i].modules[0].modules[
        0].modules[0].bias
    t_bn1_rm = net.modules[1].modules[0].modules[i].modules[0].modules[
Example #37
def train_cur_data(cur_epoch, datapart, moving_file, target_file, parameter,
                   output_name, net, criterion, optimizer, registration_spec,
                   args):
    old_experiments = False
    if moving_file[-3:] == '.t7':
        old_experiments = True
        # Only for old data used in the Neuroimage paper; do not use the .t7 format for new data and new experiments.
        moving_appear_trainset = load_lua(moving_file).float()
        target_appear_trainset = load_lua(target_file).float()
        train_m0 = load_lua(parameter).float()
    else:
        moving_appear_trainset = torch.load(moving_file).float()
        target_appear_trainset = torch.load(target_file).float()
        train_m0 = torch.load(parameter).float()

    input_batch = torch.zeros(args.batch_size, 2, args.patch_size,
                              args.patch_size, args.patch_size).cuda()
    output_batch = torch.zeros(args.batch_size, 3, args.patch_size,
                               args.patch_size, args.patch_size).cuda()

    dataset_size = moving_appear_trainset.size()
    flat_idx = util.calculatePatchIdx3D(dataset_size[0],
                                        args.patch_size * torch.ones(3),
                                        dataset_size[1:],
                                        args.stride * torch.ones(3))
    flat_idx_select = torch.zeros(flat_idx.size())

    for patch_idx in range(1, flat_idx.size()[0]):
        patch_pos = util.idx2pos_4D(flat_idx[patch_idx], dataset_size[1:])
        moving_patch = moving_appear_trainset[patch_pos[0],
                                              patch_pos[1]:patch_pos[1] +
                                              args.patch_size,
                                              patch_pos[2]:patch_pos[2] +
                                              args.patch_size,
                                              patch_pos[3]:patch_pos[3] +
                                              args.patch_size]
        target_patch = target_appear_trainset[patch_pos[0],
                                              patch_pos[1]:patch_pos[1] +
                                              args.patch_size,
                                              patch_pos[2]:patch_pos[2] +
                                              args.patch_size,
                                              patch_pos[3]:patch_pos[3] +
                                              args.patch_size]
        if (torch.sum(moving_patch) + torch.sum(target_patch) != 0):
            flat_idx_select[patch_idx] = 1

    flat_idx_select = flat_idx_select.byte()

    flat_idx = torch.masked_select(flat_idx, flat_idx_select)
    N = flat_idx.size()[0] // args.batch_size

    for iters in range(0, N):
        train_idx = (torch.rand(args.batch_size).double() * flat_idx.size()[0])
        train_idx = torch.floor(train_idx).long()
        for slices in range(0, args.batch_size):
            patch_pos = util.idx2pos_4D(flat_idx[train_idx[slices]],
                                        dataset_size[1:])
            input_batch[slices,
                        0] = moving_appear_trainset[patch_pos[0],
                                                    patch_pos[1]:patch_pos[1] +
                                                    args.patch_size,
                                                    patch_pos[2]:patch_pos[2] +
                                                    args.patch_size,
                                                    patch_pos[3]:patch_pos[3] +
                                                    args.patch_size].cuda()
            input_batch[slices,
                        1] = target_appear_trainset[patch_pos[0],
                                                    patch_pos[1]:patch_pos[1] +
                                                    args.patch_size,
                                                    patch_pos[2]:patch_pos[2] +
                                                    args.patch_size,
                                                    patch_pos[3]:patch_pos[3] +
                                                    args.patch_size].cuda()
            output_batch[slices] = train_m0[patch_pos[0], :,
                                            patch_pos[1]:patch_pos[1] +
                                            args.patch_size,
                                            patch_pos[2]:patch_pos[2] +
                                            args.patch_size,
                                            patch_pos[3]:patch_pos[3] +
                                            args.patch_size].cuda()

        input_batch_variable = Variable(input_batch).cuda()
        output_batch_variable = Variable(output_batch).cuda()

        optimizer.zero_grad()
        recon_batch_variable = net(input_batch_variable)
        loss = criterion(recon_batch_variable, output_batch_variable)
        loss.backward()
        loss_value = loss.data[0]
        optimizer.step()
        print(
            '====> Epoch: {}, datapart: {}, iter: {}/{}, loss: {:.4f}'.format(
                cur_epoch + 1, datapart + 1, iters, N,
                loss_value / args.batch_size))
        if iters % 100 == 0 or iters == N - 1:
            if args.n_GPU > 1:
                cur_state_dict = net.module.state_dict()
            else:
                cur_state_dict = net.state_dict()

            modal_name = output_name

            model_info = {
                'patch_size': args.patch_size,
                'network_feature': args.features,
                'state_dict': cur_state_dict,
                'deformation_params': registration_spec
            }
            if old_experiments:
                model_info['matlab_t7'] = True
            #endif
            torch.save(model_info, modal_name)
Example #38
all_networks = {}
canonical_ordering = []

for language, version in tqdm(p(languages, [1, 2, 3]),
                              desc='loading',
                              total=len(languages) * 3):
    network_name = 'en-%s-%d' % (language, version)

    canonical_ordering.append(network_name)

    # Load the description of the network.
    # Each file holds a list of 4000 sentence matrices, each of shape
    # (sentence_length) x 500; concatenating them yields a
    # (total_tokens) x 500 matrix.
    all_networks[network_name] = torch.cat(
        load_lua('../descriptions/%s.desc.t7' % (network_name, )))

print(canonical_ordering)

# Create an enormous data set whose second dimension is
# 500 * 15 = 7500 (500 features per network, 15 networks).
# Transfer it to CUDA.
full_set = torch.cat([all_networks[network] for network in canonical_ordering],
                     dim=1).cuda()

# Whiten the full set
full_set -= full_set.mean(0)
full_set /= full_set.std(0)

# Get covariances
covariances = torch.mm(full_set.t(), full_set) / (full_set.size()[0] - 1)  # divide by (num_samples - 1)
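
For intuition, the same center/scale/covariance pattern on a toy tensor: after whitening each column, the diagonal of the covariance estimate is approximately 1. A self-contained sketch, independent of the description files above:

import torch

X = torch.randn(1000, 8)                      # 1000 samples, 8 dimensions
X -= X.mean(0)                                # center each dimension
X /= X.std(0)                                 # scale each dimension to unit variance
cov = torch.mm(X.t(), X) / (X.size()[0] - 1)  # sample covariance
print(cov.diag())                             # entries close to 1.0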
Exemplo n.º 39
0
args = parser.parse_args()

# Load all the descriptions of networks
# Get list of network filenames
with open(args.descriptions) as f:
    network_fnames = [line.strip() for line in f]

all_networks = {}

for fname in tqdm(network_fnames):
    network_name = os.path.split(fname)[1]
    network_name = network_name[:network_name.index('.')]

    # Load as 4000x(sentence_length)x500 matrix
    all_networks[network_name] = torch.cat(load_lua(fname)['encodings'])

# Whiten dimensions
if args.normalize_dimensions:
    for network in tqdm(all_networks, desc='mu, sigma'):
        all_networks[network] -= all_networks[network].mean(0)
        all_networks[network] /= all_networks[network].std(0)

# PCA to get independent components
whitening_transforms = {}
for network in tqdm(all_networks, desc='pca'):
    X = all_networks[network]
    covariance = torch.mm(X.t(), X) / (X.size()[0] - 1)

    e, v = torch.eig(covariance, eigenvectors=True)
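    # The snippet is truncated here; a common continuation (an assumption,
    # not shown in the original) builds a ZCA whitening transform
    # W = V * diag(1/sqrt(e)) * V^T from the eigenpairs. Under the legacy
    # torch.eig API, `e` has shape (n, 2): column 0 holds the real parts
    # (the covariance matrix is symmetric, so the imaginary parts are zero).
    d = e[:, 0].clamp(min=1e-12).rsqrt()
    whitening_transforms[network] = torch.mm(v, torch.mm(torch.diag(d), v.t()))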
Exemplo n.º 40
0
    p_wct.e1.load_state_dict(torch.load('pth_models/vgg_normalised_conv1.pth'))
    p_wct.d1.load_state_dict(torch.load('pth_models/feature_invertor_conv1.pth'))
    p_wct.e2.load_state_dict(torch.load('pth_models/vgg_normalised_conv2.pth'))
    p_wct.d2.load_state_dict(torch.load('pth_models/feature_invertor_conv2.pth'))
    p_wct.e3.load_state_dict(torch.load('pth_models/vgg_normalised_conv3.pth'))
    p_wct.d3.load_state_dict(torch.load('pth_models/feature_invertor_conv3.pth'))
    p_wct.e4.load_state_dict(torch.load('pth_models/vgg_normalised_conv4.pth'))
    p_wct.d4.load_state_dict(torch.load('pth_models/feature_invertor_conv4.pth'))


if __name__ == '__main__':
    if not os.path.exists('pth_models'):
        os.mkdir('pth_models')
    
    ## VGGEncoder1
    vgg1 = load_lua('models/vgg_normalised_conv1_1_mask.t7')
    e1 = VGGEncoder(1)
    weight_assign(vgg1, e1, {
        'conv0': 0,
        'conv1_1': 2,
    })
    torch.save(e1.state_dict(), 'pth_models/vgg_normalised_conv1.pth')
    
    ## VGGDecoder1
    inv1 = load_lua('models/feature_invertor_conv1_1_mask.t7')
    d1 = VGGDecoder(1)
    weight_assign(inv1, d1, {
        'conv1_1': 1,
    })
    torch.save(d1.state_dict(), 'pth_models/feature_invertor_conv1.pth')
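
weight_assign is not defined in this snippet; a plausible reconstruction (an assumption inferred from the call sites above) copies each named PyTorch conv layer's weight and bias from the module at the given index of the loaded Lua container:

def weight_assign(lua, pth, maps):
    # maps: {pytorch_layer_name: index into the Lua nn container} (assumed layout)
    for name, idx in maps.items():
        getattr(pth, name).weight = torch.nn.Parameter(lua.get(idx).weight.float())
        getattr(pth, name).bias = torch.nn.Parameter(lua.get(idx).bias.float())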
    
Exemplo n.º 41
0
def main():
    # Hyper Parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path',
                        default='/home/ibrahim/vsepp/',
                        help='path to datasets')
    parser.add_argument(
        '--data_name',
        default='cub',
        help='{coco,f8k,f30k,10crop}_precomp|coco|f8k|f30k|cub')
    parser.add_argument('--vocab_path',
                        default='./vocab/',
                        help='Path to saved vocabulary pickle files.')
    parser.add_argument('--margin',
                        default=0.2,
                        type=float,
                        help='Rank loss margin.')
    parser.add_argument('--num_epochs',
                        default=30,
                        type=int,
                        help='Number of training epochs.')
    parser.add_argument('--batch_size',
                        default=60,
                        type=int,
                        help='Size of a training mini-batch.')
    parser.add_argument('--word_dim',
                        default=300,
                        type=int,
                        help='Dimensionality of the word embedding.')
    parser.add_argument('--embed_size',
                        default=1024,
                        type=int,
                        help='Dimensionality of the joint embedding.')
    parser.add_argument('--grad_clip',
                        default=2.,
                        type=float,
                        help='Gradient clipping threshold.')
    parser.add_argument('--crop_size',
                        default=224,
                        type=int,
                        help='Size of an image crop as the CNN input.')
    parser.add_argument('--num_layers',
                        default=1,
                        type=int,
                        help='Number of GRU layers.')
    parser.add_argument('--learning_rate',
                        default=.0002,
                        type=float,
                        help='Initial learning rate.')
    parser.add_argument('--lr_update',
                        default=15,
                        type=int,
                        help='Number of epochs to update the learning rate.')
    parser.add_argument('--workers',
                        default=10,
                        type=int,
                        help='Number of data loader workers.')
    parser.add_argument('--log_step',
                        default=10,
                        type=int,
                        help='Number of steps to print and record the log.')
    parser.add_argument('--val_step',
                        default=500,
                        type=int,
                        help='Number of steps to run validation.')
    parser.add_argument('--logger_name',
                        default='runs/runX',
                        help='Path to save the model and Tensorboard log.')
    parser.add_argument('--resume',
                        default='',
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--max_violation',
                        action='store_true',
                        help='Use max instead of sum in the rank loss.')
    parser.add_argument('--img_dim',
                        default=4096,
                        type=int,
                        help='Dimensionality of the image embedding.')
    parser.add_argument('--finetune',
                        action='store_true',
                        help='Fine-tune the image encoder.')
    parser.add_argument('--cnn_type',
                        default='vgg19',
                        help="""The CNN used for image encoder
                        (e.g. vgg19, resnet152)""")
    parser.add_argument('--use_restval',
                        action='store_true',
                        help='Use the restval data for training on MSCOCO.')
    parser.add_argument('--measure',
                        default='cosine',
                        help='Similarity measure used (cosine|order)')
    parser.add_argument('--use_abs',
                        action='store_true',
                        help='Take the absolute value of embedding vectors.')
    parser.add_argument('--no_imgnorm',
                        action='store_true',
                        help='Do not normalize the image embeddings.')
    parser.add_argument('--reset_train',
                        action='store_true',
                        help='Ensure the training is always done in '
                        'train mode (Not recommended).')
    opt = parser.parse_args()
    print(opt)

    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    tb_logger.configure(opt.logger_name, flush_secs=5)

    # Load Vocabulary Wrapper
    from torch.utils.serialization import load_lua
    vocab = load_lua('vocab_c10.t7')
    #    vocab = pickle.load(open(os.path.join(
    #        opt.vocab_path, '%s_vocab.pkl' % opt.data_name), 'rb'))
    opt.vocab_size = len(vocab)

    # Load data loaders
    train_loader, val_loader = data.get_loaders(opt.data_name, vocab,
                                                opt.crop_size, opt.batch_size,
                                                opt.workers, opt)

    # Construct the model
    model = VSE(opt)

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            start_epoch = checkpoint['epoch']
            best_rsum = checkpoint['best_rsum']
            model.load_state_dict(checkpoint['model'])
            # Eiters is used to show logs as the continuation of another
            # training
            model.Eiters = checkpoint['Eiters']
            print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})".format(
                opt.resume, start_epoch, best_rsum))
            validate(opt, val_loader, model)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # Train the Model
    best_rsum = 0
    for epoch in range(opt.num_epochs):
        adjust_learning_rate(opt, model.optimizer, epoch)

        # train for one epoch
        train(opt, train_loader, model, epoch, val_loader)

        # evaluate on validation set
        rsum = validate(opt, val_loader, model)

        # remember best R@ sum and save checkpoint
        is_best = rsum > best_rsum
        best_rsum = max(rsum, best_rsum)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': model.state_dict(),
                'best_rsum': best_rsum,
                'opt': opt,
                'Eiters': model.Eiters,
            },
            is_best,
            prefix=opt.logger_name + '/')
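
adjust_learning_rate and save_checkpoint are defined elsewhere in this script. A sketch of a learning-rate schedule consistent with the --lr_update help text above, decaying the rate every opt.lr_update epochs (the exact decay factor of 10 is an assumption):

def adjust_learning_rate(opt, optimizer, epoch):
    # Decay the learning rate by 10x every opt.lr_update epochs (assumed factor).
    lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr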