Example #1
    def load_model(lm_folder_path, device):
        word_map = torchfile.load(os.path.join(lm_folder_path, "word_map.th7"))
        word_map = [w.decode("utf-8") for w in word_map]
        word_to_idx = {w: i for i, w in enumerate(word_map)}
        word_freq = torchfile.load(
            os.path.join(os.path.join(lm_folder_path, "word_freq.th7")))
        mapto = torch.from_numpy(util_reverse(
            np.argsort(-word_freq))).long().to(device)

        model_file = open(os.path.join(lm_folder_path, "lm-state-dict.pt"),
                          "rb")

        model = RNNModel(
            "GRU",
            793471,
            256,
            2048,
            1,
            [4200, 35000, 180000, 793471],
            dropout=0.01,
            proj=True,
            lm1b=True,
        )

        model.load_state_dict(torch.load(model_file, map_location=device))
        model.full = True  # Use real softmax--important!
        model.to(device)
        model.eval()
        model_file.close()
        return QueryHandler(model, word_to_idx, mapto, device)
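A minimal call sketch (hypothetical folder path; assumes the function above is reachable at module level together with RNNModel and QueryHandler):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
query_handler = load_model('./lm_params', device)  # './lm_params' is a placeholder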
Example #2
def run_model(model_file_path='./run_model_pascal.lua', q_ref='Queries'):
    if ('pascal' in model_file_path):
        db = 'rPascal'
    else:
        db = 'rImageNet'
    images_folder = os.path.join('../../data/Databases/', db, q_ref)
    images = sorted(os.listdir(images_folder))
    # Path for storing intermediate torchfile output
    dest_folder_temp = './encoded_feats/'
    # Path for storing numpy converted output
    dest_folder = os.path.join('./encoded_feats/', db, 'full', q_ref)

    # filename suffix length to strip: 4 --> rPascal | 5 --> rImageNet
    if (db == 'rPascal'):
        clip = 4
    elif (db == 'rImageNet'):
        clip = 5

    for i, image in enumerate(images):
        print(i)
        # builds and runs: th <model_file_path> -input_image <images_folder>/<image>
        os.system(
            os.path.join(
                'th ' + model_file_path + ' -input_image ' + images_folder,
                image))
        inds = t.load(os.path.join(dest_folder_temp, 'temp_inds.t7'))
        inds = inds['inds']
        feats = t.load(os.path.join(dest_folder_temp, 'temp_feats.t7'))
        feats = feats['feats']
        np.save(
            os.path.join(dest_folder, image[:len(image) - clip] + '_feats'),
            feats)
        np.save(os.path.join(dest_folder, image[:len(image) - clip] + '_inds'),
                inds)
Example #3
def read_ip_ip_grad(ip_path, ip_grad_path):
    input = trf.load(ip_path)
    input = torch.from_numpy(input)
    print("### ip_grad", ip_grad_path)
    ip_grad = torch.from_numpy(trf.load(ip_grad_path))

    return input.view(input.size()[0], -1), ip_grad
Example #4
File: util.py Project: czczup/URST
    def __init__(self,args):
        super(WCT, self).__init__()
        # load pre-trained network
        vgg1 = torchfile.load(args.vgg1)
        decoder1_torch = torchfile.load(args.decoder1)
        vgg2 = torchfile.load(args.vgg2)
        decoder2_torch = torchfile.load(args.decoder2)
        vgg3 = torchfile.load(args.vgg3)
        decoder3_torch = torchfile.load(args.decoder3)
        vgg4 = torchfile.load(args.vgg4)
        decoder4_torch = torchfile.load(args.decoder4)
        vgg5 = torchfile.load(args.vgg5)
        decoder5_torch = torchfile.load(args.decoder5)

        
        self.e1 = encoder1(vgg1)
        self.d1 = decoder1(decoder1_torch)
        self.e2 = encoder2(vgg2)
        self.d2 = decoder2(decoder2_torch)
        self.e3 = encoder3(vgg3)
        self.d3 = decoder3(decoder3_torch)
        self.e4 = encoder4(vgg4)
        self.d4 = decoder4(decoder4_torch)
        self.e5 = encoder5(vgg5)
        self.d5 = decoder5(decoder5_torch)
        
        self.wct1 = ThumbWhitenColorTransform()
        self.wct2 = ThumbWhitenColorTransform()
        self.wct3 = ThumbWhitenColorTransform()
        self.wct4 = ThumbWhitenColorTransform()
        self.wct5 = ThumbWhitenColorTransform()
Example #5
def main():
    torch.manual_seed(args.seed)
    data = torchfile.load(os.path.join(args.data_path, 'train.t7'))
    vocab = torchfile.load(os.path.join(args.data_path, 'vocab.t7'))

    split = int(0.9 * len(data))
    train_loader = torch.utils.data.DataLoader(WordsDataset(
        data[:split], vocab, args.seq),
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(WordsDataset(
        data[split:], vocab, args.seq),
                                              batch_size=args.batch_size,
                                              shuffle=True)
    num_classes = max(vocab.values()) + 1

    model = LanguageModel(num_classes, args.emsize, 256, batch_first=True)
    if args.retrain:
        model = torch.load('{}.checkpoint'.format(args.save))

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()
    train_writer = SummaryWriter(log_dir=os.path.join(args.log_dir, 'train'))
    test_writer = SummaryWriter(log_dir=os.path.join(args.log_dir, 'test'))

    steps = 100
    total_steps = len(train_loader) // steps
    for epoch in range(1, args.epochs + 1):
        for step in range(total_steps):
            train(model, criterion, optimizer, train_loader, train_writer,
                  step, total_steps)
            test(model, criterion, test_loader, test_writer, step, total_steps)
            torch.save(model, '{}.checkpoint'.format(args.save))
Example #6
    def __init__(self, args):
        super(WCT, self).__init__()
        # load pre-trained network
        vgg1 = load(
            args.vgg1,
            force_8bytes_long=True)  # force_8bytes_long=True is required on Windows
        decoder1_torch = load(args.decoder1, force_8bytes_long=True)
        vgg2 = load(args.vgg2, force_8bytes_long=True)
        decoder2_torch = load(args.decoder2, force_8bytes_long=True)
        vgg3 = load(args.vgg3, force_8bytes_long=True)
        decoder3_torch = load(args.decoder3, force_8bytes_long=True)
        vgg4 = load(args.vgg4, force_8bytes_long=True)
        decoder4_torch = load(args.decoder4, force_8bytes_long=True)
        vgg5 = load(args.vgg5, force_8bytes_long=True)
        decoder5_torch = load(args.decoder5, force_8bytes_long=True)

        self.e1 = encoder1(vgg1)
        self.d1 = decoder1(decoder1_torch)
        self.e2 = encoder2(vgg2)
        self.d2 = decoder2(decoder2_torch)
        self.e3 = encoder3(vgg3)
        self.d3 = decoder3(decoder3_torch)
        self.e4 = encoder4(vgg4)
        self.d4 = decoder4(decoder4_torch)
        self.e5 = encoder5(vgg5)
        self.d5 = decoder5(decoder5_torch)
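The only difference from Example #4 is force_8bytes_long=True, which the inline comment in the original notes is required on Windows: torchfile needs it for .t7 files whose Lua long values were serialized as 8 bytes. A minimal sketch with a hypothetical path:

import torchfile

vgg = torchfile.load('models/vgg_normalised_conv1_1.t7', force_8bytes_long=True)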
Example #7
def makeModel(pathToFile):
    file = open(pathToFile, 'r')
    fileData = ""
    for chunk in file:
        fileData += chunk
    fileData = fileData.split('\n')
    while (fileData[-1] == ''):
        fileData = fileData[:-1]
    file.close()
    pathToWeight = fileData[-2]
    pathToBias = fileData[-1]
    numberOfLayers = fileData[0]
    fileData = fileData[1:-2]

    model = Model()

    for element in fileData:
        currentData = element.split()
        if currentData[0] == "linear":
            model.addLayer(Linear(int(currentData[1]), int(currentData[2])))
        elif currentData[0] == "relu":
            model.addLayer(ReLU())
        # elif currentData[0] == "batchnorm":
        # 	model.addLayer(BatchNorm(int(currentData[1])))

    weightData = tf.load(pathToWeight)
    biasData = tf.load(pathToBias)
    index = 0
    for layer in model.Layers:
        if layer.isTrainable:
            layer.weight = torch.from_numpy(weightData[index]).t()
            layer.bias = torch.from_numpy(biasData[index])
            index += 1

    return model
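For reference, a config file that makeModel can parse might look like the following (hypothetical paths, inferred from the parsing logic above: the first line is the layer count, the middle lines are layer specs, and the last two lines are the weight and bias file paths):

2
linear 784 10
relu
/path/to/weights.t7
/path/to/biases.t7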
Example #8
def get_bookmarks(split='train'):
    """Load the bookmarks dataset"""
    assert split in ['train', 'test']
    feature_dim = 2150
    label_dim = 208

    features = np.zeros((0, feature_dim))
    labels = np.zeros((0, label_dim))

    if split == "train":
        # Load train data
        for nr in range(1, 6):
            data = torchfile.load("%s/icml_mlc_data/data/bookmarks/bookmarks-train-%d.torch" % (dir_path,nr))
            labels = np.concatenate((labels, data['labels']), axis=0)
            features = np.concatenate((features, data['data'][:, 0:feature_dim]), axis=0)

        # Load dev data
        data = torchfile.load("%s/icml_mlc_data/data/bookmarks/bookmarks-dev.torch" % dir_path)
        labels = np.concatenate((labels, data['labels']), axis=0)
        features = np.concatenate((features, data['data'][:, 0:feature_dim]), axis=0)
    else:
        # Load train data
        for nr in range(1, 4):
            data = torchfile.load("%s/icml_mlc_data/data/bookmarks/bookmarks-test-%d.torch" % (dir_path, nr))
            labels = np.concatenate((labels, data['labels']), axis=0)
            features = np.concatenate((features, data['data'][:, 0:feature_dim]), axis=0)

    return labels, features, None
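Note that this snippet indexes the loaded dict with str keys ('labels'), while Example #15 below uses bytes keys (b'labels'): under Python 3, torchfile returns Lua strings as bytes unless decoding is requested. A sketch with a hypothetical path:

import torchfile

# utf8_decode_strings=True makes torchfile return str instead of bytes,
# so 'labels' (rather than b'labels') works as a dict key
data = torchfile.load('bookmarks-dev.torch', utf8_decode_strings=True)
labels = data['labels']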
Example #9
def process_data(path_data, path_labels):
    global data, labels, sz
    # path_labels = "./data/labels.bin"
    # path_data = "./data/data.bin"

    # loading as numpy array
    l = trf.load(path_labels)
    d = trf.load(path_data)

    # converting numpy arrays into tensors
    _l = torch.from_numpy(l)
    _d = torch.from_numpy(d)
    _data = _d.contiguous().view(_d.size()[0], -1).type(torch.DoubleTensor)
    _labels = _l.type(torch.DoubleTensor)

    data = _data[:]
    labels = _labels[:]

    sz = data.size()[0]

    # normalizing the data
    mean = data.mean(dim=0)
    std = data.std(dim=0, keepdim=True)
    data = (data - mean) / std

    return mean, std
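If any feature is constant, its std is zero and the normalization above produces NaNs; a common guard (a sketch, not part of the original) adds a small epsilon to the denominator:

data = (data - mean) / (std + 1e-8)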
Example #10
def transform(dataset_dir, image_dir, text_dir, new_image_dir, new_text_dir):

    if not os.path.exists(os.path.join(dataset_dir, new_image_dir)):
        os.makedirs(os.path.join(dataset_dir, new_image_dir))
    if not os.path.exists(os.path.join(dataset_dir, new_text_dir)):
        os.makedirs(os.path.join(dataset_dir, new_text_dir))

    for ofn in os.listdir(os.path.join(dataset_dir, image_dir)):
        # note that names in image dir and text dir as supposed to be the same

        # class name
        clas = os.path.splitext(ofn)[0]

        # old t7 image file
        t7fn = os.path.join(dataset_dir, image_dir, ofn)

        # new h5 image file
        h5fn = os.path.join(dataset_dir, new_image_dir, clas + '.h5')

        t7_np = torchfile.load(t7fn)
        with h5py.File(h5fn, 'w') as h5fp:
            for i in range(t7_np.shape[0]):
                h5fp[str(i)] = t7_np[i]

        # old t7 text file
        t7fn = os.path.join(dataset_dir, text_dir, ofn)
        # new h5 text file
        h5fn = os.path.join(dataset_dir, new_text_dir, clas + '.h5')

        t7_np = torchfile.load(t7fn)
        with h5py.File(h5fn, 'w') as h5fp:
            for i in range(t7_np.shape[0]):
                h5fp[str(i)] = t7_np[i]
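A usage sketch with hypothetical directory names:

transform('datasets/birds', 'images_t7', 'text_t7', 'images_h5', 'text_h5')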
Example #11
def create_model(make_model_file_path):
    _f = open(make_model_file_path, 'r')
    _fData = ''.join(line for line in _f).split('\n')
    while (_fData[-1] == ''):
        _fData = _fData[:-1]
    _f.close()

    weight_p = _fData[-2]
    bias_p = _fData[-1]
    _fData = _fData[1:-2]
    model = Model()

    for data in _fData:
        test_data = data.split()
        # NOTE: the original dict-literal dispatch constructed (and added) both
        # layers eagerly and crashed on 'relu' lines, which carry no size
        # fields; explicit branching fixes both problems
        if test_data[0] == "linear":
            no_ips = int(test_data[1])
            no_ops = int(test_data[2])
            model.addLayer(Linear(no_ips, no_ops))
        elif test_data[0] == "relu":
            model.addLayer(ReLU())

    i = 0
    weight_data = trf.load(weight_p)  # load the parameter files once, not per layer
    bias_data = trf.load(bias_p)
    for l in model.Layers:
        if l.canTrain:
            l.weight = torch.from_numpy(weight_data[i]).t()
            l.bias = torch.from_numpy(bias_data[i])
            i += 1

    return model
Example #12
def getModel(config_file):
    with open(config_file,'r') as f:
        config = f.readlines()
    weights = torchfile.load(config[-2].strip())
    biases = torchfile.load(config[-1].strip())

    model = Model()
    il = 0
    for desc in config[1:-2]:
        desc = desc.split()
        if desc[0] == 'linear':
            in_features, out_features = int(desc[1]), int(desc[2])
            layer = Linear(in_features, out_features)
            layer.W = torch.Tensor(weights[il])
            layer.B = torch.Tensor(biases[il]).view(out_features, 1)
            il += 1
        elif desc[0] == 'relu':
            layer = ReLU()
        elif desc[0] == 'dropout':
            layer = Dropout(float(desc[1]), isTrain=False)
        else:
            print(desc[0] + ' layer not implemented!')
            continue  # skip unknown layers rather than re-adding the previous one
        model.addLayer(layer)

    return model
Example #13
File: MNIST.py Project: szokejokepu/argo
    def load_binary_det_mnist(data_dir, train_test_ratio=None, pm=True):
        """ load the mnist from torchfiles in the path labels are not available
         """
        assert (not train_test_ratio), "not implemented yet"
        import torchfile

        mnist_data_path = data_dir
        if not os.path.exists(mnist_data_path):
            os.mkdir(mnist_data_path)
        # dataset from:
        # train: http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_train.amat
        # validation: http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_valid.amat
        # test: http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_test.amat
        train_set_x = torchfile.load(mnist_data_path + "/train.t7")
        validation_set_x = torchfile.load(mnist_data_path + "/valid.t7")
        test_set_x = torchfile.load(mnist_data_path + "/test.t7")
        # no labels available here
        print('[WARNING] - This dataset has no labels')
        if pm:
            train_set_x = train_set_x.astype(np.float32) * 2 - 1
            validation_set_x = validation_set_x.astype(np.float32) * 2 - 1
            test_set_x = test_set_x.astype(np.float32) * 2 - 1
        else:
            train_set_x = train_set_x.astype(np.float32)
            validation_set_x = validation_set_x.astype(np.float32)
            test_set_x = test_set_x.astype(np.float32)
        return train_set_x, None, validation_set_x, None, test_set_x, None
Example #14
    def __init__(self, args):
        super(WCT, self).__init__()
        # load the pre-trained VGG:
        vgg1 = load(args.vgg1, force_8bytes_long=True)
        # vgg1 = vgg1.eval()
        decoder1_torch = load(args.decoder1, force_8bytes_long=True)
        vgg2 = load(args.vgg2, force_8bytes_long=True)
        decoder2_torch = load(args.decoder2, force_8bytes_long=True)
        vgg3 = load(args.vgg3, force_8bytes_long=True)
        decoder3_torch = load(args.decoder3, force_8bytes_long=True)
        vgg4 = load(args.vgg4, force_8bytes_long=True)
        decoder4_torch = load(args.decoder4, force_8bytes_long=True)
        vgg5 = load(args.vgg5, force_8bytes_long=True)
        decoder5_torch = load(args.decoder5, force_8bytes_long=True)

        self.e1 = encoder1(vgg1)
        self.d1 = decoder1(decoder1_torch)
        self.e2 = encoder2(vgg2)
        self.d2 = decoder2(decoder2_torch)
        self.e3 = encoder3(vgg3)
        self.d3 = decoder3(decoder3_torch)
        self.e4 = encoder4(vgg4)
        self.d4 = decoder4(decoder4_torch)
        self.e5 = encoder5(vgg5)
        self.d5 = decoder5(decoder5_torch)
Example #15
def _load_bookmarks_data(train_data=True):
    """
        Load (pre-split) bookmarks dataset, no examples with all positive/negative labels (checked)
    """
    features = np.zeros((0, _bookmarks_nFeatures))
    labels = np.zeros((0, _bookmarks_nLabels))

    if train_data is True:
        # load train data
        for k in range(1, 6):
            data_dict = torchfile.load(os.path.join(data_dir, 'bookmarks/bookmarks-train-%d.torch' % k))
            features = np.concatenate([features, data_dict[b'data'][:, 0:_bookmarks_nFeatures]], axis=0)
            labels = np.concatenate([labels, data_dict[b'labels']], axis=0)

        # load dev data
        data_dict = torchfile.load(os.path.join(data_dir, 'bookmarks/bookmarks-dev.torch'))
        features = np.concatenate([features, data_dict[b'data'][:, 0:_bookmarks_nFeatures]], axis=0)
        labels = np.concatenate([labels, data_dict[b'labels']], axis=0)

    else:
        # load test data
        for k in range(1, 4):
            data_dict = torchfile.load(os.path.join(data_dir, 'bookmarks/bookmarks-test-%d.torch' % k))
            features = np.concatenate([features, data_dict[b'data'][:, 0:_bookmarks_nFeatures]], axis=0)
            labels = np.concatenate([labels, data_dict[b'labels']], axis=0)
    return features, labels.astype(np.bool)
Example #16
def read_ip_ip_grad(ip_path, ip_grad_path):
    ip = trf.load(ip_path)
    ip_grad = trf.load(ip_grad_path)

    ip = torch.from_numpy(ip)
    ip_grad = torch.from_numpy(ip_grad)

    return ip, ip_grad
Example #17
    def __getitem__(self, idx):
        class_name = self.class_list[idx]
        image_path = os.path.join(self.image_dir, class_name)
        image = torchfile.load(image_path)
        text_path = os.path.join(self.text_dir, class_name)
        text = torchfile.load(text_path)

        return {'image': image, 'text': text}
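A sketch of wiring this dataset into a DataLoader (the arguments are assumptions; only __getitem__ is shown above):

from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=16, shuffle=True)  # 'dataset' is an instance of the class above
batch = next(iter(loader))
images, texts = batch['image'], batch['text']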
Example #18
def readCommand(argv):
    "Processes the command used to run from the command line."
    from optparse import OptionParser
    parser = OptionParser(USAGE_STRING)

    parser.add_option(
        '-i',
        '--input_bin',
        help=default('The type of classifier'),
        default='CS_763_Deep_Learning_HW/input_criterion_sample_1.bin')
    parser.add_option('-t',
                      '--target_bin',
                      help=default('The size of the training set'),
                      default='CS_763_Deep_Learning_HW/target_sample_1.bin',
                      type="string")
    parser.add_option(
        '-g',
        '--ig',
        help=default('Whether to use enhanced features'),
        default='CS_763_Deep_Learning_HW/gradCriterionInput_sample_1.bin',
        type="string")
    # parser.add_option('-a', '--autotune', help=default("Whether to automatically tune hyperparameters"), default=False, action="store_true")
    # parser.add_option('-i', '--iterations', help=default("Maximum iterations to run training"), default=3, type="int")
    # parser.add_option('-s', '--test', help=default("Amount of test data to use"), default=TEST_SET_SIZE, type="int")
    # parser.add_option('-v', '--validate', help=default("Whether to validate when training (for graphs)"), default=False, action="store_true")
    # parser.add_option('-d', '--dataset', help=default("Specifies the data set to use"), choices=['d1', 'd2'], default='d1')
    # parser.add_option('-k', '--classes', help=default("Specifies the number of classes"), default=10, type="int")

    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + str(otherjunk))
    args = {}

    input_bin_path = options.input_bin
    target_bin_path = options.target_bin
    grad_input_bin_path = options.ig

    input_bin = torch.tensor(torchfile.load(input_bin_path)).double()
    target_bin = torch.tensor(torchfile.load(target_bin_path)).double()
    grad_input_bin = torch.tensor(torchfile.load(grad_input_bin_path)).double()

    size = target_bin.shape[0]
    # print size
    for j in range(size):
        target_bin[j] -= 1

    # print target_bin
    # grad_Input_bin = options.grad_Input_bin
    # print grad_input_bin
    # loss = Criterion.forward(input_bin, target_bin)
    # gradLoss = Criterion.backward(input_bin, target_bin)
    criterion = Criterion.Criterion()
    loss = criterion.forward(input_bin, target_bin)
    print "Loss is -----", loss
    # print loss
    # print input_bin.shape
    # print gradLoss
    gradInput = criterion.backward(input_bin, target_bin)
Example #19
def process_entity():
    ent_freq = {}
    with open(freq_path, 'r') as f:
        count = 0
        for line in f:
            eid, name, freq = line.strip().split('\t')
            name = name.replace('"', '%22').replace(' ', '_')
            ent_freq[name] = freq
            count += 1
            if count % 1000 == 0:
                print(count, end='\r')

    # embeddings
    print('load embeddings from', embs_path)
    embs = torchfile.load(embs_path)

    print('save embeddings to file entity_embeddings')
    np.save('entity_embeddings', embs)

    # dictionary
    print('load name_id_map from', name_id_map_path)
    wikiid2name = torchfile.load(name_id_map_path)[b'ent_wikiid2name']

    print('load thid_to_wikiid from', thid_to_wikiid_path)
    thid2wikiid = torchfile.load(
        thid_to_wikiid_path)[b'reltd_ents_rltdid_to_wikiid']

    name2id = {}
    id2name = []
    thid = 0

    with open('dict.entity', 'w') as f:
        for wikiid in thid2wikiid:
            try:
                name = wikiid2name[wikiid].decode('utf-8').replace(
                    '"', '%22').replace(' ', '_')
            except:
                name = 'UNK-wikiid-' + str(wikiid)
                # print(wikiid)
            f.write('en.wikipedia.org/wiki/' + name + '\t' +
                    ent_freq.get(name, '100') + '\n')

            name2id[name] = thid
            id2name.append(name)
            thid += 1

    # sanity check
    targets = {'Bill_Clinton', 'Vietnam', 'Edinburgh'}
    for target in targets:
        print('---', target)
        emb = embs[name2id[target], :].reshape(-1, 1)
        scores = np.matmul(embs, emb).reshape(-1)

        for i in range(10):
            maxid = np.argmax(scores)
            scores[maxid] = -1e10
            print(id2name[maxid])
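The sanity check takes the top 10 by repeatedly argmaxing and masking the winner; an equivalent single-pass sketch with np.argpartition, applied before scores is mutated:

top10 = np.argpartition(-scores, 10)[:10]
for i in top10[np.argsort(-scores[top10])]:
    print(id2name[i])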
Example #20
def readCommand(argv):
    "Processes the command used to run from the command line."
    from optparse import OptionParser
    parser = OptionParser(USAGE_STRING)

    parser.add_option('-m', '--modelName', help=default('modelConfig'))
    parser.add_option('-d',
                      '--data',
                      help=default('input'),
                      default='data.bin',
                      type="string")
    parser.add_option('-t',
                      '--target',
                      help=default('gradoutput'),
                      default='labels.bin',
                      type="string")

    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + str(otherjunk))
    args = {}

    model_name = options.modelName
    training_data_path = options.data
    target_labels_path = options.target

    Data = torchfile.load(training_data_path)
    Labels = torchfile.load(target_labels_path)

    Data = torch.tensor(normalize(Data)).double()
    Data = Data.reshape(Data.shape[0], 108 * 108)
    Labels = torch.tensor(Labels).long()

    # trainingData = Data[0:int(Data.shape[0]*0.9),:]
    # trainingLabels = Labels[0:int(Data.shape[0]*0.9)]

    my_model = Model.Model()
    my_model.addLayer(Linear(108 * 108, 1024))
    my_model.addLayer(ReLu())
    my_model.addLayer(Linear(1024, 256))
    my_model.addLayer(ReLu())
    my_model.addLayer(Linear(256, 6))
    my_model.addLayer(ReLu())

    train_and_test(my_model, Data, Labels, 1, 432, 0.01, 0.001)
    try:
        os.mkdir(model_name)
    except:
        pass
    weights = []
    weights.append(my_model.layers[0].W)
    weights.append(my_model.layers[0].B)
    weights.append(my_model.layers[2].W)
    weights.append(my_model.layers[2].B)
    weights.append(my_model.layers[4].W)
    weights.append(my_model.layers[4].B)
    torch.save(weights, model_name + "/model.bin")
Example #21
    def load(self, key, load_id):
        module = getattr(self, key)
        loadpath = os.path.join(LOAD_DIR, load_id)
        for i, m in enumerate(module):
            if type(m) == nn.Conv2d:
                weight_path = loadpath + '_' + str(i + 1) + 'weight.t7'
                bias_path = loadpath + '_' + str(i + 1) + 'bias.t7'
                m.weight.data.copy_(torchfile.load(weight_path))
                m.bias.data.copy_(torchfile.load(bias_path))
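Depending on the PyTorch version, Tensor.copy_ may reject the raw numpy arrays that torchfile.load returns; a defensive variant of the two copy lines (a sketch reusing the loop variables above, assuming torch is imported) wraps them first:

m.weight.data.copy_(torch.from_numpy(torchfile.load(weight_path)))
m.bias.data.copy_(torch.from_numpy(torchfile.load(bias_path)))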
Example #22
def main(args):
    criterion = Criterion()
    input = torch.Tensor(torchfile.load(args.i))
    target = torch.Tensor(torchfile.load(
        args.t)) - 1  # because python uses 0-indexing

    loss = criterion(input, target)
    print('Average cross-entropy loss = {}'.format(loss))

    gradInput = criterion.backward(input, target)
    torch.save(gradInput.numpy(), args.ig)
Example #23
    def load_annos(self):
        train_annos = torchfile.load(
            os.path.join(self.root, 'anno', 'train.dat'))
        val_annos = torchfile.load(os.path.join(self.root, 'anno', 'val.dat'))
        train_val_annos = {**train_annos, **val_annos}
        annos = {}
        for name, kp in train_val_annos.items():
            name = name.decode()
            annos[name] = {}
            for idx, loc in kp.items():
                annos[name][int(idx.decode())] = tuple(loc)
        return annos
Example #24
def load_annos():
    train_annos = torchfile.load('./anno/train.dat')
    val_annos = torchfile.load('./anno/val.dat')
    train_val_annos = {**train_annos, **val_annos}
    annos = {}
    for name, kp in train_val_annos.items():
        name = name.decode()
        annos[name] = {}
        for idx, loc in kp.items():
            annos[name][int(idx.decode())] = tuple(loc)

    return annos
Example #25
def load_model(device):
  word_map = torchfile.load(os.path.join(PARAM_DIR, 'word_map.th7'))
  word_map = [w.decode('utf-8') for w in word_map]
  word_to_idx = {w: i for i, w in enumerate(word_map)}
  word_freq = torchfile.load(os.path.join(os.path.join(PARAM_DIR, 'word_freq.th7')))
  mapto = torch.from_numpy(util.reverse(np.argsort(-word_freq))).long().to(device)

  with open(os.path.join(PARAM_DIR, 'lm.pt'), 'rb') as model_file:
    model = torch.load(model_file)
  model.full = True  # Use real softmax--important!
  model.to(device)
  model.eval()
  return QueryHandler(model, word_to_idx, mapto, device)
Example #26
            def __init__(self, intLevel):
                super(Basic, self).__init__()

                self.moduleBasic = torch.nn.Sequential(
                    torch.nn.Conv2d(in_channels=8,
                                    out_channels=32,
                                    kernel_size=7,
                                    stride=1,
                                    padding=3), torch.nn.ReLU(inplace=False),
                    torch.nn.Conv2d(in_channels=32,
                                    out_channels=64,
                                    kernel_size=7,
                                    stride=1,
                                    padding=3), torch.nn.ReLU(inplace=False),
                    torch.nn.Conv2d(in_channels=64,
                                    out_channels=32,
                                    kernel_size=7,
                                    stride=1,
                                    padding=3), torch.nn.ReLU(inplace=False),
                    torch.nn.Conv2d(in_channels=32,
                                    out_channels=16,
                                    kernel_size=7,
                                    stride=1,
                                    padding=3), torch.nn.ReLU(inplace=False),
                    torch.nn.Conv2d(in_channels=16,
                                    out_channels=2,
                                    kernel_size=7,
                                    stride=1,
                                    padding=3))

                if intLevel == 5:
                    if arguments_strModel == '3' or arguments_strModel == '4':
                        intLevel = 4  # the models trained on the flying chairs dataset do not come with weights for the sixth layer
                    # end
                # end

                for intConv in range(5):
                    self.moduleBasic[intConv * 2].weight.data.copy_(
                        torch.Tensor(
                            torchfile.load('./models/spynet_models/modelL' +
                                           str(intLevel + 1) + '_' +
                                           arguments_strModel + '-' +
                                           str(intConv + 1) +
                                           '-weight.t7')))  # my change
                    self.moduleBasic[intConv * 2].bias.data.copy_(
                        torch.Tensor(
                            torchfile.load('./models/spynet_models/modelL' +
                                           str(intLevel + 1) + '_' +
                                           arguments_strModel + '-' +
                                           str(intConv + 1) +
                                           '-bias.t7')))  # my change
Example #27
def get_CIFAR10_dataset(train_root_location, test_root_location):
    """
    Function to obtain a tensor-ized version for the CIFAR10 dataset
    Args:
        train_root_location : dictionary of the form for the training data
                                    {'inputs': <input_root>, 'outputs': <output_root>}
        test_root_location  : dictionary of the form for the testing data
                                    {'inputs': <input_root>, 'outputs': <output_root>}
    """
    tr_input = load(train_root_location['inputs']).astype(np.float32)
    tr_output = load(train_root_location['outputs']).astype(int)
    te_input = load(test_root_location['inputs']).astype(np.float32)
    te_output = load(test_root_location['outputs']).astype(int)
    # shift Lua's 1-based class labels to 0-based
    return (tr_input, tr_output - 1), (te_input, te_output - 1)
Example #28
    def next_batch(self, batch_size):
        # if stage = train, one image is paired with one text description
        # otherwise we have batch_size of 1 with 5 ratio
        image_list = []  # size [batch_size, 224, 224, 3]
        real_text_req_list = []  # size [batch_size, 1024]
        fake_text_req_list = []  # size [batch_size, 1024]

        for i_data in range(batch_size):

            # the real text
            tf_path = os.path.join(self.dataset_dir, 'raw_data',
                                   self.file_list_in_use[self.data_id])
            t7_loader = torchfile.load(tf_path)

            image_path = os.path.join(self.dataset_dir, t7_loader['img'])
            image_list.append(load_image(image_path, img_size=64))

            # the mismatched text representation
            fake_id = np.mod(
                self.data_id + random.randint(1, self.dataset_size - 1),
                self.dataset_size)
            tf_path = os.path.join(self.dataset_dir, 'raw_data',
                                   self.file_list_in_use[fake_id])
            fake_t7_loader = torchfile.load(tf_path)
            if self.stage == 'train':
                # randomly choose one real text_rep and one fake text_rep
                ran_int = random.randint(0, len(t7_loader['word'][0]) - 1)
                real_text_req_list.append(t7_loader['txt'][ran_int, :])

                # fake text rep
                ran_int = random.randint(0, len(fake_t7_loader['word'][0]) - 1)
                fake_text_req_list.append(fake_t7_loader['txt'][ran_int, :])
            else:
                assert False, logger.error('Not implemented')

            # increment on the data id
            self.data_id += 1
            if self.data_id >= self.dataset_size:
                if self.stage == 'train':
                    self.data_id = 0
                    random.shuffle(self.file_list_in_use)
                else:  # all the test data is loaded
                    break

        logger.info('[LOAD BATCH] New batch, loading to id {}'.format(
            self.data_id))

        return np.array(image_list), np.array(real_text_req_list), \
            np.array(fake_text_req_list)
Example #29
def getTest_Img_proto_labels(img_path, desc_path, att_path):
    all_files = glob.glob(img_path + '/**.t7')
    print('Number of files in directory: {}'.format(len(all_files)))
    with open(att_path, 'rb') as handle:
        att_dict = pickle.load(handle)

    for i, file in enumerate(all_files):
        class_name = os.path.basename(file)
        c_name = class_name[:3]
        #Load image, desc and attribute files
        temp_img = torchfile.load(file, force_8bytes_long=True)
        desc_file = os.path.join(desc_path, class_name)
        temp_desc = torchfile.load(desc_file, force_8bytes_long=True)
        temp_att = np.expand_dims(np.array(att_dict[c_name], dtype='float32'),
                                  axis=0)
        if i == 0:
            img = temp_img.transpose(0, 2, 1).reshape(
                temp_img.shape[0] * temp_img.shape[2], temp_img.shape[1])
            desc = [
                temp_desc.transpose(0, 2, 1).reshape(
                    temp_desc.shape[0] * temp_desc.shape[2],
                    temp_desc.shape[1])
            ]
            att = [np.repeat(temp_att, img.shape[0], axis=0)]
            proto2label = [i]
            x2label = [i] * (temp_img.shape[0] * temp_img.shape[2])
        else:
            img = np.append(img,
                            temp_img.transpose(0, 2, 1).reshape(
                                temp_img.shape[0] * temp_img.shape[2],
                                temp_img.shape[1]),
                            axis=0)
            desc.append(
                temp_desc.transpose(0, 2, 1).reshape(
                    temp_desc.shape[0] * temp_desc.shape[2],
                    temp_desc.shape[1]))
            att.append(
                np.repeat(temp_att,
                          temp_img.shape[0] * temp_img.shape[2],
                          axis=0))
            proto2label += [i]
            x2label += [i] * (temp_img.shape[0] * temp_img.shape[2])
        assert (len(x2label) == img.shape[0]
                ), 'Shape not match for class {}, img {} and desc {}'.format(
                    class_name, img.shape, len(desc))
    assert (len(proto2label) == 50), 'Mistake in finding proto2label'
    print('Total test images: {}'.format(img.shape[0]))
    return img, desc, att, x2label, proto2label
Example #30
def torch_to_pytorch(model, t7_file, output):
    py_layers = []
    for layer in list(model.children()):
        py_layer_serial(layer, py_layers)

    t7_data = torchfile.load(t7_file)
    t7_layers = []
    for layer in t7_data:
        torch_layer_serial(layer, t7_layers)

    j = 0
    for i, py_layer in enumerate(py_layers):
        py_name = type(py_layer).__name__
        t7_layer = t7_layers[j]
        t7_name = t7_layer[0].split('.')[-1]
        if layer_map[t7_name] != py_name:
            raise RuntimeError('%s does not match %s' % (py_name, t7_name))

        if py_name == 'LSTM':
            n_layer = 2 if py_layer.bidirectional else 1
            n_layer *= py_layer.num_layers
            t7_layer = t7_layers[j:j + n_layer]
            j += n_layer
        else:
            j += 1

        load_params(py_layer, t7_layer)

    torch.save(model.state_dict(), output)
Example #32
def load_t7checkpoint(path, models_keys=['model'], custom_layers=None):
    # model_keys iterable that contains for example the word 'model'
    # the model to load in pyfunt
    cp = torchfile.load(path)
    for model in models_keys:
        cp[model] = load_t7model(obj=cp[model], custom_layers=custom_layers)
    return cp
Example #33
    def sample(self, datapath, stage=1):
        if stage == 1:
            netG, _ = self.load_network_stageI()
        else:
            netG, _ = self.load_network_stageII()
        netG.eval()

        # Load text embeddings generated from the encoder
        t_file = torchfile.load(datapath)
        captions_list = t_file.raw_txt
        embeddings = np.concatenate(t_file.fea_txt, axis=0)
        num_embeddings = len(captions_list)
        print('Successfully load sentences from: ', datapath)
        print('Total number of sentences:', num_embeddings)
        print('num_embeddings:', num_embeddings, embeddings.shape)
        # path to save generated samples
        save_dir = cfg.NET_G[:cfg.NET_G.find('.pth')]
        mkdir_p(save_dir)

        batch_size = np.minimum(num_embeddings, self.batch_size)
        nz = cfg.Z_DIM
        noise = Variable(torch.FloatTensor(batch_size, nz))
        if cfg.CUDA:
            noise = noise.cuda()
        count = 0
        while count < num_embeddings:
            if count > 3000:
                break
            iend = count + batch_size
            if iend > num_embeddings:
                iend = num_embeddings
                count = num_embeddings - batch_size
            embeddings_batch = embeddings[count:iend]
            # captions_batch = captions_list[count:iend]
            txt_embedding = Variable(torch.FloatTensor(embeddings_batch))
            if cfg.CUDA:
                txt_embedding = txt_embedding.cuda()

            #######################################################
            # (2) Generate fake images
            ######################################################
            noise.data.normal_(0, 1)
            inputs = (txt_embedding, noise)
            _, fake_imgs, mu, logvar = \
                nn.parallel.data_parallel(netG, inputs, self.gpus)
            for i in range(batch_size):
                save_name = '%s/%d.png' % (save_dir, count + i)
                im = fake_imgs[i].data.cpu().numpy()
                im = (im + 1.0) * 127.5
                im = im.astype(np.uint8)
                # print('im', im.shape)
                im = np.transpose(im, (1, 2, 0))
                # print('im', im.shape)
                im = Image.fromarray(im)
                im.save(save_name)
            count += batch_size
Example #34
def main():
    saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=1)
    with tf.Session() as sess:
        tf.set_random_seed(1234)
        np.random.seed(123)

        writer = tf.train.SummaryWriter('tf-log/%d' % time.time(), sess.graph_def)

        restore_vars(saver, sess, checkpoint_dir)

        # load train data
        train_data = torchfile.load('train.t7')
        n = len(train_data['x'][0])
        for i in range(n_train_step):
            # start = i * batch % n
            # end = min(start + batch, n)
            # rgb_image = train_data['x'][0][start:end].transpose(0, 2, 3, 1)
            # grasp_class = train_data['y'][start:end] - 1

            ind = np.random.choice(n, batch, replace=False)
            rgb_image = train_data['x'][0][ind].transpose(0, 2, 3, 1)
            # d_image = train_data['x'][1][ind].transpose(0, 2, 3, 1)
            grasp_class = train_data['y'][ind] - 1

            if i % n_eval_interval == 0:
                val_feed = {
                    grasp_image_ph: rgb_image,
                    grasp_class_ph: grasp_class,
                    # depth_image_ph: d_image,
                    keep_prob_ph: 1.,
                }

                # print(logit.eval(feed_dict=val_feed))
                print('grasp loss', grasp_class_loss.eval(feed_dict=val_feed))
                # print('depth loss', depth_loss.eval(feed_dict=val_feed))
                print('accuracy', accuracy.eval(feed_dict=val_feed))
                writer.add_summary(sess.run(summary_op, feed_dict=val_feed), i)
                saver.save(sess, checkpoint_dir + '/model', global_step=i)

            # train
            train_feed = {
                grasp_image_ph: rgb_image,
                grasp_class_ph: grasp_class,
                # depth_image_ph: d_image,
                keep_prob_ph: 0.8,
            }
            train_op.run(feed_dict=train_feed)

        # save the model
        saver.save(sess, checkpoint_dir + '/model', global_step=i)
Example #35
def get_st(file):
    info = torchfile.load(file)

    ids = info['ids']

    imids = []
    for id in ids:
        imids.append(''.join(chr(c) for c in id))

    st_vecs = {}
    st_vecs['encs'] = info['encs']
    st_vecs['rlens'] = info['rlens']
    st_vecs['rbps'] = info['rbps']
    st_vecs['ids'] = imids

    print(np.shape(st_vecs['encs']),len(st_vecs['rlens']),len(st_vecs['rbps']),len(st_vecs['ids']))
    return st_vecs
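The chr-join above rebuilds each id string from an array of character codes; an equivalent one-line sketch, assuming the codes are ASCII:

imid = bytes(int(c) for c in id).decode('ascii')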
Example #36
def vgg_from_t7(t7_file, target_layer=None):
    '''Extract VGG layers from a Torch .t7 model into a Keras model
       e.g. vgg = vgg_from_t7('vgg_normalised.t7', target_layer='relu4_1')
       Adapted from https://github.com/jonrei/tf-AdaIN/blob/master/AdaIN.py
       Converted caffe->t7 from https://github.com/xunhuang1995/AdaIN-style
    '''
    t7 = torchfile.load(t7_file, force_8bytes_long=True)
    
    inp = Input(shape=(None, None, 3), name='vgg_input')

    x = inp
    
    for idx,module in enumerate(t7.modules):
        name = module.name.decode() if module.name is not None else None
        
        if idx == 0:
            name = 'preprocess'  # VGG 1st layer preprocesses with a 1x1 conv to multiply by 255 and subtract BGR mean as bias

        if module._typename == b'nn.SpatialReflectionPadding':
            x = Lambda(pad_reflect)(x)            
        elif module._typename == b'nn.SpatialConvolution':
            filters = module.nOutputPlane
            kernel_size = module.kH
            weight = module.weight.transpose([2,3,1,0])
            bias = module.bias
            x = Conv2D(filters, kernel_size, padding='valid', activation=None, name=name,
                        kernel_initializer=lambda shape: K.constant(weight, shape=shape),
                        bias_initializer=lambda shape: K.constant(bias, shape=shape),
                        trainable=False)(x)
        elif module._typename == b'nn.ReLU':
            x = Activation('relu', name=name)(x)
        elif module._typename == b'nn.SpatialMaxPooling':
            x = MaxPooling2D(padding='same', name=name)(x)
        # elif module._typename == b'nn.SpatialUpSamplingNearest': # Not needed for VGG
        #     x = Upsampling2D(name=name)(x)
        else:
            raise NotImplementedError(module._typename)

        if name == target_layer:
            # print("Reached target layer", target_layer)
            break
    
    # Hook it up
    model = Model(inputs=inp, outputs=x)

    return model
Example #37
def main(argv):
    """
    main method of converting torch to paddle files.
    :param argv:
    :return:
    """
    cmdparser = argparse.ArgumentParser(
        "Convert torch parameter file to paddle model files.")
    cmdparser.add_argument(
        '-i', '--input', help='input filename of torch parameters')
    cmdparser.add_argument('-l', '--layers', help='list of layer names')
    cmdparser.add_argument(
        '-o', '--output', help='output file path of paddle model')

    args = cmdparser.parse_args(argv)
    if args.input and args.layers and args.output:
        params = torchfile.load(args.input)
        layers = [line.strip() for line in open(args.layers, 'r')]
        save_net_parameters(layers, params, args.output)
    else:
        print(
            'Usage: python torch2paddle.py -i torchfile.t7 -l layers.txt -o path/to/paddle_model'
        )
Example #38
def load_t7model(path=None, obj=None, model=None, custom_layers=None):
    if not (path is None or obj is None):
        raise Exception('you must pass a path or a TorchObject')
    if path:
        o = torchfile.load(path)
    else:
        o = obj

    # import pdb; pdb.set_trace()
    if type(o) is torchfile.TorchObject:
        class_name = o._typename.split('.')[-1]
        tmodule = o._obj

        if not hasattr(pyfunt, class_name):
            print('class %s not found' % class_name)
            print(please_contribute)
            raise NotImplementedError

        Module = getattr(pyfunt, class_name)
        if not is_container(Module):
            raise Exception('model is a torchobj but not a container')
        model = Module()
        add_inout(model, tmodule)

        m = load_t7model(obj=tmodule, model=model, custom_layers=custom_layers)
        if not model:
            model = m
    else:

        for i, tmodule in enumerate(o.modules):
            if type(tmodule) is torchfile.TorchObject:
                class_name = tmodule._typename.split('.')[-1]
                tmodule_o = tmodule._obj

                if hasattr(pyfunt, class_name):
                    Module = getattr(pyfunt, class_name)
                elif custom_layers and hasattr(custom_layers, class_name):
                    Module = getattr(custom_layers, class_name)
                else:
                    print('class %s not found' % class_name)
                    print(please_contribute)
                    raise NotImplementedError

                if i == 0 and model is None:
                    if not is_container(Module):
                        model = pyfunt.Sequential()
                #     else:
                #         model = Module()
                #         model = load_t7model(obj=tmodule, model=model)
                # else:
                if is_container(Module):
                    model.add(
                        load_t7model(obj=tmodule, model=model, custom_layers=custom_layers))
                else:
                    if class_name in load_parser_init:
                        args = load_parser_init[class_name](tmodule_o)
                        module = Module(*args)
                    else:
                        try:
                            module = Module()
                        except:
                            print('parser for %s not found' % class_name)
                            print('%s cannot be initialized with no args' %
                                  class_name)
                            print(please_contribute)
                            raise NotImplementedError

                    #add_possible_values(module, tmodule)
                    add_inout(module, tmodule_o)
                    add_w(module, tmodule_o)
                    if class_name in load_parser_vals:
                        load_parser_vals[class_name](module, tmodule_o)
                    model.add(module)
            else:
                print('oops!')
                print(please_contribute)
                pdb.set_trace()
                raise NotImplementedError
    return model
Example #39
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model

import torchfile
from torch.utils.serialization import load_lua
import torch
netparams = torchfile.load('data/resnet152/netparams.t7')
#netparams2 = load_lua('data/resnet152/netparams.t7')
#import ipdb; ipdb.set_trace()
netoutputs = []
for i in range(1, 12):
    path = 'data/resnet152/output{}.t7'.format(i)
    out = load_lua(path)
    #print(out.size())
    if out.dim() == 4:
        pass  # out.transpose_(2, 3)
    netoutputs.append(out)

net = resnet152()
state_dict = net.state_dict()

import collections
Example #40
import torchfile
import numpy as np

model = torchfile.load('kit.model')

weights = dict()

params = ['weight', 'bias', 'running_mean', 'running_var']

recursive = ['conv_nets']

def save_weight(name, node, level):
    weights[name] = dict()
    current_layer = weights[name]
    for p in params:
        if hasattr(node, p):
            func = getattr(node, p)
            arr = np.array(func)
            if arr.ndim >= 1:
                current_layer[p] = arr
                print ("    " * level + "{}.{} shape {} {}".format(name, p, current_layer[p].shape, current_layer[p].dtype))
    
    for p in recursive:
        if hasattr(node, p):
            func = getattr(node, p)
            if func is not None:
                for idx, subnode in enumerate(func):
                    new_name = name + ":{}:{}".format(p, idx)
                    save_weight(new_name, subnode, level + 1)
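The snippet defines save_weight but ends before calling it; a hypothetical entry point might look like this (the output filename is an assumption):

save_weight('kit', model, 0)  # walk the loaded model, filling the `weights` dict
np.save('kit_weights.npy', weights, allow_pickle=True)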

Example #41
import sys
import numpy as np
import torchfile

def convert_to_cov_format(in_arr, filepath, count):
    with open(filepath, 'wb') as f:  # tofile() needs a binary-mode handle on Python 3
        if in_arr.dtype != np.dtype('float64'):
            in_arr = in_arr.astype('float64')
        if count != 'all':  # truncate regardless of the input dtype
            in_arr = in_arr[:int(count)]
        n_points = np.array(in_arr.shape[0], dtype='int32')
        n_points.tofile(f)
        dims = np.array(in_arr.shape[1], dtype='int32')
        dims.tofile(f)
        in_arr.tofile(f)

if len(sys.argv) == 3:  # the original test `3 < len(sys.argv) < 4` could never be true
    sys.argv.append('all')
elif len(sys.argv) < 3:
    print("Use: python convert.py <original file> <output filename> <all|number of features to convert>")
    sys.exit(1)
in_arr = torchfile.load(sys.argv[1])
convert_to_cov_format(in_arr, sys.argv[2], sys.argv[3])
Example #42
        idx = batchnorm(m, idx, params, param_names)
    elif 'Linear' in module_type:
        idx = linear(m, idx, params, param_names)
    return idx


if __name__ == '__main__':
    parser = ArgumentParser(description='Convert params from torch to python '
            'dict. \n resnet could have depth of 18, 34, 101, 152; \n'
            'wrn has depth 50; preact has depth 200; addbn has depth 50')
    parser.add_argument("infile", help="torch checkpoint file")
    parser.add_argument("model", choices=['resnet', 'wrn', 'preact', 'addbn'])
    parser.add_argument("depth", type=int, choices=[18, 34, 50, 101, 152, 200])
    args = parser.parse_args()

    net = model.create_net(args.model, args.depth)
    # model.init_params(net)
    m = torchfile.load(args.infile)
    params = {}
    # params = net.param_values()
    param_names = net.param_names()
    traverse(m, 0, params, param_names)
    miss = [name for name in param_names if name not in params]
    if len(miss) > 0:
        print('The following params are missing from torch file')
        print(miss)

    outfile = os.path.splitext(args.infile)[0] + '.pickle'
    with open(outfile, 'wb') as fd:
        pickle.dump(params, fd)
Example #43
File: vis.py Project: ljofre/im2recipe
      'pasta':[],'salad':[],'smoothie':[],'cookie':[]}

parser = get_parser()
params = parser.parse_args()

#random.seed(params.seed)

DATA_ROOT = params.test_feats
IMPATH = os.path.join(params.dataset,'images')
partition = params.partition

img_embs = partition + '_img_embs.t7'
instr_embs = partition + '_instr_embs.t7'
test_ids = partition + '_ids.t7'

im_vecs = np.array(torchfile.load(os.path.join(DATA_ROOT,img_embs)))
instr_vecs = np.array(torchfile.load(os.path.join(DATA_ROOT,instr_embs)))
names = np.array(torchfile.load(os.path.join(DATA_ROOT,test_ids)))

im_vecs = normalize(im_vecs)
instr_vecs = normalize(instr_vecs)

# load dataset
print('Loading dataset.')
dataset = utils.Layer.merge([utils.Layer.L1, utils.Layer.L2, utils.Layer.INGRS],params.dataset)
print "Done."
idx2ind = {} #sample id to position in dataset
for i in range(len(dataset)):
    idx2ind[dataset[i]['id']] = i

names_str = []
Example #44
import numpy as np
import torchfile
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse

# set up
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
#np.random.seed(16)
np.random.seed(11)
mean = torchfile.load('../save/m.t7')
colors = {}
for j in range(len(mean)):
    colors[j] = np.random.rand(3,1)


plotting = True
count = 0
while plotting:

    xs = torchfile.load('../save/xs.t7')

    ax.cla()

    label = torchfile.load('../save/label.t7')
    mean = torchfile.load('../save/m.t7')

    # draw ellipse
    cov = torchfile.load('../save/cov.t7')
    def eigsorted(cov):
Example #45
from __future__ import print_function

import torchfile
import numpy as np

EMBEDDING = torchfile.load("../lstm-char-cnn/param_init_1.t7")

KERNEL_1_W = torchfile.load("../lstm-char-cnn/param_init_2.t7")
KERNEL_1_B = torchfile.load("../lstm-char-cnn/param_init_3.t7")

LSTM_1_W = torchfile.load("../lstm-char-cnn/param_init_4.t7")
LSTM_B = torchfile.load("../lstm-char-cnn/param_init_5.t7")
LSTM_2_W = torchfile.load("../lstm-char-cnn/param_init_6.t7")

# following manipulations make LSTM_W usable with BasicLSTMCell - need to flip some blocks to convert from Karpathy's LSTM implementation
LSTM_W = np.concatenate([LSTM_1_W, LSTM_2_W], axis=1)
a, b, c, d = np.split(LSTM_W, 4, axis=0)
LSTM_W = np.concatenate([a, d, c, b], axis=0)
LSTM_W = LSTM_W.transpose()

a, b, c, d = np.split(LSTM_B, 4)
LSTM_B = np.concatenate([a, d, c, b], axis=0)

SOFTMAX_W = torchfile.load("../lstm-char-cnn/param_init_7.t7")
SOFTMAX_B = torchfile.load("../lstm-char-cnn/param_init_8.t7")


if __name__ == "__main__":

    print(EMBEDDING)
Example #46
        predicted_classes = ranked_classes_all[i, :]
        x = (predicted_classes == expected_class)
        r = x * 1
        ind = np.where(predicted_classes == expected_class)[0][0]
        sum_ind = sum_ind + ind
        # AP = AP + 1. / (1 + ind)
        AP = AP + sum([sum(r[:z + 1]) / (z + 1.) for z, y in enumerate(r) if y])

    mAP = AP / num_test_samples

    print('Mean Average Precision:', mAP)
    return scores, mAP



training_data_torch = torchfile.load('/home/hessam/Activity_recognition/hmdb_stip/data_feat_train_RGB_centerCrop_25f_sp1.t7')
training_data_torch2 = torchfile.load('/home/hessam/Activity_recognition/hmdb_stip/data_feat_train_RGB_centerCrop_25f_sp2.t7')

testing_data_torch = torchfile.load('/home/hessam/Activity_recognition/hmdb_stip/data_feat_test_RGB_centerCrop_25f_sp1.t7')
testing_data_torch2 = torchfile.load('/home/hessam/Activity_recognition/hmdb_stip/data_feat_test_RGB_centerCrop_25f_sp2.t7')

vgg_matrix_train = training_data_torch['featMats']
vgg_matrix_train = np.swapaxes(vgg_matrix_train, 1, 2)

vgg_matrix_train2 = training_data_torch2['featMats']
vgg_matrix_train2 = np.swapaxes(vgg_matrix_train2, 1, 2)
vgg_matrix_train2 = np.vstack((vgg_matrix_train, vgg_matrix_train2))
Example #47
# loss
grasp_class_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logit, grasp_class_ph), name='grasp_class_loss')
# depth_loss = tf.reduce_mean(tf.square(depth_image_ph - depth_prediction), name='depth_loss')
# combined_loss = (1. - loss_lambda) * grasp_class_loss + loss_lambda * depth_loss
combined_loss = grasp_class_loss

# evaluation
batch = int(sys.argv[2])
correct_prediction = tf.equal(tf.argmax(grasp_class_prediction, 1), grasp_class_ph)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=1)
with tf.Session() as sess:
    restore_vars(saver, sess, checkpoint_dir)
    val_data = torchfile.load('val.t7')
    n = len(val_data['x'][0])
    print('%d samples' % n)
    acc = 0.
    loss = 0.
    for i in range(n // batch + 1):
        start = batch * i
        if n == start:
            break
        end = min(start + batch, n)
        rgb_image = val_data['x'][0][start:end].transpose(0, 2, 3, 1)
        grasp_class = val_data['y'][start:end] - 1
        eval_feed = {
            grasp_image_ph: rgb_image,
            grasp_class_ph: grasp_class,
            # depth_image_ph: d_image,
Example #48
from flask import Flask, request
import numpy as np
import json
import time
import cPickle
import torchfile
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA, FastICA
from sklearn.manifold import TSNE

print('Loading feature vectors.')
features_filenames = {('Allegheny', 19) : 'level19/Allegheny_z19_features.t7',
                      ('New_York' , 19) : 'level19/New_York_z19_features.t7',
                      ('San_Fran' , 19) : 'level19/San_Fran_z19_features.t7',
                      ('Detroit'  , 19) : 'level19/Detroit_z19_features.t7'}
all_features = {region_level : torchfile.load(features_filenames[region_level]) \
                for region_level in features_filenames}


def get_lat_long(filename):
    return (float(filename.split("_")[1]), float(filename.split("_")[2]))

print('Loading filenames.')
tile_filenames = {('Allegheny', 19) : 'level19/Allegheny_z19.txt',
                  ('New_York' , 19) : 'level19/New_York_z19.txt',
                  ('San_Fran' , 19) : 'level19/San_Fran_z19.txt',
                  ('Detroit'  , 19) : 'level19/Detroit_z19.txt'}

# dict of filename : (filename idx, (lat, long))
tile_dicts = {}
all_filenames = {}
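A sketch of how these feature matrices typically feed the imported NearestNeighbors (the parameters are assumptions):

features = all_features[('Detroit', 19)]
nn_index = NearestNeighbors(n_neighbors=10).fit(features)
dists, idxs = nn_index.kneighbors(features[:1])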
Example #49
File: plot_recon.py Project: lckfork/GMVAE
import numpy as np
import torchfile
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse


#np.random.seed(16)
np.random.seed(11)
# set up
plt.ion()

fig = plt.figure()
ax2 = fig.add_subplot(111)


mean = torchfile.load('../save/m.t7')
colors = {}
for j in range(len(mean)):
    colors[j] = np.random.rand(3,1)


plotting = True
count = 0
while plotting:

    x = torchfile.load('../save/xs.t7')
    ax2.cla()
    y = torchfile.load('../datasets/spiral.t7')
    ax2.scatter(y[:,0], y[:, 1], marker='.' , s = 5 )
    y_recon = torchfile.load('../save/recon.t7')
    label   = torchfile.load('../save/label.t7')
Example #50
in_height = args.height

lmdb_in_env = lmdb.open(args.input, readonly=True)
lmdb_in_txn = lmdb_in_env.begin()
lmdb_in_cursor = lmdb_in_txn.cursor()

lmdb_out_env = lmdb.open(args.output, map_size=4*1024*1024*1024)
with lmdb_out_env.begin(write=True) as out_txn:

    count = 0
    for key, value in lmdb_in_cursor:
        # print(value)

        with open("/tmp/nasse.t7", "wb") as f:
            f.write(value)
        item = torchfile.load("/tmp/nasse.t7")

        label=item[b"Name"].decode().split('_')[0]
        # print(label)
        
        jpeg_bytes = bytes(item[b"Data"])
        if False:  # optional center-crop path, disabled
            img = Image.open(BytesIO(jpeg_bytes))  # was `jpeg_data`, which is undefined
            # print(img.size)

            start_x = math.ceil((img.size[0] - in_width + 1) / 2)
            start_y = math.ceil((img.size[1] - in_height + 1) / 2)

            cropped = img.crop((start_x, start_y, start_x + in_width, start_y + in_height))

            jpeg_fo = BytesIO()
Example #51
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor

import numpy as np
import ujson
import torchfile
from sklearn.neighbors import NearestNeighbors

PORT = 4545
WORKERS = 12 * 2 + 1

# A dict mapping from query params to sample-file filenames
samples_filenames = {frozenset(('Detroit',)) : \
                     '/home/studio/Documents/level19/Detroit_z19_features.t7'}

all_samples = {key : torchfile.load(samples_filenames[key]) \
                for key in samples_filenames}

# A dict mapping from query params to sample label-files
sample_labels = {frozenset(('Detroit',)) : \
                 '/home/studio/Documents/level19/Detroit_z19.txt' }

label_to_idx = {}
all_labels = {}

# Load labels into labels dict and also reverse label to idx mapping
for key, label_file in sample_labels.items():
    with open(label_file, 'r') as f:
        
        labels = [line.rstrip() for line in f.readlines()] #filenames
        print('First 3 labels of ', key, labels[:3], '...', '\n')
Example #52
File: demo.py Project: Soledad89/StackGAN
        superimage = drawCaption(np.uint8(superimage), captions_batch[j])
        scipy.misc.imsave(fullpath, superimage)


if __name__ == "__main__":
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.gpu_id != -1:
        cfg.GPU_ID = args.gpu_id
    if args.caption_path is not None:
        cfg.TEST.CAPTION_PATH = args.caption_path

    # Load text embeddings generated from the encoder
    cap_path = cfg.TEST.CAPTION_PATH
    t_file = torchfile.load(cap_path)
    captions_list = t_file.raw_txt
    embeddings = np.concatenate(t_file.fea_txt, axis=0)
    num_embeddings = len(captions_list)
    print('Successfully load sentences from: ', cap_path)
    print('Total number of sentences:', num_embeddings)
    print('num_embeddings:', num_embeddings, embeddings.shape)
    # path to save generated samples
    save_dir = cap_path[:cap_path.find('.t7')]
    if num_embeddings > 0:
        batch_size = np.minimum(num_embeddings, cfg.TEST.BATCH_SIZE)

        # Build StackGAN and load the model
        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=config) as sess:
            with tf.device("/gpu:%d" % cfg.GPU_ID):