예제 #1
0
파일: train.py 프로젝트: lshpku/AutoQA
def train(from_version=1, epoches=50):
    """Restore the models from checkpoint *from_version*, then train and
    re-checkpoint them once per epoch for *epoches* epochs.

    Module-level state (embd, disc and their optimizers) is rebuilt and
    updated in place.
    """
    global embd, disc, embd_optim, disc_optim
    # Rebuild both networks from the checkpoint directory.
    embd = Embedding(len(dictionary), VEC_SIZE).load('.', from_version, device)
    disc = Discriminator(VEC_SIZE).load('.', from_version, device)
    # One plain-SGD optimizer per network, sharing the global learning rate.
    embd_optim = optim.SGD(embd.parameters(), lr=lr)
    disc_optim = optim.SGD(disc.parameters(), lr=lr)
    for offset in range(epoches):
        train_epoch()
        version = from_version + offset
        embd.save('.', version)
        disc.save('.', version)
예제 #2
0
    def __init__(self, hparams):
        """Build the NeRF system: loss, positional embeddings and model(s)."""
        super(NeRFSystem, self).__init__()
        self.hparams = hparams

        self.loss = MSELoss()  # loss_dict[hparams.loss_type]()

        # Positional encodings: 10 frequency bands for xyz, 4 for direction
        # (the defaults used throughout this project).
        self.embedding_xyz = Embedding(3, 10)
        self.embedding_dir = Embedding(3, 4)
        self.embeddings = [self.embedding_xyz, self.embedding_dir]

        # Always train a coarse model; add a fine one only when importance
        # sampling is enabled.
        self.nerf_coarse = NeRF()
        self.models = [self.nerf_coarse]
        if hparams.N_importance > 0:
            self.nerf_fine = NeRF()
            self.models.append(self.nerf_fine)
예제 #3
0
 def __init__(
     self, 
     det_model_dir, 
     emb_model_dir, 
     use_gpu=False, 
     run_mode='fluid', 
     threshold=0.5,
     max_cosine_distance=0.2, 
     nn_budget=100, 
     max_iou_distance=0.9, 
     max_age=70, 
     n_init=3
 ):
     """Wire up the detector, the appearance-embedding model and the
     cosine-metric tracker that together make up the pipeline."""
     self.threshold = threshold
     self.detector = Detector(det_model_dir, use_gpu, run_mode)
     self.emb = Embedding(emb_model_dir, use_gpu)
     # Nearest-neighbour cosine distance over appearance features drives
     # the tracker's data association.
     cosine_metric = NearestNeighborDistanceMetric(
         "cosine", max_cosine_distance, nn_budget)
     self.tracker = Tracker(
         cosine_metric,
         max_iou_distance=max_iou_distance,
         max_age=max_age,
         n_init=n_init)
예제 #4
0
class DeepSort(object):
    """Tracking-by-detection pipeline: a detector proposes boxes, an
    embedding model extracts appearance features, and a DeepSORT tracker
    associates detections across frames."""

    def __init__(
        self, 
        det_model_dir, 
        emb_model_dir, 
        use_gpu=False, 
        run_mode='fluid', 
        threshold=0.5,
        max_cosine_distance=0.2, 
        nn_budget=100, 
        max_iou_distance=0.9, 
        max_age=70, 
        n_init=3
    ):
        self.detector = Detector(det_model_dir, use_gpu, run_mode)
        self.emb = Embedding(emb_model_dir, use_gpu)
        self.threshold = threshold  # detection confidence cut-off
        # Cosine distance over appearance embeddings drives data association.
        metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
        self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)

    def update(self, ori_img):
        """Run one detect-embed-track step on an image array.

        Returns None when nothing was detected, otherwise an (N, 5)
        integer array of [x1, y1, x2, y2, track_id] rows (an empty list
        when no track is currently confirmed — kept for backward
        compatibility with existing callers).
        """
        self.height, self.width = ori_img.shape[:2]
        results = self.detector.predict(ori_img, self.threshold)
        if results is None:
            return None
        tlwh, xyxy, confidences = results
        if not confidences.tolist():
            return None

        # generate detections
        features = self.get_features(xyxy, ori_img)
        detections = [Detection(tlwh[i], conf, features[i])
                      for i, conf in enumerate(confidences)]

        # update tracker
        self.tracker.predict()
        self.tracker.update(detections)

        # output bbox identities
        outputs = []
        for track in self.tracker.tracks:
            # Skip tentative tracks and those not matched in this frame.
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            x1, y1, x2, y2 = track.to_tlbr()
            # np.int was removed in NumPy 1.24; the builtin int is the
            # documented drop-in replacement and yields the same dtype.
            outputs.append(np.array([x1, y1, x2, y2, track.track_id], dtype=int))
        if outputs:
            outputs = np.stack(outputs, axis=0)
        return outputs

    def get_features(self, xyxy, ori_img):
        """Crop each xyxy box out of *ori_img* and embed the crops.

        Assumes box coordinates are integer-valued — TODO confirm against
        the detector's output format.
        """
        crops = [ori_img[box[1]:box[3], box[0]:box[2], :] for box in xyxy]
        return self.emb.predict(crops)
예제 #5
0
    def __init__(self,
                 det_model_dir,
                 emb_model_dir,
                 use_gpu=False,
                 run_mode='fluid',
                 use_dynamic_shape=False,
                 trt_min_shape=1,
                 trt_max_shape=1280,
                 trt_opt_shape=640,
                 trt_calib_mode=False,
                 cpu_threads=1,
                 enable_mkldnn=False,
                 threshold=0.5,
                 max_cosine_distance=0.2,
                 nn_budget=100,
                 max_iou_distance=0.9,
                 max_age=70,
                 n_init=3):
        """Assemble the detector, the appearance embedder and the tracker."""
        self.threshold = threshold

        # Object detector; the trt_*/cpu_* tuning knobs are forwarded
        # verbatim to the Detector.
        self.detector = Detector(model_dir=det_model_dir,
                                 use_gpu=use_gpu,
                                 run_mode=run_mode,
                                 use_dynamic_shape=use_dynamic_shape,
                                 trt_min_shape=trt_min_shape,
                                 trt_max_shape=trt_max_shape,
                                 trt_opt_shape=trt_opt_shape,
                                 trt_calib_mode=trt_calib_mode,
                                 cpu_threads=cpu_threads,
                                 enable_mkldnn=enable_mkldnn)

        # Appearance-feature extractor.
        self.emb = Embedding(emb_model_dir, use_gpu, enable_mkldnn, cpu_threads)

        # Tracker associates detections via cosine distance on embeddings.
        cosine_metric = NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        self.tracker = Tracker(cosine_metric,
                               max_iou_distance=max_iou_distance,
                               max_age=max_age,
                               n_init=n_init)
예제 #6
0
    def __init__(self, hparams):
        """Set up dataset, loss, embeddings (frequency + grid) and NeRF model."""
        super(NeRFSystem, self).__init__()
        self.hparams = hparams

        kwargs = {'root_dir': self.hparams.root_dir,
                  'img_wh': tuple(self.hparams.img_wh)}
        self.dataset = dataset_dict[self.hparams.dataset_name]
        # Instantiated eagerly: the grid embedding below needs the training
        # set's bounding box.
        train_dataset = self.dataset(split='train', **kwargs)


        self.loss = MSELoss() # loss_dict[hparams.loss_type]()

        # NOTE(review): grid resolution is hard-coded; presumably it should
        # come from hparams — confirm.
        self.grid_size = (10,10,10)

        self.embedding_xyz = Embedding(3, 10) # 10 is the default number
        self.embedding_dir = Embedding(3, 4) # 4 is the default number
        # bounding_box, grid_size, embedding_dim
        self.grid_embedding = GridEmbedding(self.grid_size, 32, train_dataset.get_bounding_box()) # grid_size, embedding_dim
        self.embeddings = [self.embedding_xyz, self.embedding_dir, self.grid_embedding]

        self.nerf_coarse = NeRF()
        self.models = [self.nerf_coarse]
# Corpus statistics for a quick sanity check.
print('Source vocabulary size: ' + str(corpus.sourceVoc.size()))
print('Target vocabulary size: ' + str(corpus.targetVoc.size()))
print()
print('# of training samples: ' + str(len(corpus.trainData)))
print('# of develop samples:  ' + str(len(corpus.devData)))
print('Random seed: ', str(seed))

# Use the restricted (small) softmax only when K is a valid vocabulary cut.
useSmallSoftmax = (K > 0 and K <= corpus.targetVoc.size())

if useSmallSoftmax:
    print('K = ', K)
else:
    print('Full softmax')
print()

# Embedding tables and the encoder-decoder model.
embedding = Embedding(sourceEmbedDim, targetEmbedDim, corpus.sourceVoc.size(),
                      corpus.targetVoc.size())
encdec = EncDec(sourceEmbedDim,
                targetEmbedDim,
                hiddenDim,
                corpus.targetVoc.size(),
                useSmallSoftmax=useSmallSoftmax,
                dropoutRate=dropoutRate,
                numLayers=numLayers)

if useSmallSoftmax:
    # Pre-trained vocabulary predictor, loaded frozen (eval mode) onto GPU.
    vocGen = VocGenerator(vocGenHiddenDim, corpus.targetVoc.size(),
                          corpus.sourceVoc.size())
    vocGen.load_state_dict(torch.load(vocGenFile))
    vocGen.cuda()
    vocGen.eval()
예제 #8
0
    label_vocab = saved_state['label_vocab']
    test_set = Dataset(test_file, labels=labels)
    test_set.token_vocab = token_vocab
    test_set.label_vocab = label_vocab
    test_set.numberize_dataset()


# ----------------------------------------------------------------------
# Construct the model
models = {}
optimizers = {}
if mode == 'train':
    # One word-embedding layer, shared by all per-label classifiers and
    # optionally seeded from a pre-trained embedding file.
    word_embedding = Embedding(len(token_vocab),
                               embedding_dim,
                               padding_idx=0,
                               sparse=True,
                               pretrain=embedding_file,
                               vocab=token_vocab,
                               trainable=True)
    # One classifier (and its own SGD optimizer) per target label; the
    # final linear layer has 2 outputs (binary decision per label).
    for target_label in labels:
        lstm = LSTM(embedding_dim, hidden_size, batch_first=True, forget_bias=1.0)
        linears = [Linear(i, o) for i, o
                   in zip([hidden_size] + linear_sizes, linear_sizes + [2])]
        model = MoralClassifier(word_embedding, lstm, linears)
        if use_gpu:
            model.cuda()
        # Only optimize parameters that require gradients.
        optimizer = torch.optim.SGD(
            filter(lambda p: p.requires_grad, model.parameters()),
            lr=learning_rate, momentum=.9)
        models[target_label] = model
        optimizers[target_label] = optimizer
예제 #9
0
        (dev_set, token_vocab, label_vocab, char_vocab),
        (test_set, token_vocab, label_vocab, char_vocab),
    ],
    token_ignore_case=word_ignore_case,
    label_ignore_case=False,
    char_ignore_case=False
)

# Model components
logger.info('Building the model')
# Word-level embedding layer, configured from the pre-trained embedding file.
word_embed = Embedding(Config({
    'num_embeddings': len(token_vocab),
    'embedding_dim': args.word_embed_dim,
    'padding': C.EMBED_START_IDX,
    'padding_idx': 0,
    'sparse': True,
    'trainable': True,
    'file': embed_file,
    'stats': args.embed_skip_first,
    'vocab': token_vocab,
    'ignore_case': word_ignore_case
}))
# Character-level CNN feature extractor.
char_cnn = CharCNN(Config({
    'vocab_size': len(char_vocab),
    'padding': C.CHAR_EMBED_START_IDX,
    'dimension': args.char_embed_dim,
    'filters': charcnn_filters
}))
char_highway = Highway(Config({
    'num_layers': 2,
    'size': char_cnn.output_size,
    'activation': 'selu'
예제 #10
0
def train_model(args):
    """End-to-end NMT training loop.

    Builds the corpus, embedding and encoder-decoder from *args*, then
    trains with SGD. Twice per epoch it greedily decodes the dev set,
    scores it with an external BLEU script, decays the learning rate when
    dev BLEU drops, and checkpoints the best model under ./params/.
    """
    # Set train/dev source/target files.
    # NOTE(review): the "Orig" files alias the same paths as the tokenized
    # ones — confirm this is intended.
    sourceDevFile = args.sourceDevFile
    sourceOrigDevFile = args.sourceDevFile
    targetDevFile = args.targetDevFile
    sourceTrainFile = args.sourceTrainFile
    sourceOrigTrainFile = args.sourceTrainFile
    targetTrainFile = args.targetTrainFile

    minFreqSource = args.minFreqSource
    minFreqTarget = args.minFreqTarget
    hiddenDim = args.hiddenDim
    decay = args.decay
    gradClip = args.gradClip
    dropoutRate = args.dropoutRate
    numLayers = args.numLayers

    maxLen = args.maxLen
    maxEpoch = args.maxEpoch
    decayStart = args.decayStart

    # Embedding sizes are tied to the hidden dimension.
    sourceEmbedDim = args.hiddenDim
    targetEmbedDim = args.hiddenDim

    batchSize = args.batchSize
    learningRate = args.learningRate
    momentumRate = args.momentumRate

    gpuId = args.gpuId
    seed = args.seed
    device = torch.device('cuda:' + str(gpuId[0]))
    cpu = torch.device('cpu')

    weightDecay = args.weightDecay

    beamSize = args.beamSize  # NOTE(review): unused in this function

    torch.set_num_threads(1)

    # Seed all RNGs for reproducibility.
    torch.manual_seed(seed)
    random.seed(seed)
    torch.cuda.manual_seed(seed)

    corpus = Corpus(sourceTrainFile, sourceOrigTrainFile, targetTrainFile,
                    sourceDevFile, sourceOrigDevFile, targetDevFile,
                    minFreqSource, minFreqTarget, maxLen)

    print('Source vocabulary size: ' + str(corpus.sourceVoc.size()))
    print('Target vocabulary size: ' + str(corpus.targetVoc.size()))
    print()
    print('# of training samples: ' + str(len(corpus.trainData)))
    print('# of develop samples:  ' + str(len(corpus.devData)))
    print('SEED: ', str(seed))
    print()

    embedding = Embedding(sourceEmbedDim, targetEmbedDim,
                          corpus.sourceVoc.size(), corpus.targetVoc.size())
    encdec = EncDec(sourceEmbedDim,
                    targetEmbedDim,
                    hiddenDim,
                    corpus.targetVoc.size(),
                    dropoutRate=dropoutRate,
                    numLayers=numLayers)

    # Tie the output softmax weights to the target embedding table and
    # spread the word predictor across the requested GPUs.
    encdec.wordPredictor.softmaxLayer.weight = embedding.targetEmbedding.weight
    encdec.wordPredictor = nn.DataParallel(encdec.wordPredictor, gpuId)

    embedding.to(device)
    encdec.to(device)

    batchListTrain = utils.buildBatchList(len(corpus.trainData), batchSize)
    batchListDev = utils.buildBatchList(len(corpus.devData), batchSize)

    # Biases and embedding tables are exempt from weight decay; 'softmax'
    # parameters are excluded entirely (tied to the embeddings above).
    withoutWeightDecay = []
    withWeightDecay = []
    for name, param in list(embedding.named_parameters()) + list(
            encdec.named_parameters()):
        if 'bias' in name or 'Embedding' in name:
            withoutWeightDecay += [param]
        elif 'softmax' not in name:
            withWeightDecay += [param]
    optParams = [{
        'params': withWeightDecay,
        'weight_decay': weightDecay
    }, {
        'params': withoutWeightDecay,
        'weight_decay': 0.0
    }]
    totalParamsNMT = withoutWeightDecay + withWeightDecay

    opt = optim.SGD(optParams, momentum=momentumRate, lr=learningRate)

    bestDevGleu = -1.0
    prevDevGleu = -1.0

    for epoch in range(maxEpoch):
        batchProcessed = 0
        totalLoss = 0.0
        totalTrainTokenCount = 0.0

        print('--- Epoch ' + str(epoch + 1))
        startTime = time.time()

        random.shuffle(corpus.trainData)

        embedding.train()
        encdec.train()

        for batch in batchListTrain:
            # In-place progress indicator.
            print('\r', end='')
            print(batchProcessed + 1, '/', len(batchListTrain), end='')

            # Batches are (firstIndex, lastIndex) pairs, inclusive.
            batchSize = batch[1] - batch[0] + 1

            opt.zero_grad()

            batchInputSource, lengthsSource, batchInputTarget, batchTarget, lengthsTarget, tokenCount, batchData = corpus.processBatchInfoNMT(
                batch, train=True, device=device)

            inputSource = embedding.getBatchedSourceEmbedding(batchInputSource)
            sourceH, (hn, cn) = encdec.encode(inputSource, lengthsSource)

            batchInputTarget = batchInputTarget.to(device)
            batchTarget = batchTarget.to(device)
            inputTarget = embedding.getBatchedTargetEmbedding(batchInputTarget)

            loss = encdec(inputTarget, lengthsTarget, lengthsSource, (hn, cn),
                          sourceH, batchTarget)
            loss = loss.sum()

            totalLoss += loss.item()
            totalTrainTokenCount += tokenCount

            # Average per sample before backprop; clip the global grad norm.
            loss /= batchSize
            loss.backward()
            nn.utils.clip_grad_norm_(totalParamsNMT, gradClip)
            opt.step()

            batchProcessed += 1
            # Evaluate at the half-epoch mark and at the end of the epoch.
            if batchProcessed == len(
                    batchListTrain) // 2 or batchProcessed == len(
                        batchListTrain):
                devPerp = 0.0
                devGleu = 0.0
                totalTokenCount = 0.0

                embedding.eval()
                encdec.eval()
                torch.set_grad_enabled(False)

                print()
                print('Training time: ' + str(time.time() - startTime) +
                      ' sec')
                print('Train perp: ' +
                      str(math.exp(totalLoss / totalTrainTokenCount)))

                # Hypotheses and references for the external BLEU script.
                f_trans = open('./trans.txt', 'w')
                f_gold = open('./gold.txt', 'w')

                for batch in batchListDev:
                    batchSize = batch[1] - batch[0] + 1
                    batchInputSource, lengthsSource, batchInputTarget, batchTarget, lengthsTarget, tokenCount, batchData = corpus.processBatchInfoNMT(
                        batch, train=False, device=device)

                    inputSource = embedding.getBatchedSourceEmbedding(
                        batchInputSource)
                    sourceH, (hn, cn) = encdec.encode(inputSource,
                                                      lengthsSource)

                    # Greedy decoding of the dev batch.
                    indicesGreedy, lengthsGreedy, attentionIndices = encdec.greedyTrans(
                        corpus.targetVoc.bosIndex,
                        corpus.targetVoc.eosIndex,
                        lengthsSource,
                        embedding.targetEmbedding,
                        sourceH, (hn, cn),
                        device,
                        maxGenLen=maxLen)
                    indicesGreedy = indicesGreedy.to(cpu)

                    for i in range(batchSize):
                        for k in range(lengthsGreedy[i] - 1):
                            index = indicesGreedy[i, k].item()
                            # UNK outputs are replaced by the attended
                            # source token (attention-based copy).
                            if index == corpus.targetVoc.unkIndex:
                                index = attentionIndices[i, k].item()
                                f_trans.write(
                                    batchData[i].sourceOrigStr[index] + ' ')
                            else:
                                f_trans.write(
                                    corpus.targetVoc.tokenList[index].str +
                                    ' ')
                        f_trans.write('\n')

                        for k in range(lengthsTarget[i] - 1):
                            index = batchInputTarget[i, k + 1].item()
                            if index == corpus.targetVoc.unkIndex:
                                f_gold.write(batchData[i].targetUnkMap[k] +
                                             ' ')
                            else:
                                f_gold.write(
                                    corpus.targetVoc.tokenList[index].str +
                                    ' ')
                        f_gold.write('\n')

                    batchInputTarget = batchInputTarget.to(device)
                    batchTarget = batchTarget.to(device)
                    inputTarget = embedding.getBatchedTargetEmbedding(
                        batchInputTarget)

                    loss = encdec(inputTarget, lengthsTarget, lengthsSource,
                                  (hn, cn), sourceH, batchTarget)
                    loss = loss.sum()
                    devPerp += loss.item()

                    totalTokenCount += tokenCount

                f_trans.close()
                f_gold.close()
                # External script scores trans.txt against gold.txt and
                # writes the result to bleu.txt; only the first line's
                # third field (minus its trailing character) is read back.
                os.system("./bleu.sh 2> DUMMY")
                f_trans = open('./bleu.txt', 'r')
                for line in f_trans:
                    devGleu = float(line.split()[2][0:-1])
                    break
                f_trans.close()

                devPerp = math.exp(devPerp / totalTokenCount)
                print("Dev perp:", devPerp)
                print("Dev BLEU:", devGleu)

                embedding.train()
                encdec.train()
                torch.set_grad_enabled(True)

                # After decayStart epochs, decay the learning rate whenever
                # dev BLEU drops; otherwise checkpoint on a new best BLEU.
                if epoch > decayStart and devGleu < prevDevGleu:
                    print('lr -> ' + str(learningRate * decay))
                    learningRate *= decay

                    for paramGroup in opt.param_groups:
                        paramGroup['lr'] = learningRate

                elif devGleu >= bestDevGleu:
                    bestDevGleu = devGleu

                    # Move tensors to CPU before serializing.
                    stateDict = embedding.state_dict()
                    for elem in stateDict:
                        stateDict[elem] = stateDict[elem].to(cpu)
                    torch.save(stateDict, './params/embedding.bin')

                    stateDict = encdec.state_dict()
                    for elem in stateDict:
                        stateDict[elem] = stateDict[elem].to(cpu)
                    torch.save(stateDict, './params/encdec.bin')

                prevDevGleu = devGleu
예제 #11
0
torch.cuda.manual_seed(seed)

# Load the parallel corpus using pre-built vocabulary files.
corpus = Corpus(sourceVocFile, targetVocFile, sourceTrainFile,
                sourceOrigTrainFile, targetTrainFile, sourceDevFile,
                sourceOrigDevFile, targetDevFile, minFreqSource, minFreqTarget,
                maxLen)

print('Source vocabulary size: ' + str(corpus.sourceVoc.size()))
print('Target vocabulary size: ' + str(corpus.targetVoc.size()))
print()
print('# of training samples: ' + str(len(corpus.trainData)))
print('# of develop samples:  ' + str(len(corpus.devData)))
print('SEED: ', str(seed))
print()

embedding = Embedding(sourceEmbedDim, targetEmbedDim, corpus.sourceVoc.size(),
                      corpus.targetVoc.size())
encdec = EncDec(sourceEmbedDim,
                targetEmbedDim,
                hiddenDim,
                corpus.targetVoc.size(),
                dropoutRate=dropoutRate,
                numLayers=numLayers)

# Tie the output softmax weights to the target embedding table and
# parallelize the word predictor across the listed GPUs.
encdec.wordPredictor.softmaxLayer.weight = embedding.targetEmbedding.weight
encdec.wordPredictor = nn.DataParallel(encdec.wordPredictor, gpuId)

# Models are moved to the GPU only in training mode.
if train:
    embedding.to(device)
    encdec.to(device)

batchListTrain = utils.buildBatchList(len(corpus.trainData), batchSize)
예제 #12
0
def test_embedding():
    """Smoke-test Embedding: build a 5x10 table with known weights and
    print the lookup result for a few (repeated) indices."""
    # Row i of the initial table is the constant vector 0.1 * i.
    weights = np.asarray([[0.1 * row] * 10 for row in range(5)])
    embedding = Embedding(5, 10, weights)
    print(embedding)
    indices = Variable(torch.LongTensor([0, 1, 1, 2, 4]))
    print(embedding(indices))
예제 #13
0
    args = get_opts()
    w, h = args.img_wh
    # Render at half the requested resolution.
    # NOTE(review): w/h are halved here but kwargs below still uses the
    # original args.img_wh — confirm this is intended.
    w, h = int(w * 0.5), int(h * 0.5)

    kwargs = {
        'root_dir': args.root_dir,
        'split': args.split,
        'img_wh': tuple(args.img_wh)
    }
    if args.dataset_name == 'llff':
        kwargs['spheric_poses'] = args.spheric_poses
    dataset = dataset_dict[args.dataset_name](**kwargs)
    # dataset = shapenet_dl.ShapeNet(**kwargs)  #
    # dataset = zbrush_dl.Zbrush(**kwargs)  # dataset_dict[args.dataset_name](**kwargs)

    # Positional encodings plus the coarse/fine NeRF pair, restored from
    # the checkpoint and frozen (eval mode) for inference.
    embedding_xyz = Embedding(3, 10)
    embedding_dir = Embedding(3, 4)
    nerf_coarse = NeRF()
    nerf_fine = NeRF()
    load_ckpt(nerf_coarse, args.ckpt_path, model_name='nerf_coarse')
    load_ckpt(nerf_fine, args.ckpt_path, model_name='nerf_fine')
    nerf_coarse.cuda().eval()
    nerf_fine.cuda().eval()

    models = [nerf_coarse, nerf_fine]
    embeddings = [embedding_xyz, embedding_dir]

    # Output directory for rendered images and PSNR scores.
    imgs = []
    psnrs = []
    dir_name = f'results/{args.dataset_name}/{args.scene_name}'
    os.makedirs(dir_name, exist_ok=True)
예제 #14
0
wordParamsFile = 'word_params-' + str(gpuId)  # for pre-trained embeddings
charParamsFile = 'char_params-' + str(gpuId)  # for pre-trained embeddings

torch.manual_seed(seed)
random.seed(seed)

corpus = Corpus(trainFile, devFile, wordDropoutCoeff)

print('Vocabulary size: ' + str(corpus.voc.size()))
print('# of classes:    ' + str(corpus.classVoc.size()))
print()
print('# of training samples: ' + str(len(corpus.trainData)))
print('# of dev samples:      ' + str(len(corpus.devData)))
print()

# Word+char embedding layer feeding the sequence tagger.
embedding = Embedding(corpus.voc.size(), corpus.charVoc.size(), embedDim,
                      charDim)
tagger = Tagger(embedDim + charDim, hiddenDim, corpus.classVoc.size(),
                inputDropoutRate, outputDropoutRate)

if not test and not args.random:
    # Load cached pre-trained word embeddings if present; otherwise read
    # them from the embedding file and cache the state dict for next time.
    if os.path.exists(wordParamsFile):
        embedding.wordEmbedding.load_state_dict(torch.load(wordParamsFile))
    else:
        utils.loadEmbeddings(embedding.wordEmbedding, corpus.voc,
                             wordEmbeddingFile)
        torch.save(embedding.wordEmbedding.state_dict(), wordParamsFile)

    if os.path.exists(charParamsFile):
        embedding.charEmbedding.load_state_dict(torch.load(charParamsFile))
    else:
        utils.loadEmbeddings(embedding.charEmbedding, corpus.charVoc,
예제 #15
0
# Restore vocabularies and training hyper-parameters from the checkpoint.
token_vocab = state['vocab']['token']
label_vocab = state['vocab']['label']
char_vocab = state['vocab']['char']
train_args = state['args']
# Parse "a,b;c,d" into [[a, b], [c, d]] filter specs for the char CNN.
charcnn_filters = [[int(f.split(',')[0]),
                    int(f.split(',')[1])]
                   for f in train_args['charcnn_filters'].split(';')]

# Resume model
logger.info('Resuming the model')
# Rebuild components with the same hyper-parameters they were trained with.
word_embed = Embedding(
    Config({
        'num_embeddings': len(token_vocab),
        'embedding_dim': train_args['word_embed_dim'],
        'padding': C.EMBED_START_IDX,
        'padding_idx': 0,
        'sparse': True,
        'trainable': True,
        'stats': train_args['embed_skip_first'],
        'vocab': token_vocab,
        'ignore_case': train_args['word_ignore_case']
    }))
char_cnn = CharCNN(
    Config({
        'vocab_size': len(char_vocab),
        'padding': C.CHAR_EMBED_START_IDX,
        'dimension': train_args['char_embed_dim'],
        'filters': charcnn_filters
    }))
char_highway = Highway(
    Config({
        'num_layers': 2,