def Validate(opt, net, loss_func, validate_loader, tb, epoch):
    """Run one no-grad validation pass and log loss/acc/AUC/ROC to tensorboard.

    Args:
        opt: config object; uses opt.validate_count and opt.length.
        net: model with eval()/train() modes, called as net(a_chain, b_chain).
        loss_func: criterion comparing net output against the labels.
        validate_loader: loader whose dataset exposes .dataset and .label.
        tb: tensorboard SummaryWriter.
        epoch: current epoch index, used as the logging step.
    """
    with torch.no_grad():
        net.eval()

        n = opt.validate_count
        label = validate_loader.dataset.label
        # Split each sample along the length axis into the two chains to match.
        a_chain = toTensor(
            validate_loader.dataset.dataset[:, :, :opt.length, :])
        b_chain = toTensor(validate_loader.dataset.dataset[:, :,
                                                           opt.length:, :])

        output = net(a_chain, b_chain)
        loss = loss_func(output, label).data.numpy()
        output = output.view(-1).data.numpy()
        label = label.data.numpy()

        # Binary decision at the 0.5 threshold.
        pred = (output > 0.5)
        acc = (pred == label).mean()
        auc = get_AUC(output, label, n)
        roc = get_ROC(output, label, n)

        print("Validate: loss: %lf, acc: %lf, auc: %lf" % (loss, acc, auc))
        tb.add_scalar('validate/validate_loss', loss, epoch)
        tb.add_scalar('validate/validate_acc', acc, epoch)
        # Mean confidence: distance of outputs from the 0.5 boundary in [0, 1].
        tb.add_scalar('validate/validate_prec',
                      abs((output - 0.5) * 2).mean(), epoch)
        # Fixed tag: was 'validate/validate/auc', which was inconsistent with
        # the other scalar tags and created a spurious nested tensorboard group.
        tb.add_scalar('validate/validate_auc', auc, epoch)
        tb.add_figure('validate/ROC curve', roc, epoch)

        # Restore training mode for the caller.
        net.train()
# Exemple #2
# 0
def match(opt, net, data):
    """Greedily match the first halves of the samples against the second.

    Args:
        opt: config object; uses opt.length as the split point.
        net: model exposing feature_extractor_x/feature_extractor_y and a
            pairwise scoring method net.match(feat_a, feat_b).
        data: array of shape (n, channels, total_length).

    Returns:
        (acc1, acc2): row-wise and column-wise greedy matching accuracies.
    """
    n = data.shape[0]
    a = toTensor(data[:, :, :opt.length])
    b = toTensor(data[:, :, opt.length:])
    feat_a = net.feature_extractor_x(a)
    feat_b = net.feature_extractor_y(b)
    # NOTE(review): removed a stray embed() debug-shell call left in here;
    # it dropped execution into an interactive IPython session.

    target = [i for i in range(n)]
    # weight[j][i] = score of chain a_i against chain b_j. Stack into an
    # (n, n) tensor — the original built a Python list of lists, which
    # torch.max / .transpose cannot consume.
    # Assumes net.match returns a scalar (or 1-element) tensor — TODO confirm.
    weight = torch.stack([
        torch.stack([net.match(feat_a[i], feat_b[j]).reshape(-1)[0]
                     for i in range(n)])
        for j in range(n)])

    # Matching strategy 1: greedy argmax.
    #   1.1 for each row: pick the best a for every b.
    pred = torch.max(weight, 1)[1].data.cpu().numpy().squeeze()
    acc1 = sum(pred == target) / n
    #   1.2 for each column: pick the best b for every a.
    pred = torch.max(weight.transpose(0, 1), 1)[1].data.cpu().numpy().squeeze()
    acc2 = sum(pred == target) / n

    # TODO: 2. km — optimal assignment (Kuhn-Munkres / Hungarian), not
    # implemented yet.

    return (acc1, acc2)
def generateDataset(trainExamples, vocab, use_shuffle = True, skip_disagreement = False):
    """Build per-conversation (goals, context/response pairs, example) triples.

    Args:
        trainExamples: list of dicts with "input", "dialogue", "output" and
            "partner input" keys.
        vocab: word list used to build the index translations.
        use_shuffle: shuffle the resulting conversations in place.
        skip_disagreement: drop examples whose output contains -1 (the corpus
            marker for a failed negotiation).

    Returns:
        List of (goals_tensor, [(context_tensor, target_tensor), ...], example)
        triples, one per (kept) training example.
    """
    wordToIndex, indexToWord = utils.getIndexTranslations(vocab)

    conversations = []

    for i, example in enumerate(trainExamples):
        agent_input = example["input"]
        dialogue = example["dialogue"]
        output = example["output"]

        if skip_disagreement:
            output_vector = output[0] + output[1]
            if -1 in output_vector:
                # There was disagreement, so don't bother training on this
                # example.
                continue

        goals_tensor = utils.toTensor(agent_input, "goals")

        # The running context starts with <START> and grows with every
        # utterance of the dialogue.
        running_context = [wordToIndex["<START>"]]
        contextResponsePairs = []

        for sentence_i, sentence in enumerate(dialogue):
            speaker = sentence[0]
            utterance = [speaker]
            utterance += sentence[1].split(" ")
            utterance += ["<eos>"]

            # Map out-of-vocabulary words to <UNK>.
            utterance_idx = [wordToIndex[word] if word in wordToIndex else wordToIndex["<UNK>"] for word in utterance]

            # Clip the speaker token: the target is the words only.
            target_tensor = torch.tensor(utterance_idx[1:], dtype=torch.long)

            running_context_tensor = torch.tensor(running_context, dtype=torch.long)
            contextResponsePairs.append((running_context_tensor, target_tensor))

            # The full utterance (speaker included) becomes context for the
            # next turn.
            running_context += utterance_idx

        conversations.append((goals_tensor, contextResponsePairs, example))

    if (use_shuffle):
        random.shuffle(conversations)

    return conversations
# Exemple #4
# 0
def rnnBotConsole(trainExamples, vocab):
    """Interactive console: negotiate against the trained RNN bot.

    Loads the saved encoder/decoder/goals-encoder/output-classifier models,
    picks a random training example for the item pool, alternates bot and
    human turns until "<selection>", then predicts and scores the final split.

    Args:
        trainExamples: list of example dicts with "input" / "partner input".
        vocab: base vocabulary; special tokens are appended below.
    """
    #import pdb; pdb.set_trace()

    # Extend the vocabulary with the special tokens the models were trained
    # with, then sort so indices match utils.getIndexTranslations.
    vocab.append("<START>")
    vocab.append("<sos>")
    vocab.append("<eos>")
    vocab.append("YOU")
    vocab.append("THEM")
    if "<UNK>" not in vocab:
        vocab.append("<UNK>")
    vocab = sorted(vocab)

    wordToIndex, indexToWord = utils.getIndexTranslations(vocab)


    # Load the pretrained model components from disk.
    #encoder = torch.load("savedModels/encoderTrained.pth")
    #decoder = torch.load("savedModels/decoderTrained.pth")
    #goals_encoder = torch.load("savedModels/goals_encoderTrained.pth")
    encoder = torch.load("savedModels/encoder.pth")
    decoder = torch.load("savedModels/decoder.pth")
    goals_encoder = torch.load("savedModels/goals_encoder.pth")
    output_classifier = torch.load("savedModels/outputClassifier.pth")



    # One negotiation game per iteration; user quits via the sentinel prompt.
    while(True):
        index = random.choice(range(len(trainExamples)))
        ex = trainExamples[index]

        bot_input = ex["input"]
        opponent_input = ex["partner input"]
        print("\n\nOpponent input = " + inputToString(opponent_input))
        #print("Bot input = ", inputToString(bot_input))

        # Each input entry is a (count, value) pair per item type.
        total_counts = [count for count,value in bot_input]
        bot_values = [value for count,value in bot_input]
        opponent_values = [value for count,value in opponent_input]

        '''bot_value_sort = np.argsort((np.multiply(bot_values, total_counts)))

        mostValuableIndex = bot_value_sort[len(items) -1]
        middleIndex = bot_value_sort[1]
        leastValuableIndex = bot_value_sort[0]

        botItems, opponentItems = None, None

        attemptedTopItem = False
        num_attempts = 0'''

        bot_goals_tensor = utils.toTensor(bot_input, "goals")
        encoded_goals = goals_encoder(bot_goals_tensor)

        running_context = ["<START>"]


        # Randomly decide whether the bot opens the dialogue.
        botStarting = random.choice([True, False])
        if(botStarting):

            # Encode: feed only the <START> token as context.
            encoder_input = torch.tensor([wordToIndex["<START>"]], dtype=torch.long)[0]
            hidden = encoder.initHidden()
            hidden = encoder(encoder_input, hidden, encoded_goals)

            # Decode greedily until <eos>.
            selected_tokens = []

            encoded_context = hidden
            hidden = decoder.initHidden()
            decoder_input = torch.tensor([wordToIndex["<sos>"]], dtype=torch.long)[0]
            while(True):
                output, hidden = decoder(decoder_input, encoded_context, hidden, encoded_goals)

                # Greedy decoding (sampling variant kept for reference).
                #categorical = torch.distributions.Categorical(output)
                #selected_token = categorical.sample()
                selected_token_probability, selected_token = torch.max(output, 1)

                selected_token_as_word = indexToWord[int(selected_token[0].numpy())]

                if (selected_token_as_word == "<eos>"):
                    break

                #if(selected_token_as_word != "THEM" and selected_token_as_word != "YOU"):
                selected_tokens.append(selected_token_as_word)

                decoder_input = selected_token[0]

            print("Bot statement: ", " ".join(selected_tokens))
            running_context += ["YOU"] + selected_tokens + ["<eos>"]
            #print("running context = ", running_context)


        # Dialogue loop: human turn, then bot response, until <selection>.
        while(True):
            input_str = input('Enter a response: ')
            if(input_str == "<selection>"):
                running_context += ["THEM", "<selection>"]
                break

            tokenized = tokenize(input_str)

            running_context += ["THEM"] + tokenized + ["<eos>"]
            #print("running_context = ", running_context)

            # Re-encode the whole running context word by word.
            running_context_idx = [wordToIndex[word] if word in wordToIndex else wordToIndex["<UNK>"] for word in running_context]
            running_context_tensor = torch.tensor(running_context_idx, dtype=torch.long)

            context = running_context_tensor
            hidden = encoder.initHidden()
            num_context_words = context.size(0)
            for i in range(num_context_words):
                hidden = encoder(context[i], hidden, encoded_goals)

            # Decode the bot's reply, capped at maxResponseLen tokens.
            maxResponseLen = 50
            selected_tokens = []
            encoded_context = hidden
            hidden = decoder.initHidden()
            decoder_input = torch.tensor([wordToIndex["<sos>"]], dtype=torch.long)[0]
            while(True):
                output, hidden = decoder(decoder_input, encoded_context, hidden, encoded_goals)

                selected_token_probability, selected_token = torch.max(output, 1)

                selected_token_as_word = indexToWord[int(selected_token[0].numpy())]

                if (selected_token_as_word == "<eos>"):
                    break

                #if(selected_token_as_word != "THEM" and selected_token_as_word != "YOU"):
                selected_tokens.append(selected_token_as_word)


                if(len(selected_tokens) >= maxResponseLen):
                    break

                decoder_input = selected_token[0]


            print("Bot response: ", " ".join(selected_tokens))
            running_context += ["YOU"] + selected_tokens + ["<eos>"]

            if (selected_tokens == ["<selection>"]):
                break

        # Classify the finished dialogue into an item split.
        #print("final running context = ", " ".join(running_context))
        running_context_idx = [wordToIndex[word] if word in wordToIndex else wordToIndex["<UNK>"] for word in running_context]
        running_context_tensor = torch.tensor(running_context_idx, dtype=torch.long)

        num_words = running_context_tensor.size(0)

        hidden = output_classifier.initHidden()
        for i in range(num_words):
            outputs, hidden = output_classifier(running_context_tensor[i], hidden, bot_goals_tensor)

        # in form: counts = [count1, count2, count3, opponent_count1, opponent_count2, opponent_count3]
        bot_selection = findBestValidPrediction(outputs, bot_input)
        bot_counts = bot_selection[:3]
        player_counts = bot_selection[3:]

        print("Bot input = ", inputToString(bot_input))
        print("Bot items: ", countsToString(bot_counts))
        print("Player items: ", countsToString(player_counts))

        # Score = dot product of received counts with each side's values.
        print("Bot score = ", np.sum(np.multiply(bot_counts, bot_values)))
        print("Opponent score = ", np.sum(np.multiply(player_counts, opponent_values)))



        sentinel = input("Type 'quit' to exit, anything else to run another example: ")
        if (sentinel == 'quit'):
            break
# --- Neural-style-transfer setup (flat, top-level script statements) ---
# NOTE(review): mask_image / tmask_image here initially hold file paths, and
# content_image / style_image, device, toTensor, show_from_cv and
# show_from_tensor are defined earlier in the file — not visible in this
# chunk; verify against the full script.
mask_image = cv2.imread(mask_image)
mask_image_ori = copy.deepcopy(mask_image)
show_from_cv(mask_image)

tmask_image = cv2.imread(tmask_image)
tmask_image_ori = copy.deepcopy(tmask_image)
show_from_cv(tmask_image)

# Soften the tight mask with a Gaussian blur (kernel 2*tr+1, sigma tr).
tr = 3
tmask_image = cv2.GaussianBlur(tmask_image, (2 * tr + 1, 2 * tr + 1), tr)
show_from_cv(tmask_image)

# Pretrained VGG-19 feature extractor, frozen in eval mode.
cnn = models.vgg19(pretrained=True).features.to(device).eval()
#
content_image = toTensor(content_image).to(device, torch.float)
style_image = toTensor(style_image).to(device, torch.float)
mask_image = toTensor(mask_image).to(device, torch.float)
tmask_image = toTensor(tmask_image).to(device, torch.float)

# a = mask_image.reshape(1,-1)
#
# for i in range(0, 1432200):
#     print(a[0][i])

print('===> Initialize the image...')
# Start optimization from the content image (random init kept for reference).
# input_img = torch.randn(content_image.data.size(), device=device)
input_img = content_image.clone()
print('the image tensor size is {}'.format(input_img.size()))
show_from_tensor(input_img)
# Exemple #6
# 0
def get_model_and_losses(cnn, normalization_mean, normalization_std,
                         style_img, content_img, mask_image, tmask_image,
                         style_weight=100, content_weight=5, tv_weight=1e-3,
                         content_layers=content_layers_default,
                         style_layers=style_layers_default):
    """Assemble a VGG-based model with masked content/style losses inserted.

    Args:
        cnn: pretrained feature extractor (vgg19 .features).
        normalization_mean, normalization_std: input normalization stats.
        style_img, content_img: preprocessed image tensors.
        mask_image: spatial mask, resized/blurred alongside the feature maps.
        tmask_image: unused here; kept for interface compatibility.
        style_weight, content_weight, tv_weight: loss weights.
        content_layers, style_layers: layer names to attach losses after.

    Returns:
        (model, style_losses, content_losses, tv_loss) where tv_loss is None
        when tv_weight <= 0.
    """
    content_losses = []
    style_losses = []

    normalization = Normalization(normalization_mean, normalization_std).to(device)
    model = nn.Sequential(normalization)
    tv_loss = None

    if tv_weight > 0:
        tv_loss = TVLoss(tv_weight)
        model.add_module('tv_loss', tv_loss)

    i = 0
    for layer in cnn.children():

        if isinstance(layer, nn.Conv2d):
            # Smooth the mask so it tracks the conv layer's receptive field.
            sap = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)
            if not isinstance(mask_image, torch.Tensor):
                mask_image = toTensor(mask_image).to(device)
            mask_image = sap(mask_image)
            i += 1
            name = "conv_" + str(i)
            model.add_module(name, layer)

        # Why we resize the mask to a smaller image every time: in deeper
        # layers the mask must fit the shrunken feature maps, and vgg19 only
        # shrinks the spatial size in pooling layers, at a rate of 1/2.
        elif isinstance(layer, nn.MaxPool2d):
            if isinstance(mask_image, torch.Tensor):
                mask_image = tensor_to_np(mask_image)
            mask_image = cv2.resize(mask_image,
                                    (math.floor(mask_image.shape[1] / 2), math.floor(mask_image.shape[0] / 2)))
            mask_image = toTensor(mask_image).to(device)
            name = "pool_" + str(i)
            model.add_module(name, layer)

        elif isinstance(layer, nn.ReLU):
            name = "relu_" + str(i)
            # Out-of-place ReLU so inserted loss modules see unclobbered inputs.
            model.add_module(name, nn.ReLU(inplace=False))

        else:
            # Fix: the original left `name` stale (or undefined on the very
            # first layer) for unrecognized types such as nn.BatchNorm2d,
            # which could re-insert a loss under the previous layer's name
            # and silently dropped the layer itself.
            name = "other_" + str(i)
            model.add_module(name, layer)

        if name in content_layers:
            print('-----Setting up content layer-----')
            target = model(content_img).detach()
            content_loss = ContentLoss(target, mask_image, content_weight)
            content_loss.register_backward_hook(content_loss.content_hook)
            model.add_module("content_loss_" + str(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            print('-----Setting up style layer-----')
            # content_target = model(content_img).detach()

            target_feature = model(style_img).detach()
            # Broadcast the first mask channel across all feature channels.
            mask = mask_image[:, 0:1, :, :]
            mask = mask.expand_as(target_feature)
            target_feature = target_feature * mask

            # add a histogram match here
            style_loss = StyleLoss(target_feature, mask_image, style_weight)
            style_loss.register_backward_hook(style_loss.style_hook)
            model.add_module("style_loss" + str(i), style_loss)
            style_losses.append(style_loss)

    # Fix: the original truncated `model` inside this loop (model = model[:i]
    # each iteration), which trimmed the model to empty when no loss module
    # existed. Locate the last loss module first, then trim once so every
    # layer up to and including it is kept.
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], (ContentLoss, StyleLoss)):
            break
    model = model[:(i + 1)]

    return model, style_losses, content_losses, tv_loss
# Exemple #7
# 0
def generateDataset(trainExamples, vocab, use_shuffle = True):
    """Build ((words_tensor, goals), output_tensors, example) training triples.

    Extends *vocab* in place with the special tokens, indexes every dialogue,
    and pairs each full conversation with its six per-item output counts.
    Examples containing -1 in the output (disagreement) are skipped.

    Args:
        trainExamples: list of example dicts with "input", "dialogue" and
            "output" keys.
        vocab: base vocabulary; special tokens are appended and it is sorted.
        use_shuffle: shuffle the resulting pairs in place.

    Returns:
        List of (context, target, example) triples where context is
        (words_tensor, encoded_goals) and target is a list of six
        1-element long tensors.
    """
    vocab.append("<START>")
    vocab.append("<sos>")
    vocab.append("<eos>")
    vocab.append("YOU")
    vocab.append("THEM")
    if "<UNK>" not in vocab:
        vocab.append("<UNK>")
    vocab = sorted(vocab)

    wordToIndex, indexToWord = utils.getIndexTranslations(vocab)

    contextTargetPairs = []

    for i, example in enumerate(trainExamples):
        #print("example = ", exampleToString(example))
        agent_input = example["input"]
        dialogue = example["dialogue"]
        output = example["output"]

        # Skip disagreements up front (matches the other generateDataset and
        # avoids building tensors for examples we then discard).
        output_vector = output[0] + output[1]
        if -1 in output_vector:
            # There was disagreement, so don't bother training on this example.
            continue

        goals_tensor = utils.toTensor(agent_input, "goals")

        # For now, just use the raw tensor; an embedding could also be trained.
        encoded_goals = goals_tensor

        word_indexes = []

        for sentence in dialogue:
            speaker = sentence[0]
            utterance = [speaker]
            utterance += sentence[1].split(" ")
            utterance += ["<eos>"]

            # Map out-of-vocabulary words to <UNK>.
            utterance_idx = [wordToIndex[word] if word in wordToIndex else wordToIndex["<UNK>"] for word in utterance]
            word_indexes += utterance_idx

        words_tensor = torch.tensor(word_indexes, dtype=torch.long)
        #print("words tensor = ", words_tensor)

        # Six targets: [count1, count2, count3] for the agent then the partner.
        output_tensors = []
        for output_idx in range(6):
            output_count = output_vector[output_idx]
            output_tensor = torch.tensor([output_count], dtype = torch.long)
            output_tensors.append(output_tensor)

        context = (words_tensor, encoded_goals)
        target = output_tensors

        contextTargetPairs.append((context, target, example))

    if (use_shuffle):
        random.shuffle(contextTargetPairs)

    return contextTargetPairs