Example #1
import cv2
import torch
from torch.utils.data import DataLoader


def grad_loc(opt):
    loader = DataLoader(SimulatedDataset(opt.test_dir),
                        batch_size=1,
                        shuffle=True)
    model = Model(nin=True)
    model.load_state_dict(torch.load(opt.weights))
    loc = GradientLocalizaton(model)
    loc.eval()

    # Take one (anchor, same-class) pair from the loader.
    anchor, same, _ = next(iter(loader))

    dist = distance(loc(anchor), loc(same))
    print(f"distance: {dist.item()}")

    # Backpropagate the distance so the localizer's hooks capture gradients.
    dist.backward()

    gradients = loc.gradients
    activations = loc.activations(anchor).detach()

    # Overlay the gradient-weighted activations on each input image.
    superimposed_anchor, anchor = impose(gradients, activations, anchor)
    cv2.imwrite("grad_anchor.png", superimposed_anchor)
    cv2.imwrite("raw_anchor.png", anchor)

    activations = loc.activations(same).detach()

    superimposed_same, same = impose(gradients, activations, same)
    cv2.imwrite("grad_same.png", superimposed_same)
    cv2.imwrite("raw_same.png", same)
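The `impose` helper above is project code that isn't shown. A minimal sketch of what a Grad-CAM-style overlay like this typically does, assuming 4-D (1xCxHxW) gradients/activations and a 3-channel input image (shapes, blending weights, and the colormap are all assumptions):

import cv2
import numpy as np
import torch

def impose_sketch(gradients, activations, image_tensor):
    # Weight each activation channel by its spatially averaged gradient,
    # then collapse channels into a single heatmap.
    weights = gradients.mean(dim=(2, 3), keepdim=True)
    cam = torch.relu((weights * activations).sum(dim=1)).squeeze().detach()
    cam = (cam / (cam.max() + 1e-8)).numpy()

    # Recover an HxWx3 uint8 image from the (assumed) 1x3xHxW input tensor.
    img = image_tensor.squeeze().detach().permute(1, 2, 0).numpy()
    img = np.uint8(255 * (img - img.min()) / (img.max() - img.min() + 1e-8))

    # Upsample the heatmap to the image size, colorize it, and blend.
    heatmap = cv2.resize(cam, (img.shape[1], img.shape[0]))
    heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    superimposed = cv2.addWeighted(heatmap, 0.4, img, 0.6, 0)
    return superimposed, img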
Example #2
import numpy as np
import scipy.linalg

import model  # external module providing model.distance(graph, pos_i, pos_j)


def procedure(ticks):
    n = 500
    b = .000006662
    D = 1
    alpha = 2

    n_types = ticks**D
    # print('Number of types: {}'.format(n_types))
    M = np.zeros([ticks**D, ticks**D])
    registry = {}

    next_id = 0

    # Assign a matrix index to every lattice point.
    for index in np.ndindex(tuple([ticks] * D)):
        i = index[:D]
        registry[i] = next_id
        next_id += 1

    # Fill the interaction matrix for every ordered pair of distinct points.
    for index in np.ndindex(tuple([ticks] * D * 2)):
        i = index[:D]
        j = index[D:]

        if i != j:
            pos_i = [float(_i) / (ticks - 1) for _i in i]
            pos_j = [float(_j) / (ticks - 1) for _j in j]

            M[registry[i], registry[j]] = .5 * n**2 / n_types**2 * \
                b / (b + model.distance(None, pos_i, pos_j)**alpha)

    # eigvals returns a complex array even for a real M, and complex values
    # cannot be ordered in Python 3, so compare by real part (M is real and
    # non-negative, so its leading eigenvalue is real).
    eigvals = scipy.linalg.eigvals(M)
    return max(eigvals.real)
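`model.distance` here comes from an external module that isn't shown; given the normalized coordinate lists it receives (and the unused leading argument), a plausible stand-in is plain Euclidean distance:

import math

def distance(_graph, pos_i, pos_j):
    # Hypothetical stand-in for model.distance; the first parameter mirrors
    # the unused `None` passed in the call above.
    return math.sqrt(sum((a - b)**2 for a, b in zip(pos_i, pos_j)))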
Example #4
def expected_edge_freq(G):
    edge_expected_freq = {}
    for u in G._nodes:
        for v in G._nodes:
            if u < v:
                freq = G.c / (model.distance(G, u, v)**2 + G.c)
                # Store the frequency under both key orders so lookups work
                # with either (u, v) or (v, u).
                edge_expected_freq[(u, v)] = freq
                edge_expected_freq[(v, u)] = freq
    return edge_expected_freq
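For a quick sanity check, hypothetical stubs for `G` and the external `model` module (names chosen only to mirror the snippet) show the symmetric dictionary the function builds:

import math

class model:  # stand-in for the imported module used above
    @staticmethod
    def distance(G, u, v):
        (x1, y1), (x2, y2) = G._pos[u], G._pos[v]
        return math.hypot(x2 - x1, y2 - y1)

class G:  # minimal graph stub
    _nodes = [0, 1, 2]
    _pos = {0: (0, 0), 1: (1, 0), 2: (0, 1)}
    c = 0.5

freqs = expected_edge_freq(G)
print(freqs[(0, 1)] == freqs[(1, 0)])  # True: both key orders are stored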
Example #6
def test(test_loader, model, epoch):
    # switch to evaluate mode
    model.eval()

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_p, label) in pbar:
        current_sample = data_a.size(0)
        data_a = data_a.resize_(args.test_input_per_file * current_sample, 1, data_a.size(2), data_a.size(3))
        data_p = data_p.resize_(args.test_input_per_file * current_sample, 1, data_a.size(2), data_a.size(3))
        if args.cuda:
            data_a, data_p = data_a.cuda(), data_p.cuda()
        # Legacy (pre-0.4) PyTorch API: volatile Variables run without autograd.
        data_a, data_p, label = Variable(data_a, volatile=True), \
                                Variable(data_p, volatile=True), Variable(label)

        # compute output
        out_a, out_p = model(data_a), model(data_p)
        dists = distance(out_a, out_p, args.distance)
        dists = dists.data.cpu().numpy()
        dists = dists.reshape(current_sample, args.test_input_per_file).mean(axis=1)
        distances.append(dists)
        labels.append(label.data.cpu().numpy())

        if batch_idx % args.log_interval == 0:
            pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                # epoch, batch_idx * len(data_a), len(test_loader.dataset),
                epoch, batch_idx * len(data_a), len(test_loader) * len(data_a),
                100. * batch_idx / len(test_loader)))

    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])

    tpr, fpr, accuracy = evaluate(distances, labels)
    print('\33[91mTest set: Accuracy: {:.8f}\n\33[0m'.format(np.mean(accuracy)))
    logger.log_value('Test Accuracy', np.mean(accuracy))
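The `distance` helper used by test() above (and by train() below) is defined elsewhere in the repo. A sketch of a plausible implementation, assuming the third argument selects the metric (the metric names here are guesses, not the project's actual flags):

import torch.nn.functional as F

def distance(x1, x2, metric="l2"):
    if metric == "cosine":
        # Orient cosine like L2: smaller means more similar.
        return 1.0 - F.cosine_similarity(x1, x2)
    return F.pairwise_distance(x1, x2)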
Example #7
def train(train_loader, model, optimizer, epoch):
    # switch to train mode
    model.train()

    labels, distances = [], []

    pbar = tqdm(enumerate(train_loader))
    for batch_idx, (data_a, data_p, data_n, label_p, label_n) in pbar:
        #print("on training{}".format(epoch))
        if args.cuda:
            data_a, data_p, data_n = data_a.cuda(), data_p.cuda(), data_n.cuda()

        # compute output
        out_a, out_p, out_n = model(data_a), model(data_p), model(data_n)


        if epoch > args.min_softmax_epoch:
            triplet_loss = TripletMarginLoss(args.margin).forward(out_a, out_p, out_n)
            loss = triplet_loss
            # compute gradient and update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            logger.log_value('selected_triplet_loss', triplet_loss.data.item()).step()
            #logger.log_value('selected_cross_entropy_loss', cross_entropy_loss.data.item()).step()
            logger.log_value('selected_total_loss', loss.data.item()).step()

            if batch_idx % args.log_interval == 0:
                pbar.set_description(
                    'Train Epoch: {:3d} [{:8d}/{:8d} ({:3.0f}%)]\tLoss: {:.6f}'.format(
                        # epoch, batch_idx * len(data_a), len(train_loader.dataset),
                        epoch, batch_idx * len(data_a), len(train_loader) * len(data_a),
                        100. * batch_idx / len(train_loader),
                        loss.data.item()))


            dists = distance(out_a, out_n, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.zeros(dists.size(0)))


            dists = distance(out_a, out_p, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.ones(dists.size(0)))



        else:
            # Choose the hard negatives
            d_p = distance(out_a, out_p, args.distance)
            d_n = distance(out_a, out_n, args.distance)
            # Triplets that still violate the margin; named to avoid shadowing
            # the built-in all().
            violations = (d_n - d_p < args.margin).cpu().data.numpy().flatten()

            # log accuracy and mean distance for the mini batch.
            total_correct = np.where(violations == 0)
            logger.log_value('Minibatch Train Accuracy', len(total_correct[0]))

            total_dist = (d_n - d_p).cpu().data.numpy().flatten()
            logger.log_value('Minibatch Train distance', np.mean(total_dist))

            hard_triplets = np.where(violations == 1)
            if len(hard_triplets[0]) == 0:
                continue

            if args.cuda:
                out_selected_a = Variable(torch.from_numpy(out_a.cpu().data.numpy()[hard_triplets]).cuda())
                out_selected_p = Variable(torch.from_numpy(out_p.cpu().data.numpy()[hard_triplets]).cuda())
                out_selected_n = Variable(torch.from_numpy(out_n.cpu().data.numpy()[hard_triplets]).cuda())

                selected_data_a = Variable(torch.from_numpy(data_a.cpu().data.numpy()[hard_triplets]).cuda())
                selected_data_p = Variable(torch.from_numpy(data_p.cpu().data.numpy()[hard_triplets]).cuda())
                selected_data_n = Variable(torch.from_numpy(data_n.cpu().data.numpy()[hard_triplets]).cuda())
            else:
                out_selected_a = Variable(torch.from_numpy(out_a.data.numpy()[hard_triplets]))
                out_selected_p = Variable(torch.from_numpy(out_p.data.numpy()[hard_triplets]))
                out_selected_n = Variable(torch.from_numpy(out_n.data.numpy()[hard_triplets]))

                selected_data_a = Variable(torch.from_numpy(data_a.data.numpy()[hard_triplets]))
                selected_data_p = Variable(torch.from_numpy(data_p.data.numpy()[hard_triplets]))
                selected_data_n = Variable(torch.from_numpy(data_n.data.numpy()[hard_triplets]))


            selected_label_p = torch.from_numpy(label_p.cpu().numpy()[hard_triplets])
            selected_label_n = torch.from_numpy(label_n.cpu().numpy()[hard_triplets])
            triplet_loss = TripletMarginLoss(args.margin).forward(out_selected_a, out_selected_p, out_selected_n)

            cls_a = model.forward_classifier(selected_data_a)
            cls_p = model.forward_classifier(selected_data_p)
            cls_n = model.forward_classifier(selected_data_n)

            criterion = nn.CrossEntropyLoss()
            predicted_labels = torch.cat([cls_a, cls_p, cls_n])
            if args.cuda:
                true_labels = torch.cat([Variable(selected_label_p.cuda()),
                                         Variable(selected_label_p.cuda()),
                                         Variable(selected_label_n.cuda())])
                cross_entropy_loss = criterion(predicted_labels.cuda(), true_labels.cuda())
            else:
                true_labels = torch.cat([Variable(selected_label_p),
                                         Variable(selected_label_p),
                                         Variable(selected_label_n)])
                cross_entropy_loss = criterion(predicted_labels, true_labels)

            loss = cross_entropy_loss + triplet_loss * args.loss_ratio
            # compute gradient and update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


            # log loss values for the hard selected samples
            logger.log_value('selected_triplet_loss', triplet_loss.data.item()).step()
            logger.log_value('selected_cross_entropy_loss', cross_entropy_loss.data.item()).step()
            logger.log_value('selected_total_loss', loss.data.item()).step()
            if batch_idx % args.log_interval == 0:
                pbar.set_description(
                    'Train Epoch: {:3d} [{:8d}/{:8d} ({:3.0f}%)]\tLoss: {:.6f} \t Number of Selected Triplets: {:4d}'.format(
                        # epoch, batch_idx * len(data_a), len(train_loader.dataset),
                        epoch, batch_idx * len(data_a), len(train_loader) * len(data_a),
                        100. * batch_idx / len(train_loader),
                        loss.data.item(), len(hard_triplets[0])))


            dists = distance(out_selected_a, out_selected_n, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.zeros(dists.size(0)))


            dists = distance(out_selected_a, out_selected_p, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.ones(dists.size(0)))


    #accuracy for hard selected sample, not all sample.
    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])

    tpr, fpr, accuracy = evaluate(distances, labels)
    print('\33[91mTrain set: Accuracy: {:.8f}\n\33[0m'.format(np.mean(accuracy)))
    logger.log_value('Train Accuracy', np.mean(accuracy))

    # do checkpointing
    torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()},
               '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))
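The TripletMarginLoss instantiated in train() is project code, but PyTorch ships an equivalent built-in with the same call pattern; a minimal usage sketch (batch and embedding sizes here are arbitrary):

import torch
import torch.nn as nn

triplet = nn.TripletMarginLoss(margin=1.0, p=2)  # margin mirrors args.margin
anchor, positive, negative = (torch.randn(8, 128, requires_grad=True)
                              for _ in range(3))
loss = triplet(anchor, positive, negative)  # mean loss over the batch
loss.backward()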
Example #8
# x1=int(input())
# x2=int(input())
# value=add(x1,x2)
# print(value)

# variadic parameters
# def avg(*ns):
#     sum=0
#     for n in ns:
#         sum=sum+n
#     print(sum/(len(ns)))

# avg(3,4)
# avg(3,4,5)
# avg(3,4,5,6)

# modules
# import model
# result=model.distance(1,1,5,5)
# print(result)
# result=model.slope(1,2,5,6)
# print(result)

import sys
sys.path.append("models")  # add the models directory to the module search path
# print(sys.path)

import model
result = model.distance(1, 1, 5, 5)
print(result)
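The example only shows the import side; a models/model.py consistent with the calls model.distance(1, 1, 5, 5) and model.slope(1, 2, 5, 6) might look like this (assumed implementation):

import math

def distance(x1, y1, x2, y2):
    # Euclidean distance between (x1, y1) and (x2, y2).
    return math.hypot(x2 - x1, y2 - y1)

def slope(x1, y1, x2, y2):
    # Slope of the line through the two points (undefined when x1 == x2).
    return (y2 - y1) / (x2 - x1)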
Example #9
def score():
    """Handle requests for /score via POST"""

    # Read strings
    s1 = request.form.get("str1")
    s2 = request.form.get("str2")
    if not s1 or not s2:
        abort(400, "missing strings")

    # Calculate distance
    matrix = distance(s1, s2)

    # Extract operations from table
    operations = []
    i = len(s1)
    j = len(s2)
    while True:
        _, operation = matrix[i][j]
        if not operation:
            break
        if operation == Operation.INSERTED:
            j -= 1
        elif operation == Operation.DELETED:
            i -= 1
        else:
            i -= 1
            j -= 1
        operations.append(operation)
    operations.reverse()

    # Maintain list of intermediate strings, operation and description
    transitions = [(s1, None, None)]
    i = 0

    # Apply each operation
    prev = s1
    for operation in operations:
        # Update string and description of operation
        if operation == Operation.INSERTED:
            s = (prev[:i], s2[i], prev[i:])
            description = f"inserted '{s2[i]}'"
            prev = prev[:i] + s2[i] + prev[i:]
            i += 1
        elif operation == Operation.DELETED:
            s = (prev[:i], prev[i], prev[i + 1:])
            description = f"deleted '{prev[i]}'"
            prev = prev[:i] + prev[i + 1:]
        elif prev[i] != s2[i]:
            s = (prev[:i], s2[i], prev[i + 1:])
            description = f"substituted '{prev[i]}' with '{s2[i]}'"
            prev = prev[:i] + s2[i] + prev[i + 1:]
            i += 1
        else:
            i += 1
            continue
        transitions.append((s, str(operation), description))
    transitions.append((s2, None, None))

    # Output comparison
    return render_template("score.html",
                           matrix=matrix,
                           s1=s1,
                           s2=s2,
                           operations=transitions)
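The `distance` helper and `Operation` enum that score() relies on are defined elsewhere; the traceback above only requires each matrix cell to hold a (cost, operation) pair. A Wagner-Fischer sketch consistent with that shape (assumed to match the real helper in shape only):

import enum

class Operation(enum.Enum):
    DELETED = "deleted"
    INSERTED = "inserted"
    SUBSTITUTED = "substituted"

    def __str__(self):
        return str(self.value)

def distance(a, b):
    # matrix[i][j] = (edit distance between a[:i] and b[:j], last operation)
    matrix = [[(0, None)] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(1, len(a) + 1):
        matrix[i][0] = (i, Operation.DELETED)
    for j in range(1, len(b) + 1):
        matrix[0][j] = (j, Operation.INSERTED)
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            candidates = [
                (matrix[i - 1][j][0] + 1, Operation.DELETED),
                (matrix[i][j - 1][0] + 1, Operation.INSERTED),
                (matrix[i - 1][j - 1][0] + (a[i - 1] != b[j - 1]),
                 Operation.SUBSTITUTED),
            ]
            matrix[i][j] = min(candidates, key=lambda t: t[0])
    return matrix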