def find_best_self(domain):
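    """Run self_train on <domain> for each confidence threshold theta,
    log (theta, accuracy) to a CSV, and save the classifier that achieved
    the best test accuracy as <domain>/self_clf."""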
    X_train = load_obj("%s/X_train"%domain)
    y_train = load_obj("%s/y_train"%domain)
    X_test = load_obj("%s/X_test"%domain)
    y_test = load_obj("%s/y_test"%domain)
    X_un = load_obj("%s/X_un"%domain)

    thetas = [0.5,0.6,0.7,0.8,0.9]
    best_acc = 0.0
    best_clf =""
    best_theta = 0.0
    resFile = open("../work/params/%s_in_theta.csv"%domain,"w")
    resFile.write("theta, acc\n")
    for theta in thetas:
        print "##############################"
        print "start with theta=%s"%theta
        print "##############################"
        acc,clf_func = self_train(domain,X_train,y_train,X_test,y_test,X_un,theta=theta)
        
        if best_acc<acc:
            best_acc = acc
            best_clf = clf_func
            best_theta = theta

        resFile.write("%f, %f\n"%(theta,acc))
        resFile.flush()
    resFile.close()
    print "##############################"
    print "best_theta:",best_theta,"best_acc:",best_acc
    save_obj(best_clf,"%s/self_clf"%domain)
    pass
def majority_vote(target):
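    """Load the saved self-trained classifier of every source domain
    (excluding the target), combine them into a majority-vote ensemble with
    EnsembleVoteClassifier, and report accuracy (macro-F1 for the large
    data sets) on the target test set."""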
    X_test = load_obj("%s/X_test"%target)
    y_test = load_obj("%s/y_test"%target)

    domains = []
    if "mlp" in target:
        domains = ["mlp/books","mlp/dvd","mlp/electronics","mlp/kitchen"]
    else:
        if "large" not in target:
            domains = ["books","dvd","electronics","kitchen"]
            if target not in domains:
                return
        else:
            domains =["large/baby","large/cell_phone","large/imdb","large/yelp2014"]

    models = []
    for source in domains:
        if target == source:
            continue
        else:
            print source
            clf_func = load_obj("%s/self_clf"%source)
            models.append(clf_func)


    eclf = EnsembleVoteClassifier(clfs=models,refit=False)#weights=[1,1,1],
    eclf.fit(X_test,y_test)  # required by the API; with refit=False the pre-trained models are not refitted
    if "large" in target:
        tmp_name = "large/" + target.upper()[6]
    elif "mlp" in target:
        tmp_name = "mlp/" + target.upper()[4]
    else:
        tmp_name = target.upper()[0]
    save_obj(eclf, '%s_eclf'%(tmp_name))
    pred = eclf.predict(X_test)
    acc = accuracy_score(y_test,pred) if "large" not in target else f1_score(y_test,pred,average='macro')
    print 'self-train',acc
    pass
Example #3
def unlabel_sim(target):
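    """Compute the cosine similarity of every unlabelled target instance to
    the centroid of the unlabelled target data and save the scores as
    <target>/tgt_sim."""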
    tgt_un = load_obj("%s/X_un"%target)
    # print target,tgt_un.shape
    c_t = compute_centriod(tgt_un)
    computed_tgt_sim = [cos_sim(x,c_t) for x in tgt_un]
    save_obj(computed_tgt_sim,"%s/tgt_sim"%target)
    pass
Example #4
def generate_un():
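    """For each of the four Amazon review domains, build the unlabelled
    matrix via set_up_unlabelled and save it as <domain>/X_un."""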
    source = "d1"
    domains = ["books","dvd","electronics","kitchen"]
    for target in domains:
        src_un,tgt_un = set_up_unlabelled(source,target,False)
        save_obj(tgt_un,"%s/X_un"%target)
    pass
def majority_vote_mlp(target):
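    """For each source domain in the MLP setting, run self_train over a range
    of thetas on that source's joint split, keep the best classifier per
    source, combine them into a majority-vote ensemble, and evaluate it on
    the target test set."""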
    X_test = load_obj("%s/X_test"%target)
    y_test = load_obj("%s/y_test"%target)

    # domains = ["mlp/books","mlp/dvd","mlp/electronics","mlp/kitchen"]
    data_name = ["books", "dvd", "electronics", "kitchen"]
    X_joint = load_obj("%s/X_joint"%target)
    y_joint = load_obj("%s/y_joint"%target)
    temp_un = load_obj("%s/X_un"%target)
    meta_sources = []
    for i in range(len(data_name)):
        if 'mlp/'+data_name[i] != target:
            meta_sources.append(data_name[i])
    # print meta_sources
    models = []
    for j in range(len(meta_sources)):
        temp_X = X_joint[j]
        temp_y = y_joint[j]
        thetas = [0.5,0.6,0.7,0.8,0.9]
        best_acc = 0.0
        best_clf =""
        best_theta = 0.0
        resFile = open("../work/params/%s_theta_self-%s.csv"%(target,meta_sources[j].upper()[0]),"w")
        resFile.write("theta, acc\n")
        for theta in thetas:
            print "##############################"
            print "start with theta=%s"%theta
            print "##############################"
            acc,clf_func = self_train(target,temp_X,temp_y,X_test,y_test,temp_un,theta=theta)
            
            if best_acc<acc:
                best_acc = acc
                best_clf = clf_func
                best_theta = theta

            resFile.write("%f, %f\n"%(theta,acc))
            resFile.flush()
        resFile.close()
        print "##############################"
        print "best_theta:",best_theta,"best_acc:",best_acc
        models.append(best_clf)

    eclf = EnsembleVoteClassifier(clfs=models,refit=False)#weights=[1,1,1],
    eclf.fit(X_test,y_test)  # required by the API; with refit=False the pre-trained models are not refitted
    # tmp_name = target.upper()[0] if "large" not in target else "large/"+target.upper()[6]
    # tmp_name = 'mlp/'+target.upper()[4]
    save_obj(eclf, "%s/self_clf"%target)
    pred = eclf.predict(X_test)
    # print pred
    acc = accuracy_score(y_test,pred)
    print 'self-train',acc
    pass
Example #6
def src_cost(target):
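    """Score every source training instance in X_joint by its cosine
    similarity to the centroid of the unlabelled target data, split the
    scores back into the three source domains, and save them as
    <target>/src_cost."""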
    X_joint = load_obj("%s/X_joint"%target)
    src_train = get_sents(X_joint)
    tgt_un = load_obj("%s/X_un"%target)
    c_t = compute_centriod(tgt_un)
    # print src_train
    sim = [cos_sim(x,c_t) for x in src_train]
    # s = sum(sim)
    # sim = [x/s for x in sim]
    # print normalized_sim
    sim = list(split_list(sim,3))
    save_obj(sim,"%s/src_cost"%target)
    pass
Example #7
def find_theta(target):
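    """Search the confidence threshold theta for <target>: call predict_tops
    for each theta, log (theta, accuracy, method, sorting) to a CSV, and save
    the pseudo-labelled positive/negative instances and their probabilities
    from the best run."""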
    print target
    resFile = open("../work/params/%s_theta.csv"%target,"w")
    resFile.write("theta, acc, method\n")
    thetas = [0.5,0.6,0.7,0.8,0.9]
    best_theta = 0.0
    best_acc = 0.0
    best_pos = ""
    best_neg = ""
    best_method = ""
    best_sorting = ""
    best_pos_proba = ""
    best_neg_proba = ""
    for theta in thetas:
        pos_star,neg_star,acc,method,sorting,pos_proba,neg_proba = predict_tops(target,theta=theta)
        # print "S_L:",
        # acc1 = test_confidence(target,option=1,theta=theta)
        print "PL(T_L*):",acc,theta
        # acc = test_confidence(target, pos_star,neg_star,theta=theta)
        # print "S_L+T_L*:",
        # acc3 = test_confidence(target,option=2,theta=theta)
        resFile.write("%f, %f, %s, %s\n"%(theta,acc,method,sorting))
        if best_acc<acc:
            best_acc = acc
            best_theta = theta
            best_pos = pos_star
            best_neg = neg_star
            best_method = method
            best_sorting = sorting
            best_pos_proba = pos_proba
            best_neg_proba = neg_proba
        resFile.flush()
    resFile.close()
    print "####################################"
    print "best_theta:",best_theta,"best_acc:",best_acc, "best_method:",best_method,best_sorting
    save_obj(best_pos,"%s/pos_star"%target)
    save_obj(best_neg,"%s/neg_star"%target)
    save_obj(best_pos_proba,"%s/pos_proba"%target)
    save_obj(best_neg_proba,"%s/neg_proba"%target)
    pass
def joint_train(target):
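    """Pool the labelled training data and unlabelled data of every source
    domain (excluding the target), run self_train over a range of thetas,
    log the results, and save the best classifier as <target>/joint_clf."""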
    X_test = load_obj("%s/X_test"%target)
    y_test = load_obj("%s/y_test"%target)

    domains = []
    temp_X = []
    temp_y = []
    temp_un = []
    if "mlp" in target:
        domains = ["mlp/books","mlp/dvd","mlp/electronics","mlp/kitchen"]
        temp_X = get_sents(load_obj("%s/X_joint"%target))
        temp_y = get_sents(load_obj("%s/y_joint"%target))
        for source in domains:
            if target == source:
                continue
            else:
                X_un = load_obj("%s/X_un"%source)
                temp_un = concatenate(temp_un,X_un)
    else:
        if "large" not in target:
            domains = ["books","dvd","electronics","kitchen"]
            if target not in domains:
                return
        else:
            domains =["large/baby","large/cell_phone","large/imdb","large/yelp2014"]
    
        for source in domains:
            if target == source:
                continue
            else:
                print source
                X_train = load_obj("%s/X_train"%source)
                y_train = load_obj("%s/y_train"%source)
                X_un = load_obj("%s/X_un"%source)
                temp_X = concatenate(temp_X,X_train)
                temp_y = concatenate(temp_y,y_train)
                temp_un = concatenate(temp_un,X_un)

    thetas = [0.5,0.6,0.7,0.8,0.9]
    best_acc = 0.0
    best_clf =""
    best_theta = 0.0
    resFile = open("../work/params/%s_theta_joint.csv"%domain,"w")
    resFile.write("theta, acc\n")
    for theta in thetas:
        print "##############################"
        print "start with theta=%s"%theta
        print "##############################"
        acc,clf_func = self_train(target,temp_X,temp_y,X_test,y_test,temp_un,theta=theta)
        
        if best_acc<acc:
            best_acc = acc
            best_clf = clf_func
            best_theta = theta

        resFile.write("%f, %f\n"%(theta,acc))
        resFile.flush()
    resFile.close()
    print "##############################"
    print "best_theta:",best_theta,"best_acc:",best_acc
    save_obj(best_clf,"%s/joint_clf"%target)
    return best_acc,best_clf
Example #9
def compute_psi(target,k=None):
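    """Build the psi matrix for <target>: softmax-normalised similarities
    between the pseudo-labelled (pos_star/neg_star) instances and each source
    split in X_joint. If k is given, keep only the top-k source instances per
    split and also save the filtered X, y, cost and index arrays under
    <target>/<k>/."""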
    pos_star = load_obj('%s/pos_star'%target)
    neg_star = load_obj('%s/neg_star'%target)
    star_matrix = concatenate(pos_star,neg_star)
    # print star_matrix
    X_joint = load_obj('%s/X_joint'%target)
    y_joint = load_obj('%s/y_joint'%target)
    # print np.array(X_joint).shape
    src_cost = load_obj("%s/src_cost"%target)
    # X_train = get_sents(X_joint)
    # print np.matmul(star_matrix,X_train.T)
    # psi_matrix = np.dot(star_matrix,X_train.T).T
    #softmax(np.dot(star_matrix,X_train.T).T)
    # print k
    if k is None:
        psi_matrix = []
    
        for X_split in X_joint:
            # print np.dot(star_matrix,np.array(X_split).T).T
            # print np.array(X_split).shape
            # temp = softmax(np.dot(star_matrix,np.array(X_split).T).T,axis=0)
            temp = softmax(normalize(np.dot(star_matrix,np.array(X_split).T).T),axis=0)
            psi_matrix.append(temp)
            # print temp
        save_obj(np.array(psi_matrix),"%s/psi_matrix"%(target))
        # np.save("../data/%s/psi_matrix"%(target),np.array(psi_matrix))
    else:
        psi_matrix = []
        X_psi = []
        y_psi = []
        cost_psi = []
        X_index = []
        for (X_split,y_split,cost_split) in zip(X_joint,y_joint,src_cost):
            temp = normalize(np.dot(star_matrix,np.array(X_split).T).T)
            # temp = np.dot(star_matrix,np.array(X_split).T).T
            filtered,index = top_k(temp,k)
            # print softmax(filtered,axis=0),index
            psi_matrix.append(softmax(filtered,axis=0))
            # print filtered,filtered.shape
            X_temp = np.array(X_split)[index]
            X_psi.append(X_temp)
            y_temp = np.array(y_split)[index]
            y_psi.append(y_temp)
            cost_temp = np.array(cost_split)[index]
            cost_psi.append(cost_temp)
            X_index.append(index)
            # print y_temp.shape
        # print top_k(psi_matrix,k)
        # print psi_matrix[0].sum(axis=0).shape,psi_matrix[0].sum(axis=0)
        psi_matrix = np.array(psi_matrix)
        X_psi = np.array(X_psi)
        y_psi = np.array(y_psi)
        cost_psi = np.array(cost_psi)
        save_obj(psi_matrix,"%s/%s/psi_matrix"%(target,k))
        save_obj(X_psi,"%s/%s/X_psi"%(target,k))
        save_obj(y_psi,"%s/%s/y_psi"%(target,k))
        save_obj(cost_psi,"%s/%s/src_cost_psi"%(target,k))
        save_obj(X_index,"%s/%s/X_index"%(target,k))
        # print sum([y for domain in y_psi for y in domain  if y==1])
    return np.array(psi_matrix)
Example #10
def chen12(target="all"):

    time_start = time.time()

    amazon = np.load("../data/amazon.npz")
    amazon_xx = coo_matrix(
        (amazon['xx_data'], (amazon['xx_col'], amazon['xx_row'])),
        shape=amazon['xx_shape'][::-1]).tocsc()
    amazon_xx = amazon_xx[:, :5000]
    amazon_yy = amazon['yy']
    amazon_yy = (amazon_yy + 1) / 2
    amazon_offset = amazon['offset'].flatten()
    time_end = time.time()
    print("Time used to process the Amazon data set = {} seconds.".format(
        time_end - time_start))
    print("Number of training instances = {}, number of features = {}.".format(
        amazon_xx.shape[0], amazon_xx.shape[1]))
    print("Number of nonzero elements = {}".format(amazon_xx.nnz))
    print("amazon_xx shape = {}.".format(amazon_xx.shape))
    print("amazon_yy shape = {}.".format(amazon_yy.shape))
    data_name = ["books", "dvd", "electronics", "kitchen"]
    num_data_sets = 4
    data_insts, data_labels, num_insts = [], [], []
    for i in range(num_data_sets):
        data_insts.append(amazon_xx[amazon_offset[i]:amazon_offset[i + 1], :])
        data_labels.append(amazon_yy[amazon_offset[i]:amazon_offset[i + 1], :])
        print(
            "Length of the {} data set label list = {}, label values = {}, label balance = {}"
            .format(
                data_name[i],
                amazon_yy[amazon_offset[i]:amazon_offset[i + 1], :].shape[0],
                np.unique(amazon_yy[amazon_offset[i]:amazon_offset[i + 1], :]),
                np.sum(amazon_yy[amazon_offset[i]:amazon_offset[i + 1], :])))
        num_insts.append(amazon_offset[i + 1] - amazon_offset[i])
        # Randomly shuffle.
        r_order = np.arange(num_insts[i])
        np.random.shuffle(r_order)
        data_insts[i] = data_insts[i][r_order, :]
        data_labels[i] = data_labels[i][r_order, :]
    print("Data sets: {}".format(data_name))
    print("Number of total instances in the data sets: {}".format(num_insts))
    num_trains = 2000
    input_dim = amazon_xx.shape[1]
    # convert to tf_idf vectors
    # data_insts = tf_idf(data_insts)
    for i in range(num_data_sets):
        print data_name[i]
        # Build source instances.
        source_insts = []
        source_labels = []
        for j in range(num_data_sets):
            if j != i:
                source_insts.append(
                    data_insts[j][:num_trains, :].todense().astype(np.float32))
                source_labels.append(
                    data_labels[j][:num_trains, :].ravel().astype(np.int64))
        if target == data_name[i] or target == "all":
            # Build test instances.
            target_idx = i
            target_insts = data_insts[i][num_trains:, :].todense().astype(
                np.float32)
            target_labels = data_labels[i][num_trains:, :].ravel().astype(
                np.int64)
            # train_data=np.concatenate((get_sents(source_insts),target_insts),axis=0)
            # train_labels = np.concatenate((np.array(source_labels).flatten(),target_labels),axis=0)
            train_data = get_sents(source_insts)
            train_labels = np.array(source_labels).flatten()
            # print target_insts
            unlabel_data = data_insts[i][:num_trains, :].todense().astype(
                np.float32)
            train, test, X_un = mlp_vectors(train_data, train_labels,
                                            get_sents(source_insts),
                                            target_insts, unlabel_data)
            mlp_source_insts = list(split_list(train, 3))
            source_labels = list(
                split_list(np.array(source_labels).flatten(), 3))
            mlp_target_insts = test
            clf = LogisticRegression().fit(train,
                                           np.array(source_labels).flatten())
            pred = clf.predict(test)
            acc = accuracy_score(target_labels, pred)
            print acc
            if acc > 0.847:
                save_obj(mlp_source_insts, "mlp/%s/X_joint" % data_name[i])
                save_obj(source_labels, "mlp/%s/y_joint" % data_name[i])
                save_obj(mlp_target_insts, "mlp/%s/X_test" % data_name[i])
                save_obj(target_labels, "mlp/%s/y_test" % data_name[i])
                save_obj(X_un, "mlp/%s/X_un" % data_name[i])
            return acc
Example #11
def recon():
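    """Reconstruct 3D faces for every image under `dataset`: regress BFM
    coefficients with a pretrained ResNet-50, render the reconstructed face
    and its UV texture, and save the rendered images and the .obj mesh
    under `output`."""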
    # input and output folder
    image_path = r'dataset'
    save_path = 'output'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    img_list = glob.glob(image_path + '/**/' + '*.png', recursive=True)
    img_list += glob.glob(image_path + '/**/' + '*.jpg', recursive=True)

    # read BFM face model
    # transfer original BFM model to our model
    if not os.path.isfile('BFM/BFM_model_front.mat'):
        transfer_BFM09()

    device = 'cuda:0' if torch.cuda.is_available() else 'cpu:0'
    bfm = BFM(r'BFM/BFM_model_front.mat', device)

    # read standard landmarks for preprocessing images
    lm3D = bfm.load_lm3d()

    model = resnet50_use().to(device)
    model.load_state_dict(torch.load(r'models\params.pt'))
    model.eval()

    for param in model.parameters():
        param.requires_grad = False

    for file in img_list:
        # load images and corresponding 5 facial landmarks
        img, lm = load_img(file, file.replace('jpg', 'txt'))

        # preprocess input image
        input_img_org, lm_new, transform_params = Preprocess(img, lm, lm3D)

        input_img = input_img_org.astype(np.float32)
        input_img = torch.from_numpy(input_img).permute(0, 3, 1, 2)
        # the input_img is BGR
        input_img = input_img.to(device)

        arr_coef = model(input_img)

        coef = torch.cat(arr_coef, 1)

        # reconstruct 3D face with output coefficients and face model
        face_shape, face_texture, face_color, landmarks_2d, z_buffer, angles, translation, gamma = reconstruction(
            coef, bfm)

        fx, px, fy, py = estimate_intrinsic(landmarks_2d, transform_params,
                                            z_buffer, face_shape, bfm, angles,
                                            translation)

        face_shape_t = transform_face_shape(face_shape, angles, translation)
        face_color = face_color / 255.0
        face_shape_t[:, :, 2] = 10.0 - face_shape_t[:, :, 2]

        images = render_img(face_shape_t, face_color, bfm, 300, fx, fy, px, py)
        images = images.detach().cpu().numpy()
        images = np.squeeze(images)

        path_str = file.replace(image_path, save_path)
        path = os.path.split(path_str)[0]
        if os.path.exists(path) is False:
            os.makedirs(path)

        from PIL import Image
        images = np.uint8(images[:, :, :3] * 255.0)
        # init_img = np.array(img)
        # init_img[images != 0] = 0
        # images += init_img
        img = Image.fromarray(images)
        img.save(file.replace(image_path, save_path).replace('jpg', 'png'))

        face_shape = face_shape.detach().cpu().numpy()
        face_color = face_color.detach().cpu().numpy()

        face_shape = np.squeeze(face_shape)
        face_color = np.squeeze(face_color)
        save_obj(
            file.replace(image_path,
                         save_path).replace('.jpg',
                                            '_mesh.obj'), face_shape, bfm.tri,
            np.clip(face_color, 0,
                    1.0))  # 3D reconstruction face (in canonical view)

        from load_data import transfer_UV
        from utils import process_uv
        # loading UV coordinates
        uv_pos = transfer_UV()
        tex_coords = process_uv(uv_pos.copy())
        tex_coords = torch.tensor(tex_coords,
                                  dtype=torch.float32).unsqueeze(0).to(device)

        face_texture = face_texture / 255.0
        images = render_img(tex_coords, face_texture, bfm, 600, 600.0 - 1.0,
                            600.0 - 1.0, 0.0, 0.0)
        images = images.detach().cpu().numpy()
        images = np.squeeze(images)

        # from PIL import Image
        images = np.uint8(images[:, :, :3] * 255.0)
        img = Image.fromarray(images)
        img.save(
            file.replace(image_path,
                         save_path).replace('.jpg', '_texture.png'))
Example #12
def main():
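    """Render a BFM face mesh with the SoftRas renderer, then detect 68
    facial landmarks in two images with dlib, estimate the affine transform
    between the two landmark sets, and warp the first image into the second
    image's frame."""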
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--filename-input',
                        type=str,
                        default=os.path.join('./data/obj/liu_mesh.obj'))
    parser.add_argument('-o',
                        '--output-dir',
                        type=str,
                        default=os.path.join('./data/obj/vd092_mesh.obj'))
    args = parser.parse_args()

    # other settings
    camera_distance = 3.0
    elevation = -5
    azimuth = 0

    # load from Wavefront .obj file
    mesh = sr.Mesh.from_obj(args.filename_input,
                            load_texture=True,
                            texture_res=5,
                            texture_type='vertex')

    # create renderer with SoftRas
    renderer = sr.SoftRenderer(camera_mode='look_at')

    net = Net.ResNet([3, 4, 23, 3]).to(device)
    im = torch.rand(1, 3, 200, 200).to(device)
    out = net(im).to(device)
    BFM_coeff = out

    if not os.path.isfile('./BFM/BFM_model_front.mat'):
        transferBFM09()
    Face_model = BFM()
    BFM_net = compute_bfm(
        torch.tensor(Face_model.idBase, dtype=torch.float16),
        torch.tensor(Face_model.exBase, dtype=torch.float16),
        torch.tensor(Face_model.meanshape, dtype=torch.float16),
        torch.tensor(Face_model.texBase, dtype=torch.float16),
        torch.tensor(Face_model.meantex, dtype=torch.float16),
        torch.tensor(Face_model.tri, dtype=torch.int32))
    id_coeff = BFM_coeff[:, 0:80]
    ex_coeff = BFM_coeff[:, 80:144]
    tex_coeff = BFM_coeff[:, 144:224]
    print(id_coeff)
    vertices, textures, tri = BFM_net(id_coeff, ex_coeff, tex_coeff)
    # draw object from different view
    mesh.reset_()
    elevation = BFM_coeff[:, 226]
    # elevation = torch.sum(elevation)
    azimuth = -90

    renderer.transform.set_eyes_from_angles(camera_distance, 0, 180)
    # images = renderer.render_mesh(mesh)
    print(vertices)
    print(mesh.faces)
    faces = torch.tensor(Face_model.tri, dtype=torch.int32).to(device) - 1
    faces = faces.unsqueeze(0)
    print(faces)

    images = renderer.forward(mesh.vertices,
                              mesh.faces,
                              mesh.textures,
                              texture_type='vertex')
    print(images)
    image = images.detach().cpu().numpy()[0, 0:3].transpose((1, 2, 0))
    image = (image * 255).astype(np.uint8)

    plt.figure(0)
    plt.imshow(image)
    plt.show()

    img_name1 = 'D:/files/project/data/human_faces/CACD2000/CACD2000/17_Jennifer_Lawrence_0013.jpg'
    img_name2 = 'D:/files/project/data/human_faces/CACD2000/CACD2000/17_Lily_Cole_0008.jpg'

    predictor_model = './model/shape_predictor_68_face_landmarks.dat'
    detector = dlib.get_frontal_face_detector()  # dlib face detector
    predictor = dlib.shape_predictor(predictor_model)

    img1 = cv2.imread(img_name1)
    image2 = cv2.imread(img_name2)
    image1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    # image2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    points1 = np.zeros((68, 2))
    points2 = np.zeros((68, 2))
    rects, scores, idx = detector.run(image1, 2, 0)
    detections = dlib.full_object_detections()
    for rect in rects:
        detections.append(predictor(image1, rect))
    i = 0
    for landmark in detections:
        for idx, point in enumerate(landmark.parts()):
            points1[i, 0] = point.x
            points1[i, 1] = point.y
            i = i + 1
    h, w, c = image1.shape
    show_image(image1, points1)

    rects, scores, idx = detector.run(image2, 2, 0)
    detections = dlib.full_object_detections()
    for rect in rects:
        detections.append(predictor(image2, rect))
    i = 0
    for landmark in detections:
        for idx, point in enumerate(landmark.parts()):
            points2[i, 0] = point.x
            points2[i, 1] = point.y
            i = i + 1
    h, w, c = image2.shape
    show_image(image2, points2)

    tr = trans.estimate_transform('affine', src=points1, dst=points2)
    M = tr.params[0:2, :]
    cv_img = cv2.warpAffine(image1, M, (image2.shape[1], image2.shape[0]))
    show_image(cv_img, points2)

    param = np.linalg.inv(tr.params)
    theta = normalize_transforms(param[0:2, :], w, h)

    to_tensor = torchvision.transforms.ToTensor()
    tensor_img = to_tensor(image1).unsqueeze(0)
    theta = torch.Tensor(theta).unsqueeze(0)

    grid = F.affine_grid(theta, tensor_img.size())
    tensor_img = F.grid_sample(tensor_img, grid)
    tensor_img = tensor_img.squeeze(0)
    warp_img = convert_image_np(tensor_img)
    show_image(warp_img, points2)

    vertices = vertices[0].detach().cpu().numpy()
    faces = faces[0].detach().cpu().numpy() + 1
    textures = textures[0].detach().cpu().numpy()
    load.save_obj('./123.obj', vertices, faces, textures)
Example #13
def label_to_tensor(v):
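    """Convert a list of labels into a column FloatTensor Variable on the
    current device."""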
    # print len(v)
    return Variable(torch.FloatTensor(v).view(len(v), -1).to(device))


def softmax(x):
    """Compute softmax values for each sets of scores in x."""
    e_x = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e_x / e_x.sum(axis=0)


if __name__ == '__main__':
    if len(sys.argv) > 2:
        target = sys.argv[1]
        test_index = int(sys.argv[2])
        # option = sys.argv[3]
        evd = compute_single_evidence(target, test_index, tops=2000)
        collected = contruct_collection(evd, option='all')
        save_obj(collected, 'collected-all')
        collected = contruct_collection(evd)
        save_obj(collected, 'collected-theta')
    # elif len(sys.argv) >2:
    #     target = sys.argv[1]
    #     # k = int(sys.argv[2])
    #     # tops = int(sys.argv[3])
    #     test_index = int(sys.argv[2])
    #     evd = compute_single_evidence(target,test_index,tops=2000)

    else:
        print "<target,test_index>"