Example No. 1
import glob
import os

import cv2

import dataset    # project-local modules
import geometry
import pano_pair


def main(opt):
    print('preparing dataset...')
    all_images, data_matrix = dataset.prepare(opt.image_data_fn, opt.image_dir,
                                              opt.workspace_dir)

    print('using IMU to de-transform images...')
    geometry.changePerspective(all_images, data_matrix, opt.workspace_dir,
                               opt.resize_scale, opt.use_imu)

    print('stitching images...')
    image_list = sorted(glob.glob(os.path.join(opt.workspace_dir, "*.png")))
    detector = cv2.xfeatures2d.SURF_create(300)
    result = cv2.imread(image_list[0])
    for i in range(1, len(image_list)):
        image = cv2.imread(image_list[i])

        try:
            result = pano_pair.combine(result, image, detector,
                                       opt.valid_ratio, opt.use_affine)
            cv2.imwrite(os.path.join(opt.results_dir, "int_res_%d.png" % i),
                        result)
            print("Stitched %d images" % (i + 1))

        except Exception:
            # Skip frames that fail to match and keep the current panorama.
            print("Failed to stitch image %d" % i)

        # Cap the larger dimension at 4000 px so the growing panorama stays
        # manageable; scale both dimensions by the same factor to preserve
        # the aspect ratio.
        h, w = result.shape[:2]
        if max(h, w) > 4000:
            scale = 4000.0 / max(h, w)
            result = cv2.resize(result, (int(w * scale), int(h * scale)))

    cv2.imwrite(os.path.join(opt.results_dir, "final_result.png"), result)
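A hypothetical driver for this example; the option names mirror the attributes read from `opt` above, and every default below is illustrative rather than taken from the original repository:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_data_fn", default="data/image_data.txt")
    parser.add_argument("--image_dir", default="data/images")
    parser.add_argument("--workspace_dir", default="workspace")
    parser.add_argument("--results_dir", default="results")
    parser.add_argument("--resize_scale", type=float, default=0.5)
    parser.add_argument("--valid_ratio", type=float, default=0.5)
    parser.add_argument("--use_imu", action="store_true")
    parser.add_argument("--use_affine", action="store_true")
    main(parser.parse_args())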
Example No. 2

# InferenceConfig, dataset, dataset_dir, MODEL_DIR, weight_path and
# image_path are defined elsewhere in the source this snippet was taken from.
config = InferenceConfig()
config.display()

# Device to load the neural network on.
# Useful if you're training a model on the same
# machine, in which case use CPU and leave the
# GPU for training.
#DEVICE = "/gpu:0"  # /cpu:0 or /gpu:0
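# For reference, Matterport-style Mask R-CNN demo notebooks typically pin
# the device like this (a sketch assuming the TF1.x-era API; kept commented
# so this snippet's behavior is unchanged):
#
#   import tensorflow as tf
#   DEVICE = "/cpu:0"
#   with tf.device(DEVICE):
#       model = modellib.MaskRCNN(mode="inference",
#                                 model_dir=MODEL_DIR, config=config)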

# Load validation dataset (note: this rebinds the imported module name
# `dataset` to the dataset instance).
dataset = dataset.CustomDataset()
dataset.load_custom(dataset_dir, "val")
dataset.prepare()

print("Images: {}\nClasses: {}".format(len(dataset.image_ids),
                                       dataset.class_names))

model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

print("Loading weights ", weight_path)
model.load_weights(weight_path, by_name=True)

n = 0
count_porn = 0
count_non = 0

for image_name in sorted(os.listdir(image_path)):
    if image_name.endswith(('.jpg', '.jpeg')):
        # The example is truncated here in the source; see the sketch below
        # for a plausible continuation.
        pass
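The example stops mid-loop in the source. A plausible completion is sketched below; the use of skimage and the assumption that class id 1 marks the positive class are illustrative, not taken from the original:

import skimage.io

for image_name in sorted(os.listdir(image_path)):
    if not image_name.endswith(('.jpg', '.jpeg')):
        continue
    n += 1
    image = skimage.io.imread(os.path.join(image_path, image_name))
    # Matterport-style Mask R-CNN returns one result dict per input image.
    r = model.detect([image], verbose=0)[0]
    # Assumed label convention: class id 1 marks the positive ("porn") class.
    if (r['class_ids'] == 1).any():
        count_porn += 1
    else:
        count_non += 1

print("porn: {}  non-porn: {}".format(count_porn, count_non))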
Example No. 3
def main(args):

    # get arguments
    rate_num = args.rate_num
    use_side_feature = args.use_side_feature
    lr = args.lr
    weight_decay = args.weight_decay
    num_epochs = args.num_epochs
    hidden_dim = args.hidden_dim
    side_hidden_dim = args.side_hidden_dim
    out_dim = args.out_dim
    drop_out = args.drop_out
    split_ratio = args.split_ratio
    save_steps = args.save_steps
    log_dir = args.log_dir
    saved_model_folder = args.saved_model_folder
    use_data_whitening = args.use_data_whitening
    use_laplacian_loss = args.use_laplacian_loss
    laplacian_loss_weight = args.laplacian_loss_weight

    # Mark and record the training run; save the training arguments for
    # future analysis.
    post_fix = '/' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    log_dir = log_dir + post_fix
    os.makedirs(log_dir, exist_ok=True)
    writer = SummaryWriter(log_dir=log_dir)
    with open(log_dir + '/test.txt', 'a') as f:
        f.write(str(vars(args)))

    print(log_dir)

    # get prepared data
    (feature_u, feature_v, feature_dim, all_M_u, all_M_v, side_feature_u,
     side_feature_v, all_M, mask, user_item_matrix_train,
     user_item_matrix_test, laplacian_u, laplacian_v) = prepare(args)

    if not os.path.exists(saved_model_folder):
        os.makedirs(saved_model_folder)
    weights_name = saved_model_folder + post_fix + '_weights'

    net = utils.create_models(feature_u, feature_v, feature_dim, hidden_dim,
                              rate_num, all_M_u, all_M_v, side_hidden_dim,
                              side_feature_u, side_feature_v, use_side_feature,
                              out_dim, drop_out)
    net.train()  # in train mode

    # create AMSGrad optimizer
    optimizer = optim.Adam(net.parameters(), lr=lr,
                           weight_decay=weight_decay, amsgrad=True)
    Loss = utils.loss(all_M, mask, user_item_matrix_train,
                      laplacian_loss_weight)
    iter_bar = tqdm(range(num_epochs), desc='Iter (loss=X.XXX)')
    for epoch in iter_bar:

        optimizer.zero_grad()

        score = net.forward()

        if use_laplacian_loss:
            loss = Loss.laplacian_loss(score, laplacian_u, laplacian_v)
        else:
            loss = Loss.loss(score)

        loss.backward()

        optimizer.step()

        with torch.no_grad():
            rmse = Loss.rmse(score)

            val_rmse = validate(score, rate_num, user_item_matrix_test)
            iter_bar.set_description(
                'Iter (loss=%5.3f, rmse=%5.3f, val_rmse=%5.5f)' %
                (loss.item(), rmse.item(), val_rmse.item()))

            # Log all three metrics to TensorBoard.
            writer.add_scalars('scalar', {'loss': loss.item(),
                                          'rmse': rmse.item(),
                                          'val_rmse': val_rmse.item()}, epoch)

        if epoch % save_steps == 0:
            torch.save(net.state_dict(), weights_name)

    rmse = Loss.rmse(score)
    print('Final training RMSE: ', rmse.data.item())
    torch.save(net.state_dict(), weights_name)

    # Convert the per-rating scores into an expected rating: softmax over the
    # rating dimension, then a weighted sum of the rating values 1..rate_num.
    sm = nn.Softmax(dim=0)
    score = sm(score)
    score_list = torch.split(score, rate_num)
    pred = 0
    for i in range(rate_num):
        pred += (i + 1) * score_list[0][i]

    pred = utils.var_to_np(pred)

    # Test performance on the held-out entries only.
    test_mask = user_item_matrix_test > 0

    square_err = (pred * test_mask - user_item_matrix_test)**2
    mse = square_err.sum() / test_mask.sum()
    test_rmse = np.sqrt(mse)
    print('Test RMSE: ', test_rmse)
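The `validate` helper is not shown in this example. A plausible implementation, assuming it mirrors the test-RMSE computation at the end of `main` (the masking convention is inferred, not confirmed):

def validate(score, rate_num, user_item_matrix_test):
    # Expected rating from per-rating scores, then RMSE over the observed
    # test entries only.
    with torch.no_grad():
        prob = nn.Softmax(dim=0)(score)
        pred = sum((i + 1) * prob[i] for i in range(rate_num))
        truth = torch.as_tensor(user_item_matrix_test, dtype=pred.dtype,
                                device=pred.device)
        mask = (truth > 0).to(pred.dtype)
        mse = ((pred * mask - truth) ** 2).sum() / mask.sum()
        return torch.sqrt(mse)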
Example No. 4
import pickle

from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
from dataset import get_pandas_df, prepare, create_samples


df = get_pandas_df()
df = prepare(df)

X, y = create_samples(0, batch_size=178843, prepared=df)

mr = MultiOutputRegressor(RandomForestRegressor(n_estimators=50,
                                                max_depth=30,
                                                random_state=0,
                                                verbose=True))

print("training model")
mr.fit(X[:10000], y[:10000])
with open('random_forest_model.normed.pkl', 'wb') as f:
    pickle.dump(mr, f)
print("finished training")

# mr = pickle.load(open('random_forest_model.normed.pkl', 'rb'))

print(mr.score(X[10000:], y[10000:]))
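To reuse the pickled model later (a usage sketch; the file name matches the one written above):

import pickle

with open('random_forest_model.normed.pkl', 'rb') as f:
    mr = pickle.load(f)

# Predict on a few held-out samples.
print(mr.predict(X[10000:10005]))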
Example No. 5
def main(args):

    # get arguments
    rate_num = args.rate_num
    use_side_feature = args.use_side_feature  # whether to use side features
    use_GAT = args.use_GAT
    lr = args.lr
    weight_decay = args.weight_decay
    num_epochs = args.num_epochs
    hidden_dim = args.hidden_dim
    side_hidden_dim = args.side_hidden_dim
    out_dim = args.out_dim
    drop_out = args.drop_out
    split_ratio = args.split_ratio
    save_steps = args.save_steps
    saved_model_folder = args.saved_model_folder
    laplacian_loss_weight = args.laplacian_loss_weight

    post_fix = '/' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())

    # data preprocessing
    feature_u, feature_v, feature_dim, all_M_u, all_M_v, side_feature_u, side_feature_v, all_M, mask,\
    user_item_matrix_train, user_item_matrix_test, laplacian_u, laplacian_v = prepare(args)

    if not os.path.exists(saved_model_folder):
        os.makedirs(saved_model_folder)  
    weights_name = saved_model_folder + post_fix + '_weights'

    net = utils.create_models(feature_u, feature_v, feature_dim, hidden_dim, rate_num, all_M_u, all_M_v,
                              side_hidden_dim, side_feature_u, side_feature_v,
                              use_side_feature, use_GAT, out_dim, user_item_matrix_train, drop_out)
    net.train()

    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
    Loss = utils.loss(all_M, mask, user_item_matrix_train, laplacian_loss_weight)
    iter_bar = tqdm(range(num_epochs), desc='Iter (loss=X.XXX)')

    for epoch in iter_bar:

        optimizer.zero_grad()

        score = net.forward()

        loss = Loss.loss(score)

        loss.backward()

        optimizer.step()

        with torch.no_grad():
            rmse = Loss.rmse(score)
            
            val_rmse = validate(score, rate_num, user_item_matrix_test)
            iter_bar.set_description(
                'Iter (loss=%5.3f, rmse=%5.3f, val_rmse=%5.5f)' %
                (loss.item(), rmse.item(), val_rmse.item()))

        if epoch % save_steps == 0:
            torch.save(net.state_dict(), weights_name)

    rmse = Loss.rmse(score)
    print('Final training RMSE: ', rmse.data.item())        
    torch.save(net.state_dict(), weights_name)
    
    # Convert the per-rating scores into an expected rating: softmax over the
    # rating dimension, then a weighted sum of the rating values 1..rate_num.
    sm = nn.Softmax(dim=0)
    score = sm(score)
    score_list = torch.split(score, rate_num)
    pred = 0
    for i in range(rate_num):
        pred += (i + 1) * score_list[0][i]

    pred = utils.var_to_np(pred)

    test_mask = user_item_matrix_test > 0

    square_err = (pred * test_mask - user_item_matrix_test) ** 2
    mse = square_err.sum() / test_mask.sum()
    test_rmse = np.sqrt(mse)
    print('Test RMSE: ', test_rmse)
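A hypothetical argparse driver matching the attribute names read in main() above; every default below is illustrative, not taken from the repository:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--rate_num', type=int, default=5)
    parser.add_argument('--use_side_feature', action='store_true')
    parser.add_argument('--use_GAT', action='store_true')
    parser.add_argument('--lr', type=float, default=1e-2)
    parser.add_argument('--weight_decay', type=float, default=1e-5)
    parser.add_argument('--num_epochs', type=int, default=1000)
    parser.add_argument('--hidden_dim', type=int, default=100)
    parser.add_argument('--side_hidden_dim', type=int, default=10)
    parser.add_argument('--out_dim', type=int, default=75)
    parser.add_argument('--drop_out', type=float, default=0.0)
    parser.add_argument('--split_ratio', type=float, default=0.8)
    parser.add_argument('--save_steps', type=int, default=100)
    parser.add_argument('--saved_model_folder', default='./weights')
    parser.add_argument('--laplacian_loss_weight', type=float, default=0.05)
    main(parser.parse_args())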