box_creator = PriorBoxCreator(model)
prior_boxes = box_creator.create_boxes()

layer_scale, box_arg = 0, 780
box_coordinates = prior_boxes[layer_scale][box_arg, :, :]
image_path = '../images/'
image_key = '007040.jpg'
box_creator.draw_boxes(image_path + image_key, box_coordinates)

data_path = '../datasets/VOCdevkit/VOC2007/'
ground_truths = XMLParser(data_path + 'Annotations/').get_data()
prior_box_manager = PriorBoxAssigner(prior_boxes, ground_truths)
assigned_boxes = prior_box_manager.assign_boxes()
prior_box_manager.draw_assigned_boxes(image_path, image_shape[0:2], image_key)
batch_size = 7
train_keys, validation_keys = split_data(assigned_boxes, training_ratio=.8)

assigned_image_generator = ImageGenerator(assigned_boxes, batch_size,
                                          image_shape[0:2], train_keys,
                                          validation_keys,
                                          data_path + 'JPEGImages/')

transformed_image = next(assigned_image_generator.flow(mode='demo'))[0]
transformed_image = np.squeeze(transformed_image[0]).astype('uint8')
original_image = read_image(data_path + 'JPEGImages/' + validation_keys[0])
original_image = resize_image(original_image, image_shape[0:2])
plt.figure(1)
plt.subplot(121)
plt.title('Original image')
plt.imshow(original_image)
plt.subplot(122)
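Several of the examples on this page split a dict of ground-truth annotations by image key with a training_ratio argument. A minimal sketch of such a helper, assuming it shuffles the keys and returns two key lists; the name split_data_by_keys and its behavior are illustrative, not the library's actual implementation:

import random

def split_data_by_keys(ground_truth_data, training_ratio=.8):
    # Shuffle the annotation keys and cut them into train/validation lists.
    keys = list(ground_truth_data.keys())
    random.shuffle(keys)
    num_train = int(round(training_ratio * len(keys)))
    return keys[:num_train], keys[num_train:]
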
Example no. 2
def main(**kwargs):
    import dill as pickle
    from datetime import datetime
    exp_dir = os.getcwd() + '/data/feature_net/' + kwargs['input_label'][0] + kwargs['output_label'][0] + '/'
    logger.configure(dir=exp_dir, format_strs=['stdout', 'log', 'csv'], snapshot_mode='last')
    json.dump(kwargs, open(exp_dir + '/params.json', 'w'), indent=2, sort_keys=True, cls=ClassEncoder)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = kwargs.get('gpu_frac', 0.95)
    sess = tf.Session(config=config)

    mode = kwargs['mode'][0]
    visualize_training_data = kwargs['visualize_training_data'][0]
    visualize_testing_data = kwargs['visualize_testing_data'][0]
    visualize_new_data = kwargs['visualize_new_data'][0]

    if mode == 'restore':
        saver = tf.train.import_meta_graph(exp_dir + '-999.meta')
        saver.restore(sess, tf.train.latest_checkpoint(exp_dir))
        graph = tf.get_default_graph()

    with sess.as_default() as sess:

        # folder = './data/policy/' + kwargs['env'][0]
        # buffer, fixed_num_of_contact = pickle.load(open('../saved/HandManipulateEgg-v0-fix9.pickle', 'rb'))

        # Load the first saved data file, then append the remaining files so
        # that every key in the buffer maps to one concatenated array.
        buffer = {}
        name = 's1'
        paths, fixed_num_of_contact = pickle.load(open('../saved/soft/' + name + '80-dict.pickle', 'rb'))
        for key in paths:
            buffer[key] = paths[key]

        for name in ['s2', 's4', 's5', 's6', 'soft3']:
            paths, fixed_num_of_contact = pickle.load(open('../saved/soft/' + name + '80-dict.pickle', 'rb'))
            for key in paths:
                buffer[key] = np.concatenate([buffer[key], paths[key]], axis=0)


        env = gym.make(kwargs['env'][0],
                       obs_type = kwargs['obs_type'][0],
                       fixed_num_of_contact = fixed_num_of_contact)

        # Cap every array in the buffer at the first 1e6 entries.
        for key in buffer:
            buffer[key] = buffer[key][:int(1e6)]


        niters = buffer['positions'].shape[0] // 100
        print("total iteration: ", niters)


        ngeoms = env.sim.model.ngeom
        input_label = kwargs['input_label'][0]
        output_label = kwargs['output_label'][0]
        start = time.time()
        # paths = expand_data(buffer, ngeoms, fixed_num_of_contact, input_label, output_label)
        # print("expand data:", time.time() - start)
        paths = buffer

        start = time.time()
        train_data, test_data, vis_data, vis_data_test = split_data(paths, niters)
        print("split data:", time.time() - start)

        train_data['object_position'] = train_data['object_position'][:, :, :3]
        vis_data['original_object_position'] = vis_data['object_position']
        vis_data_test['original_object_position'] = vis_data_test['object_position']
        test_data['object_position'] = test_data['object_position'][:, :, :3]

        labels_to_dims = {}
        labels_to_dims['contacts'] = 3+6+ngeoms
        labels_to_dims['positions'] = 3
        # labels_to_dims['object_position'] = 7
        labels_to_dims['object_position'] = 3
        labels_to_dims['joint_position'] = 24
        labels_to_dims['object_vel'] = 6
        labels_to_dims['joint_vel'] = 24
        labels_to_dims['geoms'] = ngeoms



        dims = (labels_to_dims[input_label], labels_to_dims[output_label])
        print("preparation done")



        num_episodes = 1
        horizon = 100
        if visualize_training_data:
            visualize_data(vis_data, env, fixed_num_of_contact, feature_net, mode, input_label)
        if visualize_testing_data:
            visualize_data(vis_data_test, env, fixed_num_of_contact, feature_net, mode, input_label)
Example no. 3
def class_accuracy(y_true, y_pred):
    y_pred_classification = y_pred[:, :, 4:(4 + num_classes)]
    y_true_classification = y_true[:, :, 4:(4 + num_classes)]
    return categorical_accuracy(y_true_classification, y_pred_classification)


multibox_loss = MultiboxLoss(num_classes, neg_pos_ratio=2.0).compute_loss
model.compile(optimizer=Adam(lr=3e-4),
              loss=multibox_loss,
              metrics=[class_accuracy])
box_creator = PriorBoxCreator(model)
prior_boxes = box_creator.create_boxes()
ground_truth_manager = XMLParser(ground_data_prefix,
                                 background_id=None,
                                 class_names=classes)
ground_truth_data = ground_truth_manager.get_data()
print('Number of ground truth samples:', len(ground_truth_data.keys()))
train_keys, validation_keys = split_data(ground_truth_data, training_ratio=.8)

prior_box_manager = PriorBoxManager(prior_boxes,
                                    box_scale_factors=[.1, .1, .2, .2],
                                    num_classes=num_classes)

image_generator = ImageGenerator(ground_truth_data,
                                 prior_box_manager,
                                 batch_size,
                                 image_shape[0:2],
                                 train_keys,
                                 validation_keys,
                                 image_prefix,
                                 vertical_flip_probability=0,
                                 horizontal_flip_probability=0.5)
Example no. 4
def main(config):
    raw_data = read_csv(config["filename"])

    # Check if chps are already saved
    saved, chps = check_changepoints(config["filename"])

    # Online changepoint
    if not saved:
        det = detection.BayesOnline()
        chp_online = det.find_changepoints(raw_data,
                                           past=50,
                                           prob_threshold=0.3)
        chps = chp_online[1:]

    # Evaluation bayesian analysis
    if config['split']:
        eval_bayesian(chps, raw_data)

    # Type of dataset (yearly,quarterly...)
    n_step = timeperiod(config['filename'])

    # Split in N train/test set (data + feature)
    if config['processing'] == 'indicators':
        train_data, test_data = split_with_indicators(raw_data, chps, n_step)

    elif config['processing'] == 'difference':
        raw_data = compute_diff(raw_data)
        train_data, test_data = split_data(raw_data, chps, n_step)

    else:
        raw_data = np.array(raw_data).reshape(-1, 1)
        train_data, test_data = split_data(raw_data, chps, n_step)

    # Cuda
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Device: {device}")
    if torch.cuda.is_available():
        print(torch.cuda.get_device_name(0))

    # Setup the model
    input_size = train_data[0][0][0].size()[0]
    if config["regression"]:
        model = RegressionMLP(input_size=input_size)
        model = model.to(device)
        loss = nn.MSELoss()
        optimizer = torch.optim.Adadelta(model.parameters(), lr=config["lr"])
    else:
        model = ClassficationMLP(input_size=input_size)
        model = model.to(device)
        loss = nn.BCELoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])

    epochs = config["epochs"]
    print(model)

    # Online training
    if config["online"]:
        global_writer = SummaryWriter(
            './runs/online/train/global/' +
            datetime.datetime.now().strftime('%m_%d_%H_%M'))
        for index, train_set in enumerate(train_data):
            print(f"----- DOMAIN {index} -----")
            train_loader = DataLoader(train_set,
                                      batch_size=config["batch_size"],
                                      shuffle=False)
            utils.training.train_online(train_loader,
                                        model=model,
                                        loss=loss,
                                        optimizer=optimizer,
                                        epochs=epochs,
                                        device=device,
                                        domain=index,
                                        global_writer=global_writer)
            # Test on domain just trained + old domains
            for past in range(index + 1):
                print(f"Testing model on domain {past}")
                test_loader = DataLoader(test_data[past],
                                         batch_size=1,
                                         shuffle=False)
                utils.training.test(model=model,
                                    loss=loss,
                                    test_loader=test_loader,
                                    device=device)

    # Continual learning with ER
    if config['continual']:
        utils.training.train_cl(train_set=train_data,
                                test_set=test_data,
                                model=model,
                                loss=loss,
                                optimizer=optimizer,
                                config=config,
                                device=device)
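The example above calls split_data(raw_data, chps, n_step) to cut a series at the detected changepoints into per-domain train/test sets. A minimal sketch under that assumption; the helper name and the rule of holding out the last n_step points of each segment are illustrative, not the repository's actual implementation:

import numpy as np

def split_at_changepoints(series, changepoints, n_step):
    # Cut the series at each changepoint; within every segment keep the last
    # n_step observations as the test portion (illustrative rule only).
    bounds = [0] + sorted(changepoints) + [len(series)]
    train_sets, test_sets = [], []
    for start, end in zip(bounds[:-1], bounds[1:]):
        segment = np.asarray(series[start:end])
        if len(segment) <= n_step:
            continue  # segment too short to hold out a test window
        train_sets.append(segment[:-n_step])
        test_sets.append(segment[-n_step:])
    return train_sets, test_sets
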
Example no. 5
import os

import flask
import pandas as pd
import tensorflow as tf

from utils.utils import inputs, input_data, scale_data, split_data, predict_transf

df = pd.read_csv('data/VCD_data_1.csv', parse_dates=[0])
model = tf.keras.models.load_model(os.path.join("temp_models/VCD-1.h5"))
model.summary()
app = flask.Flask(__name__, static_folder='static')
data = scale_data(df)
# print(data[-5:])
lookback = 3
# tf.saved_model.save(model, export_path)
labels = ["Một ngày", "Hai ngày", "Ba ngày"]  # "One day", "Two days", "Three days"
df['Date'] = df['Date'].apply(lambda x: (x.year, x.month, x.day))
date, train_x, test_x = split_data(df, lookback, 1)
predict_data = predict_transf(model.predict(train_x))


@app.route("/", methods=['GET', 'POST'])
def main():
    values = zip(df['Date'], df['Vam co dong'])
    aut = zip(labels, df['Vam co dong'][-3:])

    # print(aut)
    # if flask.request.method == 'GET':
    return (flask.render_template('index.html',
                                  predict_1="None",
                                  predict_2="None",
                                  predict_3="None",
                                  values=values,
Example no. 6
from linear_regression import LinearRegression
import sys
sys.path.append('../')
from utils import utils
from loss import loss
import numpy as np

if __name__ == '__main__':
    with open('../data/housing.txt') as f:
        j = f.readlines()

    data = [list(map(float, s.split())) for s in j]
    data = np.array(data)
    X, y = data[:, :-1], data[:, -1]
    X_train, y_train, X_test, y_test = utils.split_data(X, y)
    X_train, X_test = utils.normalize(X_train), utils.normalize(X_test)
    lr = LinearRegression()
    lr.fit(X=X_train, y=y_train)
    y_train_pred, y_test_pred = lr.predict(X_train), lr.predict(X_test)
    train_abs_err = np.sum(np.abs(y_train_pred - y_train))
    test_abs_err = np.sum(np.abs(y_test_pred - y_test))
    print "train sae: {} , test sae: {}".format(train_abs_err, test_abs_err)
log_file_path = 'classification.log'

# loading data
data_manager = XMLParser(annotations_path)
ground_truth_data = data_manager.get_data(['background', 'bottle'])
print('Number of real samples:', len(ground_truth_data))
class_names = data_manager.class_names
num_classes = len(class_names)
print('Found classes: \n', class_names)

# creating prior boxes
prior_box_creator = PriorBoxCreator()
prior_boxes = prior_box_creator.create_boxes()
prior_box_manager = PriorBoxManager(prior_boxes, num_classes)

train_keys, val_keys = split_data(ground_truth_data, validation_split)
image_generator = ImageGenerator(ground_truth_data,
                                 prior_box_manager,
                                 batch_size,
                                 image_shape[0:2],
                                 train_keys,
                                 val_keys,
                                 image_prefix,
                                 vertical_flip_probability=0,
                                 suffix='')

# model parameters
sgd = SGD(lr=0.01, momentum=0.8, decay=0.1, nesterov=True)
model = simple_CNN(image_shape, num_classes)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
Example no. 8
        print("No preprocessed file found. Starting data preprocessing...")
        pairs = read_lines(os.path.join(start_root, DATA_DIR), FILENAME)
        pairs, path = preprocess_pipeline(pairs, cleaned_file, exp_contraction, max_len = max_sent_len) #data/prepro/eng-deu_cleaned_full.pkl

    ### Get sample ###
    print("Sample from data:")
    print(random.choice(pairs))

    src_sents, trg_sents = [], []


    if limit:
        pairs = pairs[:limit]
        print("Limit set: %s" % str(limit))

    train_set, val_set, test_set = split_data(pairs, seed=args.seed)
    print("Data in train set:", len(train_set))
    print("Data in val set:", len(val_set))
    print("Data in test set:", len(test_set))

    print("Building vocabularies...")

    if voc_all:
        train_data = train_set + val_set
    else:
        train_data = train_set

    src_sents = [item[0] for item in train_data]
    trg_sents = [item[1] for item in train_data]

    max_src_l = max_length(src_sents)
Example no. 9
batch_size = 16

# Desired image dimensions
img_width = 250
img_height = 80

# Mapping characters to integers
char_to_num = layers.experimental.preprocessing.StringLookup(
    vocabulary=list(characters), num_oov_indices=0, mask_token=None)

# Mapping integers back to original characters
num_to_char = layers.experimental.preprocessing.StringLookup(
    vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True)

# Splitting data into training and validation sets
X_train, X_valid, y_train, y_valid = split_data(np.array(images),
                                                np.array(labels))

train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train_dataset = (train_dataset.map(
    lambda x, y: encode_single_sample(x, y, img_height, img_width, char_to_num
                                      ),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).padded_batch(
        batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE))

validation_dataset = tf.data.Dataset.from_tensor_slices((X_valid, y_valid))
validation_dataset = (validation_dataset.map(
    lambda x, y: encode_single_sample(x, y, img_height, img_width, char_to_num
                                      ),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).padded_batch(
        batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE))
                        type=float,
                        help="Ratio of size of test dataset")

    args = parser.parse_args()

    # Using default netflix dataset, using url to download data or load data from path.
    if args.mode == "netflix":
        data = get_data()

    elif args.mode == "download":
        data = get_data(args.mode, args.data_url)

    else:
        data = get_data(args.mode, "", args.data_path)

    train, test = split_data(data, SEED, args.test_ratio)

    params = dict()
    params["epoch"] = args.epoch
    params["lambda"] = args.lamda
    params["momentum"] = args.momentum
    params["batch_size"] = args.batch_size
    params["lr"] = args.lr
    params["features"] = args.features
    params["users"] = np.unique(data[:, 0]).shape[0]
    params["products"] = np.unique(data[:, 1]).shape[0]

    if args.algorithm == "PMF":
        PMF_experiment = PMF(params)
        PMF_experiment.fit(train, test)
        PMF_experiment.plot_loss()