def setUp(self):
    self.vals = []
    self.provider_status = None
    f = self.add_val
    self.provider = dp.DataProvider(onData=f, onError=f,
                                    onStart=self.on_start,
                                    onStop=self.on_stop)
Example #2
def example():
    # Create a new DataProvider.
    dp1 = dp.DataProvider()

    # You can add input data (bytes) to the data provider.
    example_input_data = bytes([0x0a, 0x37, 0x13, 0x00])
    dp1.add_input_by_data(example_input_data)

    # Or you can add input from an existing data file.
    example_input_path = os.path.join('path', 'to', 'input_data')
    dp1.add_input_by_data_path(example_input_path)

    # Add output.
    # data_type: 0 (float32), 1 (float16)
    size = 2
    data_type = 1
    dp1.add_output('out0.data', size, data_type)

    # Add expected data.
    example_expect_data = bytes([0x3f, 0x1b, 0xf2, 0xcf])
    dp1.add_expect_by_data(example_expect_data)

    # Or add the path to an expected data file.
    example_expect_path = os.path.join('path', 'to', 'expect_data')
    dp1.add_expect_by_data_path(example_expect_path)

    # [OPTIONAL] Set precision_deviation and statistical_discrepancy.
    dp1.set_precision_deviation(0.2)
    dp1.set_statistical_discrepancy(0.2)

    return dp1
Example #3
def load_data_provider(args):
    """
    Either load a matching data provider if one exists, or create one.
    """
    DP = dp.DataProvider(args.data_file,
                         args.inputs,
                         args.targets,
                         train_proportion=args.train_proportion,
                         val_proportion=args.val_proportion)
    print("Loaded dataset.")
    return DP
Example #4
    def train(self, train_epoch, exp_name, lr):

        # inputs & outputs format
        x = tf.placeholder(tf.float32, [None, 160, 180])
        y = tf.placeholder('float', [None, 8])

        # x_ = tf.unstack(x, 160, 1)

        # construct computation graph
        pred = self.lstm_predictor(x)
        loss = self.compute_loss(pred, y)

        accuracy = self.compute_accuracy(pred, y)

        train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)

        with tf.Session() as sess:
            # initialization
            init = tf.global_variables_initializer()
            sess.run(init)

            dstc_data = data_provider.DataProvider(self.path, data_form=2)
            log_saver = logger.LogSaver(exp_name)
            # train
            for epoch in range(train_epoch):
                batch_x, batch_y = dstc_data.train.task1.next_batch(100)
                sess.run(train_op, feed_dict={x: batch_x, y: batch_y})

                # validating
                if epoch % 10 == 0 and epoch != 0:
                    train_loss = loss.eval(feed_dict={x: batch_x, y: batch_y})

                    val_x, val_y = dstc_data.test1.task1.next_batch(100)
                    val_acc = accuracy.eval(feed_dict={x: val_x, y: val_y})
                    print(
                        'Training {0} epoch, validation accuracy is {1}, training loss is {2}'
                        .format(epoch, val_acc, train_loss))

                    log_saver.train_process_saver([epoch, train_loss, val_acc])
                    # self.save_log([epoch, train_loss, val_acc], exp_name)

            # evaluate
            test_sets = [
                None, dstc_data.test1, dstc_data.test2, dstc_data.test3,
                dstc_data.test4
            ]

            for index, test_set in enumerate(test_sets):
                if test_set is None:
                    # Index 0 is a placeholder so the test sets are numbered from 1.
                    continue
                test_x, test_y = test_set.task1.next_batch(100)
                test_acc = sess.run(accuracy, feed_dict={x: test_x, y: test_y})
                print('test accuracy on test set {0} is {1}'.format(
                    index, test_acc))
                # save training log
                log_saver.test_result_saver([test_acc], index)
Example #5
def main(_):
    sess = tf.Session()
    dcgan_obj = dcgan.DCGAN(sess, FLAGS.checkpoint_dir, FLAGS.gen_sample_dim,
                            FLAGS.input_size, FLAGS.layer_ratio,
                            FLAGS.disc_base_dim, FLAGS.gen_base_dim,
                            FLAGS.learning_rate)
    dcgan_obj.build_network()
    dcgan_obj.init_model()
    dp = data_provider.DataProvider(FLAGS.data_dir, FLAGS.input_size)
    dp.load()
    for img in dp.iter():
        prob = dcgan_obj.predict([img])
        print(prob)
Example #6
def main(_):
    sess = tf.Session()
    dcgan_obj = dcgan.DCGAN(sess, FLAGS.checkpoint_dir, FLAGS.gen_sample_dim,
                            FLAGS.input_size, FLAGS.layer_ratio,
                            FLAGS.disc_base_dim, FLAGS.gen_base_dim,
                            FLAGS.learning_rate)
    dcgan_obj.build_network()
    dcgan_obj.init_model()
    dp = data_provider.DataProvider(FLAGS.data_dir, FLAGS.input_size)
    dp.load()
    try:
        for batch in dp.batchs(10000, FLAGS.batch_size):
            dcgan_obj.train(batch, FLAGS.sample_size)
    except Exception:
        # Ignore any training error so that the model below is still saved.
        pass
    dcgan_obj.save_model()
Example #7
    def test_data_provider_int(self):
        # Full integrity test including Flask
        self.dp = data_provider.DataProvider(self.provider)
        self.dp.set_password(self.password)
        str_backend = StorageServer(test_dir)
        with self.str_app.app_context():
            for r in self.sr:
                # check that bloom filter is empty
                b = str_backend.bloom
                self.assertNotIn(to_base64(r.get_long_hash()), b)
            # Check that DB empty
            res = str_backend.batch_get_records(
                [to_base64(r.get_long_hash()) for r in self.sr], "client")
        # Decrypt
        result = [
            Record.from_ciphertext(json.loads(r), self.enc_keys[0])
            for h, r in res
        ]
        self.assertEqual([], result)

        s = Session(True)
        with patch("requests.get", s.get), \
             patch("requests.post", s.post), \
             patch.object(self.dp, "_receive_ots",
                          Mock(return_value=self.enc_keys_int[:len(self.sr)])):
            self.dp.store_records(self.sr)

        str_backend = StorageServer(test_dir)
        with self.str_app.app_context():
            for r in self.sr:
                # check that records are in bloom filter
                b = str_backend.bloom
                self.assertIn(to_base64(r.get_long_hash()), b)
            # Check records in db
            res = str_backend.batch_get_records(
                [to_base64(r.get_long_hash()) for r in self.sr], "client")
        # Decrypt
        result = [
            Record.from_ciphertext(json.loads(r), self.enc_keys[0])
            for h, r in res
        ]
        for m in self.sr:
            self.assertIn(m, result)
        for r in result:
            self.assertIn(r, self.sr)
Example #8
# MNIST normalization parameters.
NORM_STD = 0.3081
NORM_MEAN = 0.1307
MIN = -NORM_MEAN / NORM_STD + 1e-4
MAX = (1 - NORM_MEAN) / NORM_STD - 1e-4
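# A raw pixel p in [0, 1] is normalized as (p - NORM_MEAN) / NORM_STD, so the
# normalized range is roughly [-0.4242, 2.8215]; the 1e-4 margins presumably
# keep MIN and MAX strictly inside that range.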

# Which VAE model to use.
config = build_config(sys.argv[1], logger)

# Which example from data provider to modify.
example_id = int(sys.argv[2])

# How many samples to get.
n_trials = int(sys.argv[3])

mode = config['vae_expander']['mode']
dp = data_provider.DataProvider('train_labeled.p')

vaes = augment(config, dp.loader)

for i in range(n_trials):
    examples = list(itertools.islice(vaes, example_id + 1))
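    # Undo the MNIST normalization (x * std + mean) and clamp to [0, 1] before saving.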
    z = (examples[example_id][0] * NORM_STD + NORM_MEAN).clamp(0, 1)
    file_name = '{}/vae_test_{}_{:02}_{:02}.png'.format(
        mode, mode, example_id, i)
    torchvision.utils.save_image(z, file_name, nrow=8)
    print(file_name)

vaes.print_info()
Example #9
    def train(self, config):
        d_optim = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(
            self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(0.0001, beta1=0.5).minimize(
            self.g_loss, var_list=self.g_vars)

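        # Build ops that clip every discriminator variable to [-0.01, 0.01]
        # (WGAN-style weight clipping), applied after each discriminator update.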
        clip_ops = []
        for var in self.d_vars:
            clip_ops.append(tf.assign(var, tf.clip_by_value(var, -0.01, 0.01)))

        clip_d_vars_op = tf.group(*clip_ops)

        tf.global_variables_initializer().run()

        data_provider = dp.DataProvider(config)

        sample_images = data_provider.load_sample()
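        # Fixed noise vectors, reused below whenever sample images are generated.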
        sample_z = np.random.uniform(-1, 1, size=(1, config.batch_size, 100))

        tf.summary.image("sample_org", yuv2rgb(sample_images), 8)
        self.save_images(yuv2rgb(sample_images), [1, config.batch_size],
                         os.path.join(config.run_dir, "org.png"))

        writer = tf.summary.FileWriter(config.run_dir)
        writer.add_graph(self.sess.graph)

        counter = 0
        while counter < config.iterations:
            print(counter)
            for k_d in range(0, config.disc_step):
                print(k_d)
                batch_images = data_provider.load_data(config)
                batch_z = np.random.uniform(
                    -1, 1, [config.batch_size, 100]).astype(np.float32)
                _, _g_loss, _d_loss, _loss = self.sess.run(
                    [d_optim, self.g_loss, self.d_loss, self.total_loss],
                    feed_dict={
                        self.z: batch_z,
                        self.images_YUV: batch_images
                    })
                self.sess.run([clip_d_vars_op], feed_dict={})

            for k_g in range(0, config.gen_step):
                batch_images = data_provider.load_data(config)
                batch_z = np.random.uniform(
                    -1, 1, [config.batch_size, 100]).astype(np.float32)
                self.sess.run([g_optim],
                              feed_dict={
                                  self.z: batch_z,
                                  self.images_YUV: batch_images
                              })

            if counter % config.save_samples_interval == 0:
                _generate_image, _g_loss, _d_loss, _loss = self.sess.run(
                    [
                        self.generated_images_YUV, self.g_loss, self.d_loss,
                        self.total_loss
                    ],
                    feed_dict={
                        self.z: sample_z[0],
                        self.images_YUV: sample_images
                    })
                _generate_image_rgb = yuv2rgb(_generate_image)
                tf.summary.image("sample_gen", _generate_image_rgb, 8)
                self.save_images(
                    _generate_image_rgb, [1, config.batch_size],
                    os.path.join(config.run_dir,
                                 "step" + str(counter) + ".png"))

            summ = tf.summary.merge_all()
            [s] = self.sess.run([summ],
                                feed_dict={
                                    self.z: sample_z[0],
                                    self.images_YUV: sample_images
                                })
            writer.add_summary(s, counter)

            if counter % config.save_model_interval == 0:
                self.save_model(config, counter)

            counter += 1
Example #10
import sys
sys.path.append("./dataset_new")
import data_provider as dt

#dataset = 'ME2'
#dataset = 'SAMM'
dataset = 'SAMM_MEGC'

data_handler = dt.DataProvider(dataset)
videos = data_handler.produce_videos(dataset, 'all')
videos_file = dataset + '.txt'

lines = []
for video in videos:
    num_frames = data_handler.num_frames(video)
    line = '{}_{} {}\n'.format(video[0], video[1], num_frames)
    lines.append(line)

with open(videos_file, 'w') as fw:
    fw.writelines(lines)
Example #11
def main():

    new_model = DeepestNetwork((25, 3, 120, 214))

    N = 4

    cwd = Path(os.getcwd())
    par = cwd.parent
    data_path = str(par / "data/DAVIS/JPEGImages/480p/")
    mask_path = str(par / "data/DAVIS/Annotations/480p/")

    tvt_split = (0.5, 0.7)

    X_train_t, X_val_t, X_test_t, y_train_t, y_val_t, y_test_t = generate_dataset_temporal(
        data_path, mask_path, tvt_split, N)

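    # Move the channel axis to the front (channels-last -> channels-first) for the CNN.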
    X_train_t = np.array(X_train_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_val_t = np.array(X_val_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_test_t = np.array(X_test_t).swapaxes(-1, -3).swapaxes(-2, -1)
    print(X_train_t.shape)
    print(X_val_t.shape)
    print(X_test_t.shape)
    y_train_t = np.array(y_train_t)
    y_val_t = np.array(y_val_t)
    y_test_t = np.array(y_test_t)
    print(y_train_t.shape)
    print(y_val_t.shape)
    print(y_test_t.shape)

    batch_size = 25
    train_data_t = data_providers.DataProvider(X_train_t,
                                               y_train_t,
                                               batch_size,
                                               shuffle_order=True)
    val_data_t = data_providers.DataProvider(X_val_t,
                                             y_val_t,
                                             batch_size,
                                             shuffle_order=True)
    test_data_t = data_providers.DataProvider(X_test_t,
                                              y_test_t,
                                              batch_size,
                                              shuffle_order=True)

    eb = ExperimentBuilder(new_model, "get_bear", 1, train_data_t, val_data_t,
                           test_data_t, True)

    model_path = Path(os.getcwd())
    model_path = model_path / "static_run_deepest" / "saved_models"

    bear_path = Path(
        os.getcwd()).parent / "data" / "DAVIS" / "JPEGImages" / "480p" / "bear"

    bear = np.asarray(
        Image.open(str(bear_path / "00001.jpg")).convert(mode="RGB"))

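    # Downsample, convert HWC -> CHW, and add a batch dimension for the network input.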
    inp = torch.Tensor(
        down_sample(np.asarray(bear),
                    4).swapaxes(0, 2).swapaxes(1, 2)).unsqueeze(0)

    out = eb.get_bear(model_path, inp)
    out = out.squeeze()

    predicted = torch.sigmoid(out) > 0.5

    mask = predicted.cpu().numpy().astype('uint8')

    mask = 255 * mask

    mask_img = Image.fromarray(mask, mode='L')

    bear = down_sample(bear, 4)
    bear = Image.fromarray(bear)

    overlay = overlay_segment(bear, mask_img)

    overlay.save("cnnbear.png")
Example #12
import random

import matplotlib.pyplot as plt
import numpy as np
import sonnet as snt
import tensorflow as tf

import data_provider


def _showRandom(data_source):
    img, loc, label = data_source()

    print("loc: {}".format(loc))
    print("label: {}".format(label))

    plt.imshow(img)
    plt.show()


data_source = data_provider.DataProvider(48, 48)
_showRandom(data_source)
training_data = []
for _ in range(1000):
    training_data.append(data_source())

training_images = np.vstack([np.expand_dims(d[0], 0) for d in training_data])
training_locs = np.vstack([np.expand_dims(d[1], 0) for d in training_data])
training_labels = np.vstack([np.expand_dims(d[2], 0) for d in training_data])

print("shapes: {} {} {}".format(training_images.shape, training_locs.shape,
                                training_labels.shape))

images_dataset = tf.data.Dataset.from_tensor_slices(training_images)
locs_dataset = tf.data.Dataset.from_tensor_slices(training_locs)
labels_dataset = tf.data.Dataset.from_tensor_slices(training_labels)
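
# [Sketch, not in the original] The three slice datasets above could then be
# combined element-wise and batched; the buffer and batch sizes here are
# illustrative assumptions.
dataset = tf.data.Dataset.zip((images_dataset, locs_dataset, labels_dataset))
dataset = dataset.shuffle(buffer_size=1000).batch(32)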
Example #13
def main():

    # down_sample factor
    N = 4

    # define paths to image and mask files
    cwd = Path(os.getcwd())
    par = cwd.parent
    data_path = str(par / "data/DAVIS/JPEGImages/480p/")
    mask_path = str(par / "data/DAVIS/Annotations/480p/")

    # training, validation and test split
    tvt_split = (0.5, 0.7)

    # get datasets
    X_train, X_val, X_test, y_train, y_val, y_test = generate_dataset_static(
        data_path, mask_path, tvt_split, N)
    X_train_t, X_val_t, X_test_t, y_train_t, y_val_t, y_test_t = generate_dataset_temporal(
        data_path, mask_path, tvt_split, N)

    # reshape datasets to match CNN shapes
    X_train = np.array(X_train).swapaxes(1, 3).swapaxes(2, 3)
    X_val = np.array(X_val).swapaxes(1, 3).swapaxes(2, 3)
    X_test = np.array(X_test).swapaxes(1, 3).swapaxes(2, 3)
    print(X_train.shape)
    print(X_val.shape)
    print(X_test.shape)
    y_train = np.array(y_train)
    y_val = np.array(y_val)
    y_test = np.array(y_test)
    print(y_train.shape)
    print(y_val.shape)
    print(y_test.shape)

    X_train_t = np.array(X_train_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_val_t = np.array(X_val_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_test_t = np.array(X_test_t).swapaxes(-1, -3).swapaxes(-2, -1)
    print(X_train_t.shape)
    print(X_val_t.shape)
    print(X_test_t.shape)
    y_train_t = np.array(y_train_t)
    y_val_t = np.array(y_val_t)
    y_test_t = np.array(y_test_t)
    print(y_train_t.shape)
    print(y_val_t.shape)
    print(y_test_t.shape)

    # Put data into data provider objects
    batch_size = 25
    train_data = data_providers.DataProvider(X_train,
                                             y_train,
                                             batch_size,
                                             shuffle_order=True)
    val_data = data_providers.DataProvider(X_val,
                                           y_val,
                                           batch_size,
                                           shuffle_order=True)
    test_data = data_providers.DataProvider(X_test,
                                            y_test,
                                            batch_size,
                                            shuffle_order=True)

    batch_size = 25
    train_data_t = data_providers.DataProvider(X_train_t,
                                               y_train_t,
                                               batch_size,
                                               shuffle_order=True)
    val_data_t = data_providers.DataProvider(X_val_t,
                                             y_val_t,
                                             batch_size,
                                             shuffle_order=True)
    test_data_t = data_providers.DataProvider(X_test_t,
                                              y_test_t,
                                              batch_size,
                                              shuffle_order=True)

    inputs_shape = X_train[:batch_size].shape
    inputs_shape_t = X_train_t[:batch_size].shape

    print("Time to make networks!")

    # generates networks of different depths and datasets
    static_net_shallow = ShallowNetwork(input_shape=inputs_shape)
    static_net_deeper = DeeperNetwork(input_shape=inputs_shape)
    temporal_net_shallow = ShallowNetwork(input_shape=inputs_shape_t)
    temporal_net_deeper = DeeperNetwork(input_shape=inputs_shape_t)
    static_net_deepest = DeepestNetwork(input_shape=inputs_shape)
    temporal_net_deepest = DeepestNetwork(input_shape=inputs_shape_t)

    # declare variables for experiments
    experiment_name = "static_run_shallow"
    num_epochs = 30
    use_gpu = False
    continue_from_epoch = -1

    # build experiment and run
    experiment_1 = ExperimentBuilder(
        network_model=static_net_shallow,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_1.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "static_run_deeper"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_2 = ExperimentBuilder(
        network_model=static_net_deeper,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_2.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_shallow"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_3 = ExperimentBuilder(
        network_model=temporal_net_shallow,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_3.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_deeper"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_4 = ExperimentBuilder(
        network_model=temporal_net_deeper,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_4.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "static_run_deepest"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_5 = ExperimentBuilder(
        network_model=static_net_deepest,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_5.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_deepest"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_6 = ExperimentBuilder(
        network_model=temporal_net_deepest,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_6.run_experiment(
    )  # run experiment and return experiment metrics
Example #14
class DataProviderTest(TestCase):
    d = dp.DataProvider('userA')

    @classmethod
    def setUpClass(cls) -> None:
        """Disable logging."""
        logging.getLogger().setLevel(logging.FATAL)

    @patch("lib.base_client.BaseClient.post")
    def test_store_record_fail(self, m):
        # Fail due to bad owner
        url = (f"https://{config.STORAGESERVER_HOSTNAME}:"
               f"{config.STORAGE_API_PORT}/"
               f"{UserType.OWNER}/store_record")
        j = {
            'success': False,
            'msg': f"Invalid POST data: Owner in JSON not authenticated owner."
        }
        m.return_value.json.return_value = j
        with self.assertRaises(RuntimeError) as cm:
            self.d._store_record_on_server(b"hash", "record",
                                           "non-existing-owner")
        m.assert_called_once()
        self.assertEqual(url, m.call_args[0][0])
        self.assertIn("Invalid POST", str(cm.exception))

    @patch("lib.base_client.BaseClient.post")
    def test_store_record_success(self, m):
        url = (f"https://{config.STORAGESERVER_HOSTNAME}:"
               f"{config.STORAGE_API_PORT}/"
               f"{UserType.OWNER}/store_record")
        j = {'success': True, 'msg': None}
        m.return_value.json.return_value = j
        self.d._store_record_on_server(b"hash", {'cipher': "record"}, "userA")
        m.assert_called_once()
        expected = json.dumps({
            'hash': to_base64(b"hash"),
            'record': {
                'cipher': "record"
            },
            'owner': 'userA'
        }).encode()
        m.assert_called_with(url, json=expected)

    @patch("lib.base_client.BaseClient.post")
    def test_batch_store_records_fail(self, m):
        # Fail due to bad owner
        url = (f"https://{config.STORAGESERVER_HOSTNAME}:"
               f"{config.STORAGE_API_PORT}/"
               f"{UserType.OWNER}/batch_store_records")
        j = {
            'success': False,
            'msg': f"Invalid POST data: Owner in JSON not authenticated owner."
        }
        m.return_value.json.return_value = j
        with self.assertRaises(RuntimeError) as cm:
            self.d._batch_store_records_on_server([])
        m.assert_called_once_with(url, json=[])
        self.assertIn("Invalid POST", str(cm.exception))

    @patch("lib.base_client.BaseClient.post")
    def test_batch_store_records_success(self, m):
        url = (f"https://{config.STORAGESERVER_HOSTNAME}:"
               f"{config.STORAGE_API_PORT}/"
               f"{UserType.OWNER}/batch_store_records")
        j = {'success': True, 'msg': None}
        m.return_value.json.return_value = j
        self.d._batch_store_records_on_server([("hash", "record", "userA")])
        expected = [("hash", "record", "userA")]
        m.assert_called_once_with(url, json=expected)

    @responses.activate
    @patch("lib.config.EVAL", False)
    def test_store_records(self):
        """Kind of integrity test, we only mock the server responses."""
        # Define server response
        # 1 - Get token
        url = (f"https://{config.KEYSERVER_HOSTNAME}"
               f":{config.KEY_API_PORT}/provider/gen_token")
        j = {
            'success':
            True,
            'token':
            'XIu2a9SDGURRTzQnJdDg19Ii_CS7wy810s3_Lrx-TY7Wvh2Hf0U4xLH'
            'NwnY_byYJ71II3kfUXpSZHOqAxA3zrw'
        }
        responses.add(responses.GET, url, json=j, status=200)
        # 2 - Hash Key
        url = f"{self.d.KEYSERVER}/hash_key"
        hash_key = to_base64(int(1).to_bytes(16, 'big'))
        j = {'success': True, 'hash_key': hash_key}
        responses.add(responses.GET, url, json=j, status=200)

        # 3 - Encryption Keys
        j = {
            'success': True,
            'port': 50000,
            'host': "127.0.0.1",
            'totalOTs': 10,
            'tls': config.OT_TLS
        }
        url = f"https://localhost:" \
              f"{config.KEY_API_PORT}/provider/key_retrieval?totalOTs=3"
        responses.add(responses.GET, url, json=j, status=200)
        # Remember to mock the OT

        r1 = Record([1.0, 2.1, 3.3, 4.4, 5.0])
        r2 = Record([1.0532, 2.15423, 3.3453, 4.4, 5.0])
        r3 = Record([1.52340, 2.1523, 3.35423, 4.4, 5.0])
        records = [r1, r2, r3]
        # Log in user
        self.d.set_password("password")

        with patch.object(self.d, "_receive_ots", return_value=[10, 9, 8]):
            # Mock OT
            with patch.object(self.d,
                              "_batch_store_records_on_server",
                              return_value=True):
                self.d.store_records(records)

    def test_parser(self):
        # Just syntax errors
        p = dp.get_provider_parser()
        self.assertTrue(isinstance(p, argparse.ArgumentParser))

    def test_store_from_file(self):
        res = [Record([1, 2, 3, 4, 5]), Record([1, 2, 3, 5, 6])]
        with patch.object(self.d, "store_records") as m:
            with tempfile.NamedTemporaryFile() as fd:
                fd.write(b'[1,2,3,4, 5]\n')
                fd.write(b'[1,2,3,5, 6]\n')
                fd.seek(0)
                self.d.store_from_file(fd.name)
            m.assert_called_with(res)
Example #15
                    help='how many images for a flow (the default value is 2)')
parser.add_argument(
    '--only_expressions',
    dest='only_expressions',
    type=bool,
    default=False,
    help=
    'choose whether to only display facial expressions (the default is False)')

args = parser.parse_args()
what_display = args.what_display
flow_from = args.flow_from
win_size = args.win_size
only_expressions = args.only_expressions

me2 = dt.DataProvider('ME2')
videos = me2.produce_videos('ME2', 'all')

for video in videos:
    num_frames = me2.num_frames(video)
    if num_frames < win_size: continue

    if what_display == 'crop_face' or (what_display == 'flow'
                                       and flow_from == 'none'):
        read_folder = '../processed_data/crop_faces/{}/{}'.format(
            video[0], video[1])
    elif what_display == 'flow' and flow_from == 'img':
        read_folder = '../processed_data/for_people/flow_img/size{}/{}/{}'.format(
            win_size, video[0], video[1])

    frames = me2.video_to_frames(video, "imgNames")
Example #16
from torch.autograd import Variable
import pandas as pd
import sys
import pickle
import data_provider


def predict_model(model, pred_data_loader):
    model.start_prediction()
    pred_loader = pred_data_loader.loader
    label_predict = []
    for _, content in enumerate(pred_loader):
        data = Variable(content[0])
        output = model.predict_batch(data)
        pred = output[1].numpy().tolist()
        label_predict += [x[0] for x in pred]
    df = pd.DataFrame({"ID": range(len(label_predict)), "label": label_predict})
    return df

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print('Usage: python mnist_results.py [model_file] [output_df] [test_data_pickle_file]')
        sys.exit(0)
    else:
        model_file = sys.argv[1]
        output_file = sys.argv[2]
        test_data_file = sys.argv[3]
        model = pickle.load(open(model_file, "rb"))
        pred_label = data_provider.DataProvider(file_dir=test_data_file, train=False)
        df = predict_model(model, pred_label)
        df.to_csv(output_file, index=False)
Example #17
import data_provider as dp
import tensorflow as tf

# Parameters
num_fmaps = 4  # Number of feature maps for the convolutional layer
connected_size = 200  # Number of units in the fully connected layer
learning_rate = 1e-4  # Learning rate for the Adam optimiser
num_epochs = 10  # Number of training epochs
batch_size = 100  # Number of training examples per batch

# Load data
print("Loading data...")
DP = dp.DataProvider(
    "./data/dataset_c4.pkl",  # Data file
    ["own_pieces", "opponent_pieces"],  # Inputs
    ["move"],  # Targets
    train_proportion=0.8,
    val_proportion=0.1)

# Build network

# Define inputs and outputs
input_length = DP.full_input_length()
target_length = DP.target_length
x = tf.placeholder(tf.float32, shape=[None, input_length], name="input")
t = tf.placeholder(tf.float32, shape=[None, target_length], name="target")

# The data provider gives the state of the game as a vector where the first
# 7*6 elements are "my pieces" and the second 7*6 elements are "your pieces".
# We need to reshape this into the standard format which is a 4D tensor of
# dimensions [index, height, width, feature map]. Here, the two players will be
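
# [Sketch, not in the original] One possible version of the reshape the comment
# above describes, assuming a 6-row x 7-column board and the two 42-element
# halves becoming two feature maps; the variable names are illustrative.
own_pieces, opp_pieces = tf.split(x, num_or_size_splits=2, axis=1)  # each [None, 42]
own_board = tf.reshape(own_pieces, [-1, 6, 7])
opp_board = tf.reshape(opp_pieces, [-1, 6, 7])
x_image = tf.stack([own_board, opp_board], axis=-1)  # [None, 6, 7, 2]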
Example #18
import sys, os
import caffe
import rasterio
import numpy as np
import skimage.util
import data_provider

from fake_positive_data import fake_positive_data

gtp = data_provider.DataProvider()

class BoxInputLayer(caffe.Layer):
    def setup(self, bottom, top):
        # read parameters from `self.param_str`
        params = eval(self.param_str)
        # self.combined_path = params['raster'] # location of a small-ish file with rasterized label info.
        # self.size = params['size']  # window size (width, height)
        self.num = params.get('num', 8)   # batch size, default is 8
        # self.height_jitter = params.get('height_jitter', 1.0)   # jitter heights (uniform distr.)
        self.rotate = params.get('rotate', True)   # Apply random rotations
        self.size = 2 * gtp.radius_in_pixels
        self.translate = params.get('translate', 0.5)   # Random translation, given a scale in meters
        self.phase = params.get('phase', 'TRAIN')
        print "I was! I was set up!"
        
        self.synth = 1
        self.sscale = 40
        self.bwidth = 15
        self.edge_factor = 0.7
        self.btop_amount = 0.3
Example #19
def setUp(self):
    self.val = None
    f = self.set_val  # the response of every callback should be saved in self.val
    self.provider = dp.DataProvider(onData=f, onError=f, onStart=f, onStop=f)
    self.provider2 = dp.DataProvider(onData=f, onError=f, onStart=None, onStop=None)