import random

import numpy as np


def dgen(batch_size=2):
    # Infinite generator yielding ([p, q, a], [a, an]) training batches.
    while True:
        p, q, a, an = [], [], [], []
        for i in range(batch_size):
            # random.randint(0, size) is inclusive of `size`; randrange keeps
            # the sampled index inside [0, size).
            r = random.randrange(size)
            train_ = train(r)
            p.append(train_[1].tolist())
            q.append(train_[2].tolist())

            # Flatten each sample's 2-D answer array into a single list.
            a_t = []
            for ii in train_[0].tolist():
                a_t.extend(ii)
            a.append(a_t)
            an.append(train_[3].tolist())
        p = np.array(p)
        q = np.array(q)
        a = np.array(a)
        an = np.array(an)
        print(p.shape, q.shape, a.shape, an.shape)
        yield ([p, q, a], [a, an])
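
# Usage sketch (assumption, not from the original source): dgen yields
# ([p, q, a], [a, an]) batches indefinitely, so it can drive a Keras-style
# generator loop; `build_model` is a hypothetical placeholder for a model
# with three inputs and two outputs.
def demo_fit(build_model, steps=100, epochs=10):
    model = build_model()
    model.fit_generator(dgen(batch_size=2), steps_per_epoch=steps, epochs=epochs)
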
def main():
    # Assumes the usual PySyft imports: torch, torch.nn as nn, syft as sy,
    # and WebsocketClientWorker (its import path varies across syft versions).
    hook = sy.TorchHook(torch)
    squad = WebsocketClientWorker(id=1,
                                  host='localhost',
                                  port=7171,
                                  hook=hook,
                                  verbose=True)

    message = data
    message_ptr = message.send(squad)
    print(message_ptr)
    print('sent model data. now squad has %d objects' %
          (message_ptr.location.objects_count_remote()))

    # get squad updated model
    model = nn.Linear(1, 1)
    train(model)
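
    # Sketch (assumption, not from the original): .send() returned a pointer,
    # so the data can be pulled back with .get(), which also removes the
    # remote copy from the worker.
    message_back = message_ptr.get()
    print('retrieved data back from squad:', type(message_back))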
Example #3
	def add(self):
		l = login()
		l.get()
		axs = raw_input("Access Spec of new user :")
		if axs not in ('student', 'teacher'):
			print "Access not part of allowed set"
			return
		elif axs == 'student':
			#s = student()
			l.add(axs)
			nam = raw_input("Enter Student's name:")
			self.cur.execute('''insert into student values (?,?); ''',(nam.decode(),l.uname))
			data.train(l.uname)
			self.con.commit()
		else:
			#s = teacher()
			nam = raw_input("Enter teacher's name:")
			sub = raw_input("Enter teacher's subject:")
			l.add(axs)
			self.cur.execute('''insert into teacher values (?,?,?); ''',(nam.decode(),l.uname,sub.decode()))
			self.con.commit()
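
	# Sketch (assumption, not from the original): a helper using the same
	# sqlite3 cursor to list registered students and verify the inserts.
	def list_students(self):
		self.cur.execute('''select * from student;''')
		for row in self.cur.fetchall():
			print row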
Example #4
def main():
    paddle.init(use_gpu=True, trainer_count=1)

    img = paddle.layer.data(name='img',
                            type=paddle.data_type.dense_vector(784))
    label = paddle.layer.data(name='label',
                              type=paddle.data_type.integer_value(10))

    predict = letnet_5(img)

    cost = paddle.layer.classification_cost(input=predict, label=label)
    if os.path.exists(PARAMETERS_TAR):
        # tar archives are binary; open in 'rb' mode for portability
        with open(PARAMETERS_TAR, 'rb') as f:
            parameters = paddle.parameters.Parameters.from_tar(f)
    else:
        parameters = paddle.parameters.create(cost)
    optimizer = paddle.optimizer.Momentum(
        learning_rate=0.1 / 128,
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0001))

    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=optimizer)

    def event_handler(evt):
        if isinstance(evt, paddle.event.EndPass):
            print 'Pass id: %d, %s' % (evt.pass_id, evt.metrics)

    trainer.train(reader=paddle.batch(paddle.reader.shuffle(dataset.train(),
                                                            buf_size=8192),
                                      batch_size=64),
                  event_handler=event_handler,
                  num_passes=30)

    # write the tar archive in binary mode
    with open(PARAMETERS_TAR, 'wb') as f:
        trainer.save_parameter_to_tar(f)

    # dataset.test() returns a reader creator; calling it yields the reader.
    test_list = list(dataset.test()())

    labels = paddle.infer(output_layer=predict,
                          parameters=parameters,
                          input=test_list)

    labels = np.argmax(labels, axis=1)
    dataset.save_result2csv('./result.csv', labels)
    print 'training end'
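
    # Sketch (assumption, not from the original): paddle.infer returns one
    # row of class probabilities per input, so a single-image prediction is:
    probs = paddle.infer(output_layer=predict,
                         parameters=parameters,
                         input=test_list[:1])
    print 'predicted digit: %d' % np.argmax(probs, axis=1)[0]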
Example #5
def main(FLAGS):
    if FLAGS.static:
        paddle.enable_static()
    device = paddle.set_device("gpu" if FLAGS.use_gpu else "cpu")

    # yapf: disable
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in"),
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask"),
    ]
    # yapf: enable

    model = paddle.Model(
        Seq2SeqAttModel(
            encoder_size=FLAGS.encoder_size,
            decoder_size=FLAGS.decoder_size,
            emb_dim=FLAGS.embedding_dim,
            num_classes=FLAGS.num_classes),
        inputs,
        labels)

    lr = FLAGS.lr
    if FLAGS.lr_decay_strategy == "piecewise_decay":
        learning_rate = fluid.layers.piecewise_decay(
            [200000, 250000], [lr, lr * 0.1, lr * 0.01])
    else:
        learning_rate = lr
    grad_clip = fluid.clip.GradientClipByGlobalNorm(FLAGS.gradient_clip)
    optimizer = fluid.optimizer.Adam(
        learning_rate=learning_rate,
        parameter_list=model.parameters(),
        grad_clip=grad_clip)

    model.prepare(optimizer, WeightCrossEntropy(), SeqAccuracy())

    train_dataset = data.train()
    train_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(), data.PadTarget()])
    train_sampler = data.BatchSampler(
        train_dataset, batch_size=FLAGS.batch_size, shuffle=True)
    train_loader = paddle.io.DataLoader(
        train_dataset,
        batch_sampler=train_sampler,
        places=device,
        num_workers=FLAGS.num_workers,
        return_list=True,
        collate_fn=train_collate_fn)
    test_dataset = data.test()
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(), data.PadTarget()])
    test_sampler = data.BatchSampler(
        test_dataset,
        batch_size=FLAGS.batch_size,
        drop_last=False,
        shuffle=False)
    test_loader = paddle.io.DataLoader(
        test_dataset,
        batch_sampler=test_sampler,
        places=device,
        num_workers=0,
        return_list=True,
        collate_fn=test_collate_fn)

    model.fit(train_data=train_loader,
              eval_data=test_loader,
              epochs=FLAGS.epoch,
              save_dir=FLAGS.checkpoint_path,
              callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
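
    # Sketch (assumption, not from the original): paddle.Model.evaluate runs
    # the metrics passed to prepare() over a loader and returns them as a dict.
    eval_result = model.evaluate(test_loader)
    print(eval_result)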
Example #6
                             reps,
                             nPlanes,
                             residual_blocks=False,
                             downsample=[2, 2])).add(scn.BatchNormReLU(m)).add(
                                 scn.OutputLayer(dimension))
        self.linear = nn.Linear(m, data.nClassesTotal)

    def forward(self, x):
        x = self.sparseModel(x)
        x = self.linear(x)
        return x


model = Model()
print(model)
trainIterator = data.train()
validIterator = data.valid()

criterion = nn.CrossEntropyLoss()
p = {}
p['n_epochs'] = 100
p['initial_lr'] = 1e-1
p['lr_decay'] = 4e-2
p['weight_decay'] = 1e-4
p['momentum'] = 0.9
p['check_point'] = False
p['use_cuda'] = torch.cuda.is_available()
dtype = 'torch.cuda.FloatTensor' if p['use_cuda'] else 'torch.FloatTensor'
dtypei = 'torch.cuda.LongTensor' if p['use_cuda'] else 'torch.LongTensor'
if p['use_cuda']:
    model.cuda()
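
# Sketch (assumption, not from the original): map the hyperparameters
# collected in `p` onto a standard torch.optim.SGD optimizer.
optimizer = torch.optim.SGD(model.parameters(),
                            lr=p['initial_lr'],
                            momentum=p['momentum'],
                            weight_decay=p['weight_decay'])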
Example #7
import random

import numpy as np
import tensorflow as tf
from skimage.io import imread, imshow
from skimage.transform import resize
import matplotlib.pyplot as plt
from tqdm import tqdm

# Call the seeding function; `np.random.seed = 42` would silently replace it.
np.random.seed(42)

IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3

#################################

X_train, Y_train = data.train(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
X_test = data.test(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

#################################

model = arch.architecture(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
print(model.summary())

################################

# ModelCheckpoint: keep only the best weights seen during training.
checkpointer = tf.keras.callbacks.ModelCheckpoint('model_for_nuclei.h5',
                                                  verbose=1,
                                                  save_best_only=True)

callbacks = [