Example #1
def run_epoch(model, train_iterator, val_iterator, epoch):
    train_losses = []
    val_accuracies = []
    losses = []

    # Reduce the learning rate as the number of epochs increases
    if (epoch == int(model.config.max_epochs / 3)) or (epoch == int(2 * model.config.max_epochs / 3)):
        model.reduce_lr()

    for i, batch in enumerate(train_iterator):
        model.optimizer.zero_grad()
        x_times, x_features, x_labels = batch
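        # the classification target is the label at the last position of each sequence (x_labels[:, -1])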
        if torch.cuda.is_available():
            x_times, x_features, x_labels = x_times.cuda(), x_features.cuda(), x_labels.cuda()
            y = (x_labels[:, -1]).type(torch.cuda.LongTensor)
        else:
            y = (x_labels[:, -1]).type(torch.LongTensor) 
        y_pred = model(x_times, x_features)
        loss = model.loss_op(y_pred, y)
        loss.backward()
        losses.append(loss.item())
        model.optimizer.step()

        if i % 100 == 0:
            print("Iter: {}".format(i + 1))
            avg_train_loss = np.mean(losses)
            train_losses.append(avg_train_loss)
            print("\tAverage training loss: {:.5f}".format(avg_train_loss))
            losses = []

            # Evaluate accuracy on the validation set, record the result, then switch back to training mode
            val_accuracies.append(evaluate(model, val_iterator))
            model.train()

    return train_losses, val_accuracies
Example #2
    def __init__(self, image):
        super(Final, self).__init__()
        root = Tk()
        root.title("Final")
        root.geometry("600x600")
        root.resizable(0, 0)
        load_model = model()
        load_model.load_model("cnn1.h5")
        images = load_model.predict(image)
        height, width, no_channels = images["orignal_image"].shape
        frame = Frame(root,
                      width=width,
                      height=height,
                      highlightthickness=1,
                      highlightbackground="black")
        canvas = Canvas(frame, width=width, height=height)
        c_img = cv2.cvtColor(images["orignal_image"].copy(), cv2.COLOR_BGR2RGB)
        photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(c_img))
        image_on_canvas = canvas.create_image(0, 0, image=photo, anchor=NW)
        canvas.pack()
        frame.pack()
        frame1 = Frame(root, width=350, height=30)
        frame1.pack()
        canvas1 = Canvas(frame1,
                         bg='#FFFFFF',
                         width=300,
                         height=300,
                         scrollregion=(0, 0, 30 * len(images["images"]), 100))
        hbar = Scrollbar(frame1, orient=HORIZONTAL)
        hbar.pack(side=BOTTOM, fill=X)
        hbar.config(command=canvas1.xview)
        canvas1.config(width=300, height=300)
        canvas1.config(xscrollcommand=hbar.set)
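        # one small canvas per prediction: the model's output text drawn above its image, packed left-to-right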
        canvass = []
        photo1 = []
        for i, img in enumerate(images["images"]):
            canvass.append(Canvas(canvas1, width=28, height=80))
            im = img["image"].copy()
            photo1.append(
                PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(im)))
            canvass[i].create_text(12,
                                   10,
                                   fill="darkblue",
                                   font="Times 20 italic bold",
                                   text=img["output"])
            image_on_canvas = canvass[i].create_image(0,
                                                      30,
                                                      image=photo1[i],
                                                      anchor=NW)
            canvass[i].pack(side=LEFT)

        canvas1.pack(side=LEFT, expand=True, fill=BOTH)
        root.mainloop()
Example #3
def transform_fn(model, request_body, content_type, accept_type):
    try:
        input_object = json.loads(request_body)
        board = input_object["board"]
        count = input_object["session"].get("count", 0)
        input_object["session"]["count"] = count + 1

        if count > 10:
            board = nd.array(board)
            board = nd.concat(
                (board == 1).expand_dims(axis=0),
                (board == 2).expand_dims(axis=0), dim=0
            )

            board = board.expand_dims(axis=0)

            # masks are 0 where a cell is already marked in that channel, 1 otherwise;
            # multiplying p by both of them below keeps probability only on empty cells
            mask = board.clip(0, 1)
            mask = -(mask - 1)
            mask = mask.reshape((2, -1))

            p = nd.softmax(model(board).reshape((-1,)))
            p = p * mask[0] * mask[1]

            # pick the highest-probability cell that is still empty on the original board
            while True:
                loc = int(p.argmax(axis=0).asscalar())
                y = loc // board.shape[2]
                x = loc % board.shape[2]
                if input_object["board"][y][x] == 0:
                    break
                else:
                    p[loc] = 0
        else:
            while True:
                x = random.randint(0, len(input_object["board"][0]) - 1)
                y = random.randint(0, len(input_object["board"]) - 1)
                if input_object["board"][y][x] == 0:
                    break

        input_object["session"]["shootType"] = "CNNNet"
        return bytearray(json.dumps({
            "shot": {
                "x": x,
                "y": y
            },
            "session": input_object["session"]
        }), 'utf-8'), accept_type
    except Exception as e:
        print(traceback.format_exc())
        print(e)
Example #4
def main():
    args = arg_parser().parse_args()

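    # placeholder for a single 50x200 RGB image; a batch dimension is added below via image_ph[None]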
    image_ph = tf.placeholder(tf.uint8, shape=(50, 200, 3))
    with tf.variable_scope('model'):
        logits = model(image_ph[None])
    labels = tf.argmax(logits, axis=-1)[0]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        load_vars(sess, args.model, relaxed=True, log_fn=load_log_fn)
        for image_path in args.images:
            image = np.array(Image.open(image_path))
            labels_out = sess.run(labels, feed_dict={image_ph: image})
            print('%s: %s' % (image_path, ''.join(map(str, labels_out))))
Example #5
def evaluate(model, iterator, output_metrics_result=None, output_confusion_result=None):
    model.eval()  # switch to eval mode; run_epoch restores train mode after calling evaluate
    y_preds = []
    y_truths = []
    example_feature, example_pred = None, None
    for idx, batch in enumerate(iterator):
        x_times, x_features, x_labels = batch
        if torch.cuda.is_available():
            x_times, x_features, x_labels = x_times.cuda(), x_features.cuda(), x_labels.cuda()
        y_pred = model(x_times, x_features)
        predicted = torch.max(y_pred.cpu().data, 1)[1]
        if example_feature is None and example_pred is None:
            example_feature, example_pred = x_features, predicted
        y_preds.extend(predicted.numpy())
        y_truths.extend(x_labels[:, -1].cpu().data.numpy())
    print("Example-----")
    print("-----Feature: {}".format(example_feature))
    print("-----Prediction: {}".format(example_pred))
    return evaluate_multi_class(y_preds, y_truths, output_metrics_result, output_confusion_result)
Example #6
def model_fn(model_dir):
    print("loading from %s"%(model_dir))
    with open('%s/hyperparameters.json'%(model_dir), 'r') as fp:
        hyperparameters = json.load(fp)
    
    net=model(
        depth=int(hyperparameters.get("depth",2)),
        width=int(hyperparameters.get("width",3)),
    )
    try:
        print("trying to load float16")
        net.cast("float16") 
        net.collect_params().load("%s/model-0000.params"%(model_dir), ctx)
    except Exception as e: 
        print(e)
        print("trying to load float32")
        net.cast("float32") 
        net.collect_params().load("%s/model-0000.params"%(model_dir), ctx)

    net.cast("float32")

    return net
Example #7
    def get_total(self):
        image = self.image.copy()
        image = imutils.resize(image, height=500)
        # crop four fixed regions (hard-coded pixel coordinates) that contain the per-section numbers
        crop_imgs = []
        crop_imgs.append(image[443:488, 107:216])
        crop_imgs.append(image[443:488, 220:323])
        crop_imgs.append(image[443:488, 325:450])
        crop_imgs.append(image[443:488, 451:604])
        section_and_total = []
        total = 0
        for crop_img in crop_imgs:
            img = crop_img.copy()
            im = Image(img)
            img = im.covert_to_gray()
            img = im.apply_gaussian_blur()
            img = im.apply_threshold()
            img = im.apply_canny()
            img = im.apply_morphology_close()
            con = im.find_contours()
            images = im.get_images(con)
            load_model = model()
            load_model.load_model("cnn1.h5")
            images = load_model.predict(images)
            output = ""
            if len(images["images"]) == 1:
                output += images["images"][0]["output"]
            else:
                # order the two detected characters left-to-right before concatenating them
                if images["images"][0]["location"]["x"] < images["images"][1][
                        "location"]["x"]:
                    output += images["images"][0]["output"]
                    output += images["images"][1]["output"]
                else:
                    output += images["images"][1]["output"]
                    output += images["images"][0]["output"]
            total += int(output)
            section_and_total.append(int(output))
        section_and_total.append(total)
        return section_and_total
Example #8
''' This file calls all the methods declared

datagen: loading the data from the directories
model: defining the model architecture
train: training the model and saving it 
'''

from datagen import datagen
from cnn import model, base_model  # base_model is used below; it is assumed to live in cnn alongside model
from train import train

training_path = 'dataset/training_set'
test_path = 'dataset/test_set'

'''loading the data '''
training_set, validation_set, test_set = datagen(training_path, test_path)

''' defining the model architecture '''
base_model = base_model()
model = model()

''' training and saving the model '''
train(base_model, 'base_model', training_set, validation_set, 32, 50)
train(model, 'model', training_set, validation_set, 32, 50)
Example #9
def train(hyperparameters, hosts, num_gpus, **kwargs):
    try:
        _ = mx.nd.array([1], ctx=mx.gpu(0))
        ctx = [mx.gpu(i) for i in range(num_gpus)]
        print("using GPU")
        DTYPE = "float16"
        host_ctx = mx.cpu_pinned(0)
    except mx.MXNetError:
        ctx = [mx.cpu()]
        print("using CPU")
        DTYPE = "float32"
        host_ctx = mx.cpu(0)

    model_dir = os.environ.get("SM_CHANNEL_MODEL")
    if model_dir:
        print("using prebuild model")
        shutil.unpack_archive("%s/model.tar.gz" % (model_dir), model_dir)
        with open('%s/hyperparameters.json' % (model_dir), 'r') as fp:
            saved_hyperparameters = json.load(fp)

        net = model(
            depth=int(saved_hyperparameters.get("depth", 2)),
            width=int(saved_hyperparameters.get("width", 3)),
        )
        try:
            print("trying to load float16")
            net.cast("float16")
            net.collect_params().load("%s/model-0000.params" % (model_dir),
                                      ctx)
        except Exception as e:
            print(e)
            print("trying to load float32")
            net.cast("float32")
            net.collect_params().load("%s/model-0000.params" % (model_dir),
                                      ctx)
        net.cast(DTYPE)
    else:
        print("building model from scratch")
        net = model(
            depth=int(hyperparameters.get("depth", 2)),
            width=int(hyperparameters.get("width", 3)),
        )
        net.cast(DTYPE)
    net.collect_params().initialize(mx.init.Xavier(), ctx=ctx)
    net.hybridize()
    print(net)

    dice = DiceLoss()
    dice.cast(DTYPE)
    dice.hybridize()

    trainer = gluon.Trainer(
        net.collect_params_layers(2) if model_dir else net.collect_params(),
        'adam', {
            "multi_precision": (DTYPE == 'float16'),
            'learning_rate': float(hyperparameters.get("learning_rate", .001))
        })
    train_iter, test_iter = get_data(int(hyperparameters.get("batch_size", 8)),
                                     DTYPE, host_ctx)

    Loss = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False)

    best = float("inf")
    warm_up = int(hyperparameters.get("warm_up", 30))
    patience = int(hyperparameters.get("patience", 10))
    wait = 0
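    # early stopping: after the warm-up period, stop once validation loss has not improved for more than patience epochs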

    for e in range(int(hyperparameters.get("epochs", 11))):
        print("Epoch %s" % (e))
        val_loss = 0
        st = time.time()
        training_count = 0
        testing_count = 0
        training_loss = 0

        for batch in train_iter:
            batch_size = batch.data[0].shape[0]
            training_count += batch_size
            data = gluon.utils.split_and_load(batch.data[0].astype(DTYPE), ctx)
            label = gluon.utils.split_and_load(
                batch.label[0].astype(DTYPE).reshape((batch_size, -1)), ctx)
            mask = gluon.utils.split_and_load(
                batch.label[1].astype(DTYPE).reshape((batch_size, -1)), ctx)

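            # the dice output is negated here (and again when reporting), i.e. the code treats it as a score to maximize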
            with autograd.record():
                output = [net(x) for x in data]
                losses = [
                    -dice(x, y, z) for x, y, z in zip(output, label, mask)
                ]
            for loss in losses:
                loss.backward()
            trainer.step(batch_size)
            training_loss += sum(loss.sum().asscalar() for loss in losses)

        for batch in test_iter:
            batch_size = batch.data[0].shape[0]
            testing_count += batch_size

            data = gluon.utils.split_and_load(batch.data[0].astype(DTYPE), ctx)
            label = gluon.utils.split_and_load(
                batch.label[0].astype(DTYPE).reshape((batch_size, -1)), ctx)
            mask = gluon.utils.split_and_load(
                batch.label[1].astype(DTYPE).reshape((batch_size, -1)), ctx)

            output = [net(x) for x in data]
            losses = [-dice(x, y, z) for x, y, z in zip(output, label, mask)]

            val_loss += sum(loss.sum().asscalar() for loss in losses)

        et = time.time()
        print("Hyperparameters: %s;" % (hyperparameters))
        print("Training loss: %s;" % (-training_loss / training_count))
        print("Testing loss: %s;" % (-val_loss / (testing_count)))
        print("Throughput=%2.2f;" % ((training_count + testing_count) /
                                     (et - st)))
        print("Validation Loss=%2.2f;" % val_loss)
        print("Best=%2.2f;" % best)

        if e >= warm_up:
            if val_loss < best:
                print("best model: %s;" % (-val_loss / (testing_count)))
                save(net, hyperparameters)
                best = val_loss
                wait = 0
            else:
                wait += 1
        if wait > patience:
            print("stoping early")
            break
        train_iter.reset()
        test_iter.reset()
Example #10
def main(*argv):
    data_dir = "/tmp/tensorflow/mnist/input_data"
    # load dataset
    mnist = input_data.read_data_sets(data_dir)

    graph = tf.Graph()
    with graph.as_default():
        # input placeholder: flattened 28x28 grayscale MNIST images, shape (batch_size, 784)
        x = tf.placeholder(tf.float32, [None, 784])
        # true label
        y_ = tf.placeholder(tf.int64, [None])
        dropout_rate = tf.placeholder(tf.float32)
        y, loss, train_step = cnn.model(x, y_, dropout_rate)

    with tf.Session(graph=graph) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        correct_prediction = tf.equal(tf.argmax(y, 1), y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Train
        for step in range(30):
            print("step", step)
            for i in range(500):
                batch_xs, batch_ys = mnist.train.next_batch(100)
                opt, l = sess.run([train_step, loss],
                                  feed_dict={
                                      x: batch_xs,
                                      y_: batch_ys,
                                      dropout_rate: 0.4
                                  })
                if i == 499:
                    print(
                        "train acc & loss: ",
                        sess.run([accuracy, loss],
                                 feed_dict={
                                     x: batch_xs,
                                     y_: batch_ys,
                                     dropout_rate: 1.0
                                 }))

            tx, ty = mnist.test.next_batch(1000)
            print(
                "partial test acc & loss: ",
                sess.run([accuracy, loss],
                         feed_dict={
                             x: tx,
                             y_: ty,
                             dropout_rate: 1.0
                         }))

        # final with all test
        # use dirty workaround
        # since the complete test data
        # cannot fit into GPU
        block_count = 10
        total_acc = 0.0
        total_loss = 0.0
        for i in range(block_count):
            tx, ty = mnist.test.next_batch(1000)
            entry = sess.run([accuracy, loss],
                             feed_dict={
                                 x: tx,
                                 y_: ty,
                                 dropout_rate: 1.0
                             })
            total_acc += entry[0]
            total_loss += entry[1]
        final_acc = total_acc / block_count
        final_loss = total_loss / block_count
        print("final test acc & loss: ", [final_acc, final_loss])
Example #11
def main():
    parser = ArgumentParser(description='PyTorch DQN')
    parser.add_argument('--exp', type=str, default=None)
    args = parser.parse_args()
    D = 1e6  # Replay memory capacity
    M = 1000000  # Number of episodes to run
    T = 150  # Maximum number of steps per episode
    epsilon = 1  # Probability of choosing a random action
    save_iter = 200
    gamma = .99  # Discount factor for future rewards
    batch_size = 32  # Number of elements to sample from replay memory
    num_frames = 0  # Counter for the number of frames
    frame_skip = 4  # Number of frames to wait before selecting a new action
    env = gym.make('CartPole-v0')
    er = erp.experience_replay(D)
    reward_ls = []
    loss_fn = torch.nn.MSELoss()
    writer = SummaryWriter(args.exp)
    loss = 1
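    # resume from a saved checkpoint (assumes model/model.pt and uniform_init.pt already exist)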
    checkpoint = torch.load('model/model.pt')
    cnn.model.load_state_dict(checkpoint['model_state_dict'])
    cnn.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    num_frames = checkpoint['epoch']
    epsilon = checkpoint['epsilon']
    uniform_state = torch.load('uniform_init.pt')
    # uniform_state = fill_uniform_state_buf(env, frame_skip)
    # torch.save(torch.stack(uniform_state), 'uniform_init.pt')
    # writer.add_graph(cnn.model, torch.autograd.Variable(
    #    torch.Tensor(4, 84, 84)))
    for eps in range(M):
        env.reset()
        batch = create_new_batch(env, frame_skip, act=-1)
        reward_gl = 0
        print(eps, epsilon, num_frames)
        for t in range(T):
            if random.random() < epsilon:
                act = env.action_space.sample()
            else:
                with torch.no_grad():
                    Q = cnn.model(batch)
                    act = torch.argmax(Q).item()
            _, reward, done, _ = env.step(act)
            reward = bound_reward(reward)
            # reward_ls.append(reward)
            num_frames += frame_skip
            reward_gl += reward
            if done:
                er.add_mem(batch, act, reward, False)
            else:
                new_batch = create_new_batch(env, frame_skip, act=act)
                er.add_mem(batch, act, reward, new_batch)
                batch = new_batch

            if len(er.replay) > batch_size:
                state_batch, action_batch, reward_batch, next_state_batch = er.sample_batch(
                    batch_size)
                state_back = torch.stack(state_batch)
                act = torch.tensor(action_batch)
                rew = torch.tensor(reward_batch)
                mask_nd = torch.tensor(
                    [isinstance(mem, torch.Tensor) for mem in next_state_batch])
                non_final_next_states = torch.stack([
                    s.squeeze(0) for i, s in enumerate(next_state_batch)
                    if mask_nd[i]
                ])
                Q = cnn.model(state_back).gather(1, act.unsqueeze(1))
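                # Bellman target: r + gamma * max_a Q(s', a), with the bootstrap term zeroed for terminal states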
                Q_opt = torch.zeros(batch_size)
                Q_opt[mask_nd] = cnn.model(non_final_next_states).detach().max(
                    1)[0]
                expected_reward = rew.float() + gamma * Q_opt
                loss = loss_fn(Q.squeeze(1), expected_reward)
                #print(loss)
                cnn.optimizer.zero_grad()
                loss.backward()
                cnn.optimizer.step()

            if done:
                break
        writer.add_scalar('data/eps_len', t, num_frames)
        writer.add_scalar('data/reward', reward_gl, num_frames)
        writer.add_scalar('data/eps', epsilon, num_frames)
        with torch.no_grad():
            writer.add_scalar(
                'data/avg_Q',
                torch.mean(torch.max(cnn.model(uniform_state), 1)[0]),
                num_frames)
        epsilon = eps_anneal(epsilon)
        if eps % save_iter == 0:
            torch.save(
                {
                    'epoch': num_frames,
                    'model_state_dict': cnn.model.state_dict(),
                    'optimizer_state_dict': cnn.optimizer.state_dict(),
                    'epsilon': epsilon
                }, 'model/model.pt')
    env.close()
    writer.close()