Example #1
0
class IntelligentWebAgent(Player):
    """Connect-Four player backed by a policy/value network.

    Plays uniformly at random until the underlying ``NNet`` reports itself
    trained (``self.network.trained``), after which moves come from the
    network's policy head.
    """

    def __init__(self, number, trained=False):
        self.network = NNet()
        # NOTE(review): ``self.trained`` is never read by this class —
        # move selection keys off ``self.network.trained``; kept so existing
        # callers that pass/inspect it keep working.
        self.trained = trained
        super().__init__(number)
        self.load_checkpoint()

    def choose_move(self, available, board_state):
        """Return ``(column_index, policy_list)`` for the current position.

        ``available`` holds 1-based playable column numbers; the returned
        index is 0-based. ``policy_list`` has 7 entries (one per column)
        with unavailable columns zeroed.
        """
        if self.network.trained:
            # Run prediction inside the stored TF graph/session (required by
            # the TF1-style threading setup this module uses).
            with graph.as_default():
                with sess.as_default():
                    results = self.network.model.predict([
                        (np.array(board_state)).reshape((1, 6, 7))
                    ])
            policy_output, value = results
            policy = policy_output[0]
            # Pick the highest-scoring column that is actually playable.
            # ``choice`` is pre-initialized so it is never unbound (the
            # original raised UnboundLocalError if nothing matched).
            choice = None
            for item in np.argsort(policy)[::-1]:
                if item + 1 in available:
                    choice = item
                    break
            print(policy)  # debug trace of the raw policy head
            return choice, [
                each if i + 1 in available else 0
                for i, each in enumerate(policy)
            ]
        else:
            # Untrained network: uniform random over the playable columns.
            choice = int(random.choice(available)) - 1
            return choice, [(1 / len(available)) if i in available else 0
                            for i in range(1, 8)]

    def learn(self, states, policies, winners):
        """Fit the network on one batch of self-play data and mark it trained."""
        self.network.train_on_batch(states, policies, winners)
        self.network.trained = True

    def save_checkpoint(self,
                        folder='checkpoint',
                        filename='checkpoint.pth.tar'):
        """Persist the network weights, creating ``folder`` if needed."""
        filepath = os.path.join(folder, filename)
        if not os.path.exists(folder):
            print("Checkpoint Directory does not exist! Making directory {}".
                  format(folder))
            os.mkdir(folder)
        else:
            print("Checkpoint Directory exists! ")
        self.network.model.save_weights(filepath)

    def load_checkpoint(self,
                        folder='checkpoint',
                        filename='checkpoint.pth.tar'):
        """Load saved weights if present; otherwise leave the net untrained.

        Bug fix: the original printed "No model found" but fell through and
        called ``load_weights`` anyway, crashing every fresh run (this method
        is invoked from ``__init__``). Now it returns early instead.
        """
        filepath = os.path.join(folder, filename)
        if not os.path.exists(filepath):
            print("No model found")
            return
        self.network.model.load_weights(filepath)
        self.network.trained = True
Example #2
0
def main_predict(config):
    """Average saved per-network predictions into a mean ensemble.

    Loads each network's prediction array from disk, averages them,
    saves the ensemble array, writes a submission CSV via a throwaway
    ``NNet`` instance, and renders the predictions PDF.

    Args:
        config: dict with ``net_names``, per-net ``predictions_path``,
            ``final_predictions_path``, ``data_paths``, ``submission_path``
            and ``treshold`` entries.
    """
    # allow_pickle is needed because the arrays were saved with object dtype.
    predictions = [
        np.load(config[net_name]['predictions_path'], allow_pickle=True)
        for net_name in config['net_names']
    ]
    mean_ensemble = np.mean(np.array(predictions), axis=0)

    np.save(file=config['final_predictions_path'], arr=mean_ensemble)
    print('saved final mean ensemble predictions at path: {}'.format(
        config['final_predictions_path']))

    # The dummy model only carries the ensemble into the submission/PDF
    # helpers; it is never trained or run. The unused ``mean_sub`` binding
    # from the original is dropped — the call is for its side effect.
    dummy_model = NNet(data_paths=config['data_paths'])
    dummy_model.test_images_predictions = mean_ensemble
    # 'treshold' (sic) matches the key/kwarg spelling used project-wide.
    dummy_model.create_submission_file(
        path=config['submission_path'], treshold=config['treshold'])

    save_predictions_pdf(dummy_model, config)
Example #3
0
def main_train(config):
    """Train every configured network and save its predictions and CSV.

    For each (net_type, net_name) pair: builds the model, runs the
    two-phase training schedule, persists test-set predictions as .npy,
    and writes a submission CSV.

    Args:
        config: dict with ``net_types``, ``net_names``, per-net training
            settings, shared ``loss``/learning-rate/``verbose`` entries,
            and output paths.
    """
    for net_to_train, net_name in zip(config['net_types'],
                                      config['net_names']):
        net = NNet(net_type=net_to_train, data_paths=config['data_paths'])
        # Phase A trains on the additional data, phase B on competition
        # data, each with its own learning rate.
        single_model_training(
            model=net,
            save_path=config[net_name]['checkpoint'],
            additional_epochs=config[net_name]['additional_epochs'],
            competition_epochs=config[net_name]['competition_epochs'],
            b_size=config[net_name]['batch_size'],
            loss=config['loss'],
            l_rate_a=config['learning_rate_additional_data'],
            l_rate_b=config['learning_rate_competition_data'],
            v=config['verbose'])

        predictions = net.predict_test_data()
        np.save(file=config[net_name]['predictions_path'], arr=predictions)
        print('saved model predictions at path: {}'.format(
            config[net_name]['predictions_path']))
        # Called for its side effect; the original bound the result to an
        # unused ``sub`` local, which is dropped here.
        net.create_submission_file(path=config[net_name]['csv_path'],
                                   treshold=config['treshold'])
        print('saved model csv at path: {}'.format(
            config[net_name]['csv_path']))
Example #4
0
import pandas as pd
import numpy as np
import tensorflow as tf

from model import NNet
from preprocess import data_preprocessing
from trainer import train

if __name__ == '__main__':
    # Load the Iris CSV; only the first two preprocessing outputs are used
    # here (the trailing pair is discarded).
    raw_data = pd.read_csv('iris.csv')
    data, target, _, _ = data_preprocessing(raw_data)
    # Wrap as a tf.data pipeline with a shuffle buffer of 100 examples.
    trainable = tf.data.Dataset.from_tensor_slices((data, target)).shuffle(100)

    # Build the model and run the custom training loop for 5 epochs.
    # (The commented-out Keras compile/fit alternative was dead code and
    # has been removed; ``train`` is the active training path.)
    model = NNet()
    train(model, trainable, 5)
Example #5
0
from skimage import data
from skimage.transform import resize
import torch
from torch.autograd import Variable
from model import NNet
from matplotlib import pyplot as plt

# Smoke-test the network on a single grayscale image.
net = NNet()

# skimage's built-in camera image, resized to 256x256 and converted to a
# float tensor.
camera = torch.from_numpy(resize(data.camera(), (256, 256))).float()

# Wrap for autograd, then add batch and channel dims -> (1, 1, 256, 256).
batch = Variable(camera, requires_grad=True)
batch = batch.unsqueeze(0).unsqueeze(0)

out = net(batch)

print(out.shape)







Example #6
0
 def __init__(self, number, trained=False):
     """Record the trained flag, build the network, then init the Player base."""
     self.trained = trained
     self.network = NNet()
     super().__init__(number)
Example #7
0
 def __init__(self, number, ip='127.0.0.1'):
     """Set up the network and the TCP endpoint (fixed port 4230), then init Player."""
     self.choice = None
     self.port = 4230
     self.ip = ip
     self.network = NNet()
     super().__init__(number)
Example #8
0
def main():
    """Run a pretrained colorization model over validation images.

    Loads checkpoint weights on CPU, forwards up to five images from the
    test loader, assembles a Lab image from the input L channel and the
    decoded ab prediction, converts it to RGB, saves it under
    ``img/imagenet-mini/generated/<class>/``, and prints a rough accuracy
    figure per image.
    """
    global args, best_prec1
    args = parser.parse_args()
    print(args)

    # load the pre-trained weights
    model = NNet()
    # model = torch.nn.DataParallel(model).cuda()
    model = torch.nn.DataParallel(model)
    model.load_state_dict(
        torch.load('models/model_best.pth.tar',
                   map_location=torch.device('cpu'))['state_dict'])
    # Toggle for the per-image accuracy computation in the loop below.
    accuracy = True

    # paths=True — the loader is expected to yield the image path as the
    # third element (imgInfo); TODO confirm against the ImageNet class.
    test_dataset = ImageNet(args.test_root, paths=True)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=8,
                                              pin_memory=True)
    print("=> Loaded data, length = ", len(test_dataset))
    model.eval()
    for i, (img, target, imgInfo) in enumerate(test_loader):
        # Recover "<class_dir>/<file>" from ".../val/<class_dir>/<file>".
        imgPath = imgInfo[0]
        dir_name, file_name = imgPath.split('/val/')[1].split('/')
        if img is None:
            continue
        # var = Variable(img.float(), requires_grad=True).cuda()
        var = Variable(img.float(), requires_grad=True)
        output = model(var)
        # presumably maps the network output back to ab channels — TODO
        # confirm against utils.decode.
        decoded_output = utils.decode(output)
        # Build a 256x256 Lab image: L from the shifted input, ab from the
        # decoded prediction, both resized to 256x256.
        lab = np.zeros((256, 256, 3))
        # lab[:,:,0] = cv2.resize((img+50.0).squeeze(0).squeeze(0).numpy(), (256,256))
        # lab[:,:,1:] = cv2.resize(decoded_output.squeeze(0).detach().numpy().transpose((1,2,0)),(256,256))
        lab[:, :, 0] = resize((img + 50.0).squeeze(0).squeeze(0).numpy(),
                              (256, 256))
        lab[:, :, 1:] = resize(
            decoded_output.squeeze(0).detach().numpy().transpose((1, 2, 0)),
            (256, 256))
        rgb = lab2rgb(lab)
        # Save under the per-class output directory, creating it on first
        # miss and retrying the save.
        try:
            plt.imsave(
                "img/imagenet-mini/generated/" + dir_name + '/' + file_name,
                rgb)
            #plt.savefig("img/imagenet-mini/generated/"+ dir_name+ '/'+ file_name)
        except FileNotFoundError:
            os.mkdir("img/imagenet-mini/generated/" + dir_name)
            plt.imsave(
                "img/imagenet-mini/generated/" + dir_name + '/' + file_name,
                rgb)
            #plt.savefig("img/imagenet-mini/generated/"+ dir_name+ '/'+ file_name)
        print("Forwarded image number: " + str(i + 1))
        if accuracy:
            count = 0
            # NOTE(review): ``norm(...) < range(151)`` broadcasts the scalar
            # error against thresholds 0..150, so ``sum(pixel_acc)`` counts
            # how many thresholds the error stays under — an area-under-
            # the-accuracy-curve figure rather than a plain hit rate.
            # Verify this is intentional (a single threshold comparison may
            # have been meant).
            for j in range(56):
                for k in range(56):
                    pixel_acc = (np.linalg.norm(target[0, :, j, k].detach(
                    ).numpy() - decoded_output[0, :, j, k].detach().numpy()) <
                                 range(151)) + 0
                    count += sum(pixel_acc)
            print('Accuracy is: ', count / (150 * 56 * 56))
        # Only process the first five images.
        if i == 4:
            break
Example #9
0
#%%
import pandas as pd

from preprocess import data_preprocessing, IrisData
from model import NNet, train, eval

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

if __name__ == '__main__':
    raw_data = pd.read_csv('iris.csv')

    # Preprocess the CSV into tensors and wrap them in a shuffled loader.
    features, labels = data_preprocessing(raw_data)
    dataset = IrisData(features, labels)
    loader = DataLoader(dataset, shuffle=True, batch_size=16)

    # Model, loss function, and optimizer.
    net = NNet().float()
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(net.parameters(), lr=0.01)

    # Train for 10 epochs. An evaluation pass exists but stays disabled:
    # eval(net, loader, loss_fn)
    train(net, loader, loss_fn, opt, 10)