Code example #1
import sys

# Model and make_predictions are defined elsewhere in this project.


def main():
    args = sys.argv
    argc = len(args)

    if argc != 2:
        print("Usage:", args[0], "<csv data file>")
        sys.exit(1)
    datafile = args[1]
    model = Model()
    m, c = model.train(datafile)
    print("Slope(m): ", m, "Intercept(c): ", c)

    choice = input("Do you want to make predictions(Y/n): ")
    if choice in ['y', 'Y']:
        print("Enter any character to stop predictions")
        make_predictions(model)
    else:
        choice = input("Do you want to SAVE the trained model?(Y/n): ")
        if choice in ['y', 'Y']:
            while True:
                file = input("Please provide a file to save the model: ")
                try:
                    # Verify the path is writable before saving the model.
                    fp = open(file, "wb")
                    fp.close()
                    model.save_model(file)
                    sys.exit()
                except Exception as e:
                    print("Exception occurred while opening the file ", file)
                    print(e)
        else:
            print("Abort")
Code example #2
# Model and check_usage are defined elsewhere in this project
# (a sketch of check_usage follows this example).

def main():
    dataset_file, object_file = check_usage()

    model = Model()
    model.train(dataset_file)
    model.calc_RSquare()
    model.save_model(object_file)

    while True:
        x = input("Enter an integer: ")
        try:
            x = int(x)
        except ValueError:
            # Any non-integer input ends the prediction loop.
            break
        model.predict(x)
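
# A hedged sketch of the check_usage helper referenced above; the real
# implementation lives elsewhere in the project, so names and messages here
# are assumptions.
import sys


def check_usage():
    # Expect exactly two arguments: the dataset file and the output object file.
    if len(sys.argv) != 3:
        print("Usage:", sys.argv[0], "<csv data file> <model object file>")
        sys.exit(1)
    return sys.argv[1], sys.argv[2]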
Code example #3
    def test_init(self):
        settings.update(load_settings("tests", "minimal_with"))
        settings["optimization"]["optimize"] = True
        settings["optimization"]["constrained"] = False
        model = Model()

        self.assertEqual(model.n_const, 0)

        self.assertFalse(model.trained)
        self.assertEqual(model.no_samples, 0)
        self.assertEqual(model.sampling_iterations, 0)

        self.assertFalse(model.optimization_converged)
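
# Assumed surrounding context for the assertions above: the method belongs to
# a unittest.TestCase subclass (the class name here is hypothetical) and runs
# under the standard runner.
import unittest


class TestModel(unittest.TestCase):
    def test_init(self):
        ...  # body as in the snippet above


if __name__ == "__main__":
    unittest.main()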
Code example #4
            best_choice = choice
            max_val = values[0, choice]
    return max_val


# we don't want the network to be different for X and O, so we make each player see the board as X would
def state_from_board(board, counter):
    state = np.array(board.board)
    if counter == 1:
        state = -state
    state = state.reshape((1, 9))
    return state


memory = Memory(3000)
model = Model(0.001)

epsilon = 0.9
with tf.Session() as sess:
    sess.run(model.var_init)
    for game in range(10000):
        if game % 100 == 0:
            print(game)
        board = Board()
        winner = ''
        counter = 0
        symbols = ['X', 'O']
        # we need to store samples temporarily because we don't get their values till the end of each game
        samples = []  # each sample contains state, action, reward, and next state
        while winner == '':
Code example #5
import os

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import pandas as pd
import pickle
import numpy as np

#
# breast-cancer-wisconsin.csv-processed.csv
#

os.system('clear')  # clear the screen

# Load model (the unpickled object fully replaces any fresh Model instance,
# so no bare constructor call is needed here)
print('What is the name of your model?')
model_name = raw_input('> ')
model_obj = pickle.load(open('{}.p'.format(model_name), 'rb'))
print('Model loaded successfully.')

###########Format data for network###########
print('What is the name of your preprocessed training data?')
file_name = raw_input('> ')
df = pd.read_csv(file_name)
print('\nFile loaded successfully.')
print('\nWhat is the label of the column that the network should be predicting?')
target_col = raw_input('> ')
Code example #6
parameter_prior_type = 'Zellner'
parameter_prior_range = None
parameter_prior_mu = None
parameter_prior_sigma = None
zellner_g = None
'''
Path to the dataframe. This is expected to be a tab-separated file; it will be
loaded using pandas and ought to have columns of the form
    N   Z   A   Element Ebinding    Eunc    "basis functions"
where "basis functions" can be any number of columns corresponding to a design
matrix; their names must start with "phi" for the code to find them. The
author used AME as data.
'''
dataframe_path = 'MASTERFRAMES/SEMF_ame16.tsv'

# ----------------------------------------------------------------------------
# RUN
# ----------------------------------------------------------------------------

model = Model(model_name=model_name,
              model_discrepancy=model_discrepancy,
              model_prior_type=model_prior_type,
              dilution_factor=dilution_factor,
              parameter_prior_type=parameter_prior_type,
              parameter_prior_range=parameter_prior_range,
              parameter_prior_mu=parameter_prior_mu,
              parameter_prior_sigma=parameter_prior_sigma,
              zellner_g=zellner_g,
              dataframe_path=dataframe_path)

model.analyse()
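
# A hedged sanity check, not part of the original script: confirm the
# tab-separated file has the columns described above before running analyse().
import pandas as pd

df = pd.read_csv(dataframe_path, sep='\t')
phi_cols = [c for c in df.columns if c.startswith('phi')]
assert {'N', 'Z', 'A', 'Element', 'Ebinding', 'Eunc'}.issubset(df.columns)
assert phi_cols, 'no "phi" basis-function columns found'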
Code example #7
File: main_file.py Project: huguensjean/YOLOv5
from model_class import Model
import torch
import os

#config file path
config_file = 'models/yolov5x.yaml'

#setting the device for torch and ONNX
device = torch.device('cpu')

yolov5_xlarge_model = Model(config_file).to(device)
yolov5_xlarge_model.eval()
print('Loaded the model...')

### Converting the model to ONNX format...
print('converting the model to ONNX...')

yolov5_xlarge_model.model[-1].export = True

dummy_input = torch.randn(1, 3, 640, 640, device='cpu')
torch.onnx.export(yolov5_xlarge_model,
                  dummy_input,
                  'onnx/yolov5.onnx',
                  verbose=True,
                  opset_version=11,
                  input_names=['input'],
                  output_names=['output'])

print('ONNX conversion done.')
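
# A hedged follow-up, not part of the original script: if onnxruntime is
# installed, load the exported file and run the same dummy input through it.
import onnxruntime as ort

session = ort.InferenceSession('onnx/yolov5.onnx')
outputs = session.run(None, {'input': dummy_input.numpy()})
print([o.shape for o in outputs])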
Code example #8
import torch
import torchvision
from torch import nn, optim

# Model is defined elsewhere in this project.


def digit_classifier_generator(filepath,
                               train_batch_size=64,
                               learn_rate=0.005,
                               epochs=10,
                               optimizer_type='Adam',
                               dropout=0.15,
                               hidden_layers_count=2,
                               hidden_layer_size=128,
                               rand_seed=7):
    """Generates and saves a trained model on the MNIST dataset
	Inputs: 
	filepath -- Where the model will be saved to
	Hyperparamters -- The various hyperparameters used to build the model
	
	"""

    # Set random seed
    torch.manual_seed(rand_seed)

    # Define transform
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.1307, ), (0.3081, ))
    ])

    # Load datasets
    train_dataset = torchvision.datasets.MNIST('MNIST_dataset',
                                               train=True,
                                               transform=transform,
                                               download=True)
    test_dataset = torchvision.datasets.MNIST('MNIST_dataset',
                                              train=False,
                                              transform=transform)

    # Make data loaders
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=train_batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1000,
                                              shuffle=True)

    # Create model object
    model = Model(hidden_layers_count, hidden_layer_size, dropout)

    # Make an optimizer
    if optimizer_type == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=learn_rate)
    elif optimizer_type == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=learn_rate)

    # Define the loss
    criterion = nn.CrossEntropyLoss()

    for e in range(epochs):

        training_loss = 0

        # Turns on dropout
        model.train()

        # Decay learning rate by updating the optimizer's param_groups
        # (setting optimizer.lr has no effect in PyTorch). Note that with
        # the default epochs=10 these epochs are never reached.
        if e == 10 or e == 15:
            for group in optimizer.param_groups:
                group['lr'] /= 2
            print("Decaying learn rate")

        # Training stage
        for images, labels in train_loader:

            # Flatten images - will be 64x784
            images = images.reshape(images.shape[0], -1)

            # Zero out optimizer
            optimizer.zero_grad()

            # Forward pass through model - results in un-normalized logits
            logits = model(images, hidden_layers_count)

            # Calculate loss and add to training loss
            loss = criterion(logits, labels)
            training_loss += loss.item()  # .item() avoids retaining the autograd graph

            # Find gradient then optimize
            loss.backward()
            optimizer.step()

        print("Training loss {}".format(training_loss / len(train_loader)))

        with torch.no_grad():

            # Turn off dropout for evaluation
            model.eval()

            running_accuracy = 0
            for images, labels in test_loader:

                # Flatten images
                images = images.reshape(images.shape[0], -1)

                # Find predictions of the model
                logits = model(images, hidden_layers_count)
                probs, predictions = torch.topk(logits, 1, dim=1)

                # See where the predictions match up with the labels
                correct_points = predictions == labels.view(*predictions.shape)

                # Convert booleans to 1's and 0's to find accuracy
                correct_points = correct_points.type(torch.FloatTensor)

                # Calculate accuracy
                accuracy = torch.sum(correct_points) / len(correct_points)

                # Add to total
                running_accuracy += accuracy.item()

            running_accuracy = running_accuracy / len(test_loader)

            print("Testing accuracy: {}".format(running_accuracy))

    # Save model
    checkpoint = {
        'hidden_layer_size': hidden_layer_size,
        'hidden_layers_count': hidden_layers_count,
        'learn_rate': learn_rate,
        'epochs': epochs,
        'optimizer_type': optimizer_type,
        'dropout': dropout,
        'accuracy': running_accuracy,
        'state_dict': model.state_dict()
    }

    torch.save(checkpoint, filepath)
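
# Example invocation (the filepath is hypothetical): train with the defaults
# above and write the checkpoint to disk.
digit_classifier_generator('mnist_classifier.pth')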
Code example #9
File: create_model.py Project: VemityAI/AI_Builder
import os
from tqdm import tqdm  # show progress bar on for loops
from model_class import Model
from layers import Input, Fully_Connected, Convolutional
import numpy as np
import pickle

os.system('clear')
print(
    "What would you like to do?\n\n1. Create new model \n2. Load existing model\n"
)
in1 = raw_input('> ')

os.system('clear')
model = Model()
if in1 == '1':  # create new model
    print('\nWhat would you like to name your model?\n')
    in2 = raw_input('> ')
    model.name = in2
    print('Model created successfully!')
elif in1 == '2':  # load existing model
    print('\nWhat is the name of your model?\n')
    in2 = raw_input('> ')
    model = pickle.load(open('{}.p'.format(in2), 'rb'))

editing = True
while editing:
    os.system('clear')
    print(
        '\n\nWhat would you like to do with your model?\n\n1. Add a layer\n2. Remove a layer\n3. Display the model\n4. Save Model\n5. Quit'
    )
Code example #10
# torch, torchvision, and Model are assumed to be imported as in the
# training script above.
def load_and_test_model(filepath):
    """This function loads and tests the accuracy of a model.

    Input: filepath to the desired model
    """

    # Print the model that is to be loaded
    print("Model of filepath {}".format(filepath))

    # Load the desired checkpoint
    checkpoint = torch.load(filepath)

    # Attain model structure info
    hidden_layers_count = checkpoint['hidden_layers_count']
    hidden_layer_size = checkpoint['hidden_layer_size']

    # Create model object (with random parameters for now)
    model = Model(hidden_layers_count, hidden_layer_size, 0.1)

    # Load state_dict into model from checkpoint
    model.load_state_dict(checkpoint['state_dict'])

    # Define transform for the testing dataset
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    # Load test data
    test_dataset = torchvision.datasets.MNIST('MNIST_dataset',
                                              train=False,
                                              transform=transform)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1000,
                                              shuffle=True)

    running_accuracy = 0

    # Turns off dropout
    model.eval()

    # Loop through images and labels in test_loader
    for images, labels in test_loader:

        # Flatten images
        images = images.reshape(images.shape[0], -1)

        # Obtain logits
        logits = model(images, hidden_layers_count)

        # Obtain the index with highest logit in each row (this is the prediction)
        ps, predictions = torch.topk(logits, 1, dim=1)

        # Compare the labels to predictions and count correct predictions
        correct = 0
        for i in range(len(labels)):
            if labels[i] == predictions[i]:
                correct += 1

        # Add the accuracy for this batch to the running accuracy
        running_accuracy += float(correct) / len(labels)

    # The accuracy is the running accuracy divided by the number of batches in the test data
    print("Accuracy: {0:.2f}%".format(running_accuracy * 100 /
                                      len(test_loader)))
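
# Example invocation, assuming a checkpoint produced by
# digit_classifier_generator (the filepath is hypothetical):
load_and_test_model('mnist_classifier.pth')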
Code example #11
    # act randomly
    if r < epsilon or np.linalg.norm(state) == 0:
        return np.random.randint(0, 9)
    else:  # follow policy
        return np.argmax(model.predict_one(state, sess))

# we don't want the network to be different for X and O, so we make each player see the board as X would
def state_from_board(board, counter):
    state = np.array(board.board)
    if counter == 1:
        state = -state
    state = state.reshape((1, 9))
    return state


memory = Memory(3000)  # not sure how many samples will crash the computer, so let's keep it conservative for now
model = Model()

epsilon = 0.9
# we'll play four thousand games
with tf.Session() as sess:
    sess.run(model.var_init)
    for game in range(4000):
        if game % 100 == 0:
            print(game)
        board = Board()
        winner = ''
        counter = 0
        symbols = ['X', 'O']
        # we need to store samples temporarily because we don't get their values till the end of each game
        samples = []  # each sample contains state, action, reward, and next state
        while winner == '':
Code example #12
        return np.argmax(model.predict_one(state, sess))


# we don't want the network to be different for X and O, so we make each player see the board as X would
def state_from_board(board, counter):
    state = np.array(board.board)
    if counter == 1:
        state = -state
    state = state.reshape((1, 9))
    return state


memory = Memory(3000)  # not sure how many samples will crash the computer, so let's keep it conservative for now
model = Model(0.01)

epsilon = 0.9
# we'll play four thousand games
with tf.Session() as sess:
    sess.run(model.var_init)
    for game in range(4000):
        if game % 100 == 0:
            print(game)
        board = Board()
        winner = ''
        counter = 0
        symbols = ['X', 'O']
        # we need to store samples temporarily because we don't get their values till the end of each game
        samples = []  # each sample contains state, action, reward, and next state