def main():
    """Sweep each dimension of the model's parameter space around a start state."""
    manager = RunManager()

    # Start state: parse tokens from the command line if given, otherwise
    # sample a uniformly random point from the model's typical ranges.
    if len(sys.argv) > 1:
        # NOTE(review): eval on argv -- safe only for trusted input.
        start_state = [eval(token) for token in sys.argv[1].split()]
    else:
        start_state = randUniformPoint(SineModel5.typicalRanges)

    manager.explore_dimensions(start_state, SineModel5.typicalRanges,
                               pointsPerDim=9, repetitions=2)
示例#2
0
def main():
    """Run a per-dimension parameter exploration from a chosen initial state."""
    mgr = RunManager()

    # Pick the seed state: random when no CLI parameters were supplied,
    # otherwise parse them from the first argument.
    if len(sys.argv) <= 1:
        state0 = randUniformPoint(SineModel5.typicalRanges)
    else:
        # NOTE(review): eval on argv -- safe only for trusted input.
        state0 = [eval(tok) for tok in sys.argv[1].split()]

    mgr.explore_dimensions(
        state0, SineModel5.typicalRanges, pointsPerDim=9, repetitions=2)
示例#3
0
def doRun():
    """Build a search strategy from command-line arguments and run it.

    Supported invocations:
        prog -simplex <picklefile> | -svm <picklefile>  (load saved strategy)
        prog -neat <file> | -filt <file>
        prog "<param tokens>"                            (explicit start state)
        prog                                             (random start state)
    """
    runman = RunManager()

    # Choose initialState, either from user-inputted parameters or randomly
    neatFile = None
    if len(sys.argv) > 1:
        # BUGFIX: the original condition parsed as
        #   (len(sys.argv) > 2 and argv[1] == '-simplex') or argv[1] == '-svm'
        # so '-svm' with no filename crashed on sys.argv[2]. Parenthesized.
        if (len(sys.argv) > 2 and
                (sys.argv[1] == '-simplex' or sys.argv[1] == '-svm')):
            # Load a previously pickled strategy.
            # BUGFIX: pickle data is binary -- open 'rb' (text mode breaks on
            # Python 3); 'with' guarantees the handle is closed.
            import pickle
            filename = sys.argv[2]
            with open(filename, 'rb') as ff:
                strategy = pickle.load(ff)
        elif len(sys.argv) > 2 and sys.argv[1] == '-neat':
            neatFile = sys.argv[2]
            currentState = None
        elif len(sys.argv) > 2 and sys.argv[1] == '-filt':
            filtFile = sys.argv[2]
            currentState = None
        else:
            # Normal: parameters given directly on the command line.
            # NOTE(review): eval on argv -- safe only for trusted input.
            currentState = [eval(xx) for xx in sys.argv[1].split()]
    else:
        currentState = randUniformPoint(SineModel5.typicalRanges)

    # Fall back to the default strategy when none was unpickled above.
    try:
        strategy
    except NameError:  # narrowed from bare except: only "unbound" is expected
        #strategy = UniformStrategy(currentState, SineModel5.typicalRanges)
        #strategy = GaussianStrategy(currentState, SineModel5.typicalRanges)
        #strategy = GradientSampleStrategy(currentState)
        #strategy = LinearRegressionStrategy(currentState)
        #strategy = SimplexStrategy(currentState, SineModel5.typicalRanges)
        strategy = RandomStrategy(currentState, SineModel5.typicalRanges)
        #strategy = SVMLearningStrategy(currentState, SineModel5.typicalRanges)
        #strategy = NEATStrategy(currentState, SineModel5.typicalRanges, neatFile = neatFile)   # these args aren't used
        #strategy = FileStrategy(filtFile = filtFile)

    #runman.do_many_runs(currentState, lambda state: Neighbor.gaussian(SineModel5.typicalRanges, state))
    #runman.do_many_runs(currentState, lambda state: gradient_search(SineModel5.typicalRanges, state))
    runman.do_many_runs(strategy, SineModel5.typicalRanges)
示例#4
0
def doRun():
    """Build a search strategy from command-line arguments and run it.

    Supported invocations:
        prog -simplex <picklefile> | -svm <picklefile>  (load saved strategy)
        prog -neat <file> | -filt <file>
        prog "<param tokens>"                            (explicit start state)
        prog                                             (random start state)
    """
    runman = RunManager()

    # Choose initialState, either from user-inputted parameters or randomly
    neatFile = None
    if len(sys.argv) > 1:
        # BUGFIX: the original condition parsed as
        #   (len(sys.argv) > 2 and argv[1] == '-simplex') or argv[1] == '-svm'
        # so '-svm' with no filename crashed on sys.argv[2]. Parenthesized.
        if (len(sys.argv) > 2 and
                (sys.argv[1] == '-simplex' or sys.argv[1] == '-svm')):
            # Load a previously pickled strategy.
            # BUGFIX: pickle data is binary -- open 'rb' (text mode breaks on
            # Python 3); 'with' guarantees the handle is closed.
            import pickle
            filename = sys.argv[2]
            with open(filename, 'rb') as ff:
                strategy = pickle.load(ff)
        elif len(sys.argv) > 2 and sys.argv[1] == '-neat':
            neatFile = sys.argv[2]
            currentState = None
        elif len(sys.argv) > 2 and sys.argv[1] == '-filt':
            filtFile = sys.argv[2]
            currentState = None
        else:
            # Normal: parameters given directly on the command line.
            # NOTE(review): eval on argv -- safe only for trusted input.
            currentState = [eval(xx) for xx in sys.argv[1].split()]
    else:
        currentState = randUniformPoint(SineModel5.typicalRanges)

    # Fall back to the default strategy when none was unpickled above.
    try:
        strategy
    except NameError:  # narrowed from bare except: only "unbound" is expected
        #strategy = UniformStrategy(currentState, SineModel5.typicalRanges)
        strategy = GaussianStrategy(currentState, SineModel5.typicalRanges)
        #strategy = GradientSampleStrategy(currentState)
        #strategy = LinearRegressionStrategy(currentState)
        #strategy = SimplexStrategy(currentState, SineModel5.typicalRanges)
        #strategy = RandomStrategy(currentState, SineModel5.typicalRanges)
        #strategy = SVMLearningStrategy(currentState, SineModel5.typicalRanges)
        #strategy = NEATStrategy(currentState, SineModel5.typicalRanges, neatFile = neatFile)   # these args aren't used
        #strategy = FileStrategy(filtFile = filtFile)

    #runman.do_many_runs(currentState, lambda state: Neighbor.gaussian(SineModel5.typicalRanges, state))
    #runman.do_many_runs(currentState, lambda state: gradient_search(SineModel5.typicalRanges, state))
    runman.do_many_runs(strategy, SineModel5.typicalRanges)
示例#5
0
def main():
    MIN_ARGS = 4

    if len(sys.argv) >= MIN_ARGS:
        if len(sys.argv) > 2 and sys.argv[1] == '-SineModel5':
            sineModel5Params = [eval(xx) for xx in sys.argv[2].split()]
            print 'Using SineModel5 with params: ', sineModel5Params

            motionFunction = lambda time: SineModel5().model(
                time, parameters=sineModel5Params)
        elif len(sys.argv) > 2 and sys.argv[1] == '-neatFiltFile':
            raise Exception('not yet')
            filtFile = sys.argv[2]
            currentState = None
        else:
            usage()

        runName = sys.argv[3]

        if len(sys.argv) > 4:
            timeScale = float(sys.argv[4])
        else:
            timeScale = 1
    else:
        usage()

    motionFunctionScaled = scaleTime(motionFunction, timeScale)

    runman = RunManager()

    print 'Run name is:', runName

    for ii in range(1):
        print
        print 'Iteration', ii
        runman.run_function_and_log(motionFunctionScaled,
                                    runSeconds=10,
                                    timeScale=1,
                                    logFilename='log_%s.txt' % runName)
示例#6
0
def main():
    MIN_ARGS = 4
    
    if len(sys.argv) >= MIN_ARGS:
        if len(sys.argv) > 2 and sys.argv[1] == '-SineModel5':
            sineModel5Params = [eval(xx) for xx in sys.argv[2].split()]
            print 'Using SineModel5 with params: ', sineModel5Params
            
            motionFunction = lambda time: SineModel5().model(time,
                                                             parameters = sineModel5Params)
        elif len(sys.argv) > 2 and sys.argv[1] == '-neatFiltFile':
            raise Exception('not yet')
            filtFile = sys.argv[2]
            currentState = None
        else:
            usage()

        runName = sys.argv[3]

        if len(sys.argv) > 4:
            timeScale = float(sys.argv[4])
        else:
            timeScale = 1
    else:
        usage()

    motionFunctionScaled = scaleTime(motionFunction, timeScale)

    runman = RunManager()

    print 'Run name is:', runName
    
    for ii in range(1):
        print
        print 'Iteration', ii
        runman.run_function_and_log(motionFunctionScaled, runSeconds = 10, timeScale = 1, logFilename = 'log_%s.txt' % runName)
from itertools import product
from collections import namedtuple
from collections import OrderedDict

from RunManager import RunManager
from RunBuilder import RunBuilder

from model import MyConvNet

# Hyper-parameter grid for RunBuilder: every combination of these values
# becomes one training run.
params = OrderedDict(lr=[0.01, 0.001],
                     batch_size=[1000, 10000],
                     shuffle=[True, False],
                     num_workers=[1, 2, 4])

m = RunManager()

# Prefer the GPU when one is available; otherwise fall back to CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device(
    'cpu')

print(f'processing on: {device.type}')
if device.type == 'cuda':
    print(f"Number of GPU(s): {torch.cuda.device_count()}")

# Convert images to tensors and normalize every channel to mean 0.5 / std 0.5.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

init_train_data = datasets.CIFAR10('data',
                                   train=True,
示例#8
0
def main():
    """Sweep three CNN families over their hyper-parameter grids.

    For each (name, parameter-grid) experiment -- iterated in reverse
    declaration order -- trains every run produced by RunBuilder via
    runManager_train, then persists that family's results and its five best
    models to disk.

    Returns:
        dict mapping network name -> sorted (best-first) list of best models.
    """
    print(torch.__version__)
    print(torchvision.__version__)

    train_set, valid_set, test_set = create_datasets()

    # Prefer the GPU when one is available.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Hyper-parameter grids, one OrderedDict per network family.
    parametersLeNet = OrderedDict(network=['LeNet'],
                                  lr=[.005, .01],
                                  batch_size=[1000],
                                  epochs=[10],
                                  device=[device],
                                  nw=[2],
                                  conv_out=[[16, 32], [24, 48], [32, 64]],
                                  conv_ks=[[3, 3], [3, 5], [5, 5]],
                                  dropout=[0.0, 0.2, 0.5],
                                  lin_out=[[200, 84], [500, 200, 84]],
                                  in_size=[(28, 28)],
                                  out_size=[10]
                                  #, batch_norm=[True]
                                  )

    parametersBiggerLeNet = OrderedDict(network=['BiggerLeNet'],
                                        lr=[.001, .005, .01],
                                        batch_size=[1000],
                                        epochs=[10],
                                        device=[device],
                                        nw=[2],
                                        conv_out=[[16, 32, 64], [32, 64, 128],
                                                  [48, 96, 192]],
                                        conv_ks=[[3, 3, 3]],
                                        dropout=[0.0, 0.2, 0.5],
                                        lin_out=[[200, 84], [512], [200]],
                                        in_size=[(28, 28)],
                                        out_size=[10]
                                        #, batch_norm=[True]
                                        )

    parametersVggLikeNet = OrderedDict(network=['VggLikeNet'],
                                       lr=[.001, .005, .01],
                                       batch_size=[1000],
                                       epochs=[10],
                                       device=[device],
                                       nw=[10],
                                       conv_out=[[[16, 16], [32, 32]],
                                                 [[24, 24], [48, 48]],
                                                 [[32, 32], [64, 64]]],
                                       conv_ks=[[[3, 3], [3, 3]]],
                                       dropout=[0.0, 0.2, 0.5],
                                       lin_out=[[200, 84], [512], [200]],
                                       in_size=[(28, 28)],
                                       out_size=[10]
                                       #, batch_norm=[True]
                                       )

    runs_data = {}
    best_models = None
    experiments = [('LeNet', parametersLeNet),
                   ('BiggerLeNet', parametersBiggerLeNet),
                   ('VggLikeNet', parametersVggLikeNet)]

    # reversed() is the idiomatic form of the original experiments.__reversed__().
    for networkName, parameters in reversed(experiments):
        m = RunManager()
        use_batch_norm = True
        for run in RunBuilder.get_runs(parameters):

            print("Run starting:", run)

            # NOTE(review): random.seed() reseeds the global RNG and returns
            # None, so random_seed is always None here -- a seed *value* was
            # probably intended. Left unchanged because downstream loaders may
            # rely on receiving None; confirm before fixing.
            random_seed = random.seed()
            valid_split = 0.1
            # Pinned host memory only helps when copying batches to a GPU.
            pin_memory = (run.device != 'cpu')

            train_loader, valid_loader, test_loader = get_train_valid_test_loader(
                train_set, valid_set, test_set, run.batch_size, random_seed,
                valid_split, True, run.nw, pin_memory)

            network = construct_network(run, use_batch_norm)

            print('network.name: :', networkName, ' chosen')
            print("network architecture: \n", network)

            optimizer = optim.Adam(network.parameters(), lr=run.lr)

            runManager_train(runManager=m,
                             run=run,
                             network=network,
                             optimizer=optimizer,
                             train_loader=train_loader,
                             valid_loader=valid_loader,
                             test_loader=None,
                             valid_split=valid_split,
                             names=train_set.classes)

            # Keep the five best models (best-first) seen so far for this family.
            best_models = sorted(m.best_models, reverse=True)

            best_models_str = "\n".join(
                str(model) for model in best_models[:5])

            runs_data[networkName] = best_models
            m.save(f'results_{networkName}')
            with open(f'best_models_{networkName}.txt', 'w',
                      encoding='utf-8') as f:
                f.write(best_models_str)

    return runs_data
示例#9
0
    # Final training pass: for each network family, take its best model's
    # hyper-parameters and retrain from scratch with a fresh RunManager.
    # NOTE(review): train_set/valid_set/test_set and random_seed come from
    # earlier in the enclosing function (not visible in this fragment).
    for key, value in runs_data.items():
        #best_vgg = runs_data['VggLikeNet'][0]
        # runs_data values are sorted best-first, so element 0 is the winner.
        best_model = value[0]

        best_run_params = best_model.run_params
        # Pinned host memory only helps when copying batches to a GPU.
        pin_memory = (best_run_params.device != 'cpu')

        train_loader, valid_loader, test_loader = get_train_valid_test_loader_for_final_training(
            train_set, valid_set, test_set, best_run_params.batch_size,
            random_seed, True, best_run_params.nw, pin_memory)

        best_network = construct_network(best_run_params)
        optimizer = optim.Adam(best_network.parameters(),
                               lr=best_run_params.lr)
        runManager = RunManager()

        runManager_final_train(runManager=runManager,
                               run=best_run_params,
                               network=best_network,
                               optimizer=optimizer,
                               train_loader=train_loader,
                               test_loader=test_loader,
                               names=train_set.classes)

        # Report the best model found during this final training run.
        best_final_models = sorted(runManager.best_models, reverse=True)
        print("After final training:\n",
              best_final_models[0].run_params.network, "\n")

        best_models_str = str(best_final_models[0])
示例#10
0
    # Map grid keys to the raw and normalized datasets defined earlier
    # in the enclosing scope (not visible in this fragment).
    trainsets = {'not_normal': train_set, 'normal': train_set_normal}

    # Select the training device.
    if torch.cuda.is_available():
        mydevice = ['cuda']
    else:
        mydevice = ['cpu']
    # determine the value of hyper-parameters
    params = OrderedDict(lr=[.01],
                         batch_size=[100, 1000],
                         shuffle=[True, False],
                         num_workers=[1],
                         device=mydevice,
                         trainset=['not_normal', 'normal'])
    # RunManager instance
    m = RunManager()

    # 3. Train the network: one pass per hyper-parameter combination.
    for run in RunBuilder.get_runs(params):

        device = torch.device(run.device)
        # Instantiate the model on the chosen device.
        network = Network().to(device)
        # Configure the optimizer with this run's learning rate.
        optimizer = optim.Adam(network.parameters(), lr=run.lr)
        # Batch the selected dataset per this run's loader settings.
        data_loader = DataLoader(trainsets[run.trainset],
                                 run.batch_size,
                                 shuffle=run.shuffle,
                                 num_workers=run.num_workers)
        # record hyper-parameters
示例#11
0
def train():
    """Grid-search training over FashionMNIST, tracked by a RunManager.

    Trains finalModel() for 10 epochs per hyper-parameter combination and
    saves the aggregated results to 'results'.
    """
    train_set = torchvision.datasets.FashionMNIST(
        root='./data/FashionMNIST',
        train=True,
        download=True,
        transform=transforms.Compose([transforms.ToTensor()]),
    )

    # Every combination of these values becomes one run.
    hyper_grid = OrderedDict(
        lr=[0.01, 0.001],
        batch_size=[100, 1000],
        shuffle=[True, False],
        num_workers=[2],
    )

    tracker = RunManager()

    for cfg in RunBuilder.get_runs(hyper_grid):
        model = finalModel()
        batches = DataLoader(train_set,
                             batch_size=cfg.batch_size,
                             shuffle=cfg.shuffle,
                             num_workers=cfg.num_workers)
        opt = optim.Adam(model.parameters(), lr=cfg.lr)

        tracker.begin_run(cfg, model, batches)

        for _epoch in range(10):
            tracker.begin_epoch()
            for images, labels in batches:
                # Move the batch to whichever device the project selects.
                images = images.to(get_device_type())
                labels = labels.to(get_device_type())

                preds = model(images)
                loss = F.cross_entropy(preds, labels)

                opt.zero_grad()
                loss.backward()
                opt.step()

                tracker.track_loss(loss)
                tracker.track_num_correct(preds, labels)

            tracker.end_epoch()

        tracker.end_run()
    tracker.save('results')
    def __train_network(self, model, train_set, run, save_logistics_file_path, epochs, type_of_model, show_plot):
        """Train `model` on `train_set` for `epochs` epochs and return it.

        Uses the batch size / learning rate / shuffle flag carried by `run`,
        tracks per-epoch loss and accuracy through a RunManager, saves the run
        log, and optionally plots the loss/accuracy curves.
        """
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print("-------------------------------------------------------------------", device)

        loss_history = []
        accuracy_history = []

        # Batch the dataset; pinned memory speeds host-to-GPU copies.
        loader = torch.utils.data.DataLoader(train_set,
                                             batch_size=run.batch_size,
                                             shuffle=run.shuffle,
                                             num_workers=1,
                                             pin_memory=True)

        save_file_name = save_logistics_file_path + self.__get_file_name(
            type_of_model, run.shuffle, run.lr, run.batch_size)
        tb_summary = self.__get_tb_summary_title(type_of_model)

        # Adam optimizer with this run's learning rate.
        optimizer = optim.Adam(model.parameters(), lr=run.lr)

        # Metric tracker / summary writer for this run.
        tracker = RunManager()
        tracker.begin_run(run, model, loader, device, tb_summary)

        # NOTE(review): globally disables cuDNN for the whole process -- the
        # original does this too; reason not stated here, confirm intent.
        torch.backends.cudnn.enabled = False

        for _ in range(epochs):
            tracker.begin_epoch()

            for images, labels in loader:
                images = images.to(device)
                labels = labels.to(device)

                # Forward pass and loss.
                predictions = model(images)
                loss = F.cross_entropy(predictions, labels)

                # Reset gradients, backpropagate, and update the weights.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                tracker.track_loss(loss)
                tracker.track_total_correct_per_epoch(predictions, labels)

            tracker.end_epoch()
            loss_history.append(tracker.get_final_loss_val())
            accuracy_history.append(tracker.get_final_accuracy())

        tracker.end_run()
        tracker.save(save_file_name)

        if show_plot:
            self.plot_loss_val(loss_history, run)
            self.plot_accuracy_val(accuracy_history, run)

        return model