def main():

    import dataloader as dl

    #store MNIST in HDF5
    myMemLoader = dl.InMemDataLoader(dataset="MNIST")
    myMemLoader.storeAsHDF5()
    print("MNIST stored in HDF5")

    #store CIFAR 10
    myMemLoader = dl.InMemDataLoader(dataset="CIFAR10")
    myMemLoader.storeAsHDF5()
    print("CIFAR10 stored in HDF5")

    #store CIFAR100
    #myMemLoader = dl.InMemDataLoader(dataset="CIFAR100")
    #myMemLoader.storeAsHDF5()
    #print("CIFAR100 stored in HDF5")

Example #2
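The examples below are excerpted functions; they assume module-level imports along these lines. The aliases rn, pa, and syn refer to project-local modules (the ResNet model, the parallel/complex network, and the synthetic-gradient helpers); their exact module names are an assumption.

import time

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import dataloader as dl
#project-local modules, assumed to sit next to these examples:
#import resnet as rn        (rn.ResNet)
#import parallel as pa      (pa.complexNeuralNetwork)
#import synthetic as syn    (syn.sgLoss)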
def main():

    np.random.seed(11)
    torch.manual_seed(11)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    dataset = "ELLIPSE"
    # Assume that we are on a CUDA machine, then this should print a CUDA device:

    print(device)
    #torch.set_printoptions(precision=10)

    dtype = torch.float
    download = True  #False
    input = torch.tensor([[.1, .1], [-.1, -.1], [0, 0.5]], dtype=dtype)
    target = torch.tensor([[.1, .1], [-.1, -.1], [0, 0.5]], dtype=dtype) + 1

    num_features, num_classes = dl.getDims(dataset)

    weights = None  # torch.tensor([[2, 0],[-2, 2]], dtype = dtype)
    bias = None  #torch.tensor([0,0], dtype = dtype)

    begin = 0
    end = 10000
    reg = False

    #-----------hyper parameters
    learn_rate = 0.1
    step = 1.0
    alpha = 0.0001
    epochs = 50
    gpu = False

    N = 4
    batch_size = 256
    error_func = nn.CrossEntropyLoss()

    t_data = time.perf_counter()

    dataloader = dl.InMemDataLoader(dataset)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=True)

    print("data time", time.perf_counter() - t_data)

    res = rn.ResNet(device,
                    N,
                    num_features,
                    num_classes,
                    torch.tanh,
                    F.softmax,
                    gpu=gpu)
    if gpu:
        res.to(device)

    train_time = time.perf_counter()
    res.train(loader,
              error_func,
              learn_rate,
              epochs,
              begin,
              end,
              step,
              reg,
              alpha,
              graph=False)
    train_time = time.perf_counter() - train_time

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=False)

    results = res.test(loader, begin, end, step)

    print("results: ", results)
    print("--- %s seconds ---" % (train_time))
Example #3
def main(argv):   
    
    #complex net parameters
    M = 2

    #---------------training data--------------------
    dataset_name = "CIFAR10"  # choose from MNIST, CIFAR10, CIFAR100, ELLIPSE, SWISS
    choice = 'v'
    conv = True
    gpu = True
    
    #neural net parameters---------------------------------------------------------
    
    weights = None
    bias = None  
    reg_f = True
    reg_c = False   
    alpha_f = 0.0001
    alpha_c = 0.00025
    graph = False
    
    #-----------hyper parameters
    batch_size = 256
    N = 16  #note: the coarse model will be 2x this, the fine model will be 4x this
    learn_rate_c = .001
    f_step_c = .1
    learn_rate_f = .001
    f_step_f = .05 #coarse Verlet 64 could use .075
    epochs = 25
      
    gamma = 0.02    
    begin = 0
    end = 10000
    
    #batch_size = 64
    func_f = torch.nn.ReLU()
    func_c = F.softmax    
    error_func = nn.CrossEntropyLoss()         
    
    multilevel = False       
    #------------------------------------------------------------------------------
       
    if len(argv) > 0:
        N = int(argv[0])
        epochs = int(argv[1])
        learn_rate_c = float(argv[2])
        f_step_c = float(argv[3])
        learn_rate_f = float(argv[4])
        f_step_f = float(argv[5])
        choice = argv[6]
        graph = argv[7] == "True"  #argv values are strings; convert the flag
        print("N", N, "epochs", epochs, "lr_c", learn_rate_c, "step_c", f_step_c,
              "lr_f", learn_rate_f, "step_f", f_step_f, "choice", choice, "graph", graph)
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    dataloader = dl.InMemDataLoader(dataset_name, conv_sg=conv)
        
    num_features, num_classes, in_channels = dl.getDims(dataset_name)
    
    #load training data                              
    loader = dataloader.getDataLoader(batch_size, shuffle=True, num_workers=0, pin_memory=True, train=True)
    
    multilevel = True     
    #------------------------------------------------------------------------------
    
    #init complex network
    complexNet = pa.complexNeuralNetwork(device, M, gpu, conv, in_channels)
    
    #init sub-neural networks
    complexNet.init_nets(N, num_features, num_classes, func_f, func_c, weights, bias,
                         choice, gamma, multilevel)
        
    #init SG modules
    complexNet.init_sgs(num_features=num_features, batch_size=batch_size)  
      
    #train coarse model 
    torch.cuda.synchronize()
    coarse_time = time.time()
    complexNet.train_multi_level(loader, error_func, learn_rate_c, epochs, begin, end,
                                 f_step_c, reg_f, alpha_f, reg_c, alpha_c, graph=False)
    torch.cuda.synchronize()
    coarse_time = time.time() - coarse_time
    
    print("after coarse train")
    coarse_result = complexNet.test(loader, begin=0, end=10000, f_step=f_step_c)
    #print("after coarse test")
    
    complexNet.double_complex_net()    
    
    #train model with distributed algorithm
    train_time = complexNet.distTrain(loader, error_func, learn_rate_f, epochs, begin, end,
                                      f_step_f, reg_f, alpha_f, reg_c, alpha_c, graph, False, M)

    result_train = complexNet.test(loader, begin=0, end=10000, f_step=f_step_f)

    #load test dataset
    loader = dataloader.getDataLoader(batch_size, shuffle=False, num_workers=0, pin_memory=False, train=False)

    result_test = complexNet.test(loader, begin=0, end=10000, f_step=f_step_f)

    print("coarse train results", coarse_result, "\n")
    print("coarse time", coarse_time, "\n")
    
    print("fine train result", result_train, "\n")
    print("fine test result", result_test, "\n")
    
           
    print("total time", train_time + coarse_time)        
Example #4
def main():
    """
    The training algorithm incorporates synthetic gradients and trains the sub neural networks
    in series. It can be used to calculate theoretical speed-ups factors for the training time.
    """

    #complex net parameters
    M = 2
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    #---------------training data--------------------

    np.random.seed(11)
    torch.manual_seed(11)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    dataset_name = "MNIST"  # choose from MNIST, CIFAR10, CIFAR100, ELLIPSE, SWISS
    choice = 'r'
    conv = True
    gpu = True

    #neural net parameters---------------------------------------------------------

    weights = None
    bias = None
    reg_f = False
    reg_c = False
    alpha_f = 0.001
    alpha_c = 0.00025
    graph = True

    #-----------hyper parameters
    batch_size = 256
    N = 2  #note: the model will be 2x this
    learn_rate = 0.025
    f_step = .025
    epochs = 10

    gamma = 0.02
    begin = 0
    end = 10000

    #batch_size = 64
    func_f = torch.tanh
    func_c = F.softmax
    error_func = nn.CrossEntropyLoss()

    dataloader = dl.InMemDataLoader(dataset_name, conv_sg=conv)

    num_features, num_classes, in_channels = dl.getDims(dataset_name)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=True,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=True)

    multilevel = False
    #------------------------------------------------------------------------------

    #init complex network
    complexNet = pa.complexNeuralNetwork(device, M, gpu, conv, in_channels)

    #init sub-neural networks
    complexNet.init_nets(N, num_features, num_classes, func_f, func_c, weights,
                         bias, choice, gamma, multilevel)

    #init SG modules
    complexNet.init_sgs(num_features=num_features, batch_size=batch_size)

    #train_network
    torch.cuda.synchronize()
    start_time = time.perf_counter()
    train_time = complexNet.train(loader, error_func, learn_rate, epochs,
                                  begin, end, f_step, reg_f, alpha_f, reg_c,
                                  alpha_c, graph)
    torch.cuda.synchronize()
    end_time = time.perf_counter() - start_time

    print("total time in series:", end_time)
    #During training, each epoch prints the loss and the MSE of the synthetic gradient

    result_train = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=True,
                                      num_workers=0,
                                      pin_memory=False,
                                      train=False)

    result_test = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    print("fine train result", result_train, "\n")
    print("fine test result", result_test, "\n")

    #the theoretical time is the training time using the lowest time on each training batch
    print("Total time:", train_time[0], "\ntheoretical time:", train_time[1])
    print("Batch load time adjusted speed up", train_time[3])
Example #5
def main():

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Assume that we are on a CUDA machine, then this should print a CUDA device:
    print(device)

    #set the flag for the dataset you want to True
    conv = False
    dataset_name = "ELLIPSE"  #choose from MNIST, CIFAR10, CIFAR100, ELLIPSE, SWISS
    gpu = False

    #architecture choices
    num_proc = 2
    N = 4
    num_features = 2
    num_classes = 2
    batch_size = 8

    learn_rate = 0.1
    iterations = 10  # max number of iterations of the PVD algorithm
    epochs = 100  # epochs that each process will train for in each iteration of the PVD

    func_f = torch.tanh
    func_c = F.softmax
    weights = None
    bias = None
    error_func = nn.CrossEntropyLoss()

    choice = 'v'
    gamma = 0.1

    begin = 0
    end = 100
    f_step = 0.5

    reg_f = False
    alpha_f = 0.01
    reg_c = False
    alpha_c = 0.01
    graph = False
    acc = 85

    dataloader = dl.InMemDataLoader(dataset_name,
                                    conv_sg=False)  # only uses FCN layers

    num_features, num_classes, in_channels = dl.getDims(dataset_name)

    #load training dataset
    trainloader = dataloader.getDataLoader(batch_size,
                                           shuffle=True,
                                           num_workers=0,
                                           pin_memory=True,
                                           train=True)
    testloader = dataloader.getDataLoader(batch_size,
                                          shuffle=False,
                                          num_workers=0,
                                          pin_memory=False,
                                          train=False)

    pvd = PVD(device, num_proc)
    pvd.set_models(N, num_features, num_classes, func_f, func_c, weights, bias,
                   gpu, choice, gamma)

    torch.cuda.synchronize()
    start_time = time.perf_counter()

    timer = pvd.pvd_algo(iterations, trainloader, testloader, error_func,
                         learn_rate, epochs, begin, end, f_step, reg_f,
                         alpha_f, reg_c, alpha_c, graph, acc)

    torch.cuda.synchronize()
    print("total time:", time.perf_counter() - start_time)
    print("theoretical distributed time:", timer)
    #print("time_old: ", (time.time()-start_time)/num_proc)
    result = pvd.models[0].test(testloader, begin, end, f_step)
    print("test accuracy result:", result)
Example #6

def main():

    #complex net parameters
    M = 2
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    torch.manual_seed(11)
    np.random.seed(11)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    gpu = True
    conv = True
    dataset_name = "MNIST"
    choice = 'r'

    multilevel = True

    #neural net parameters---------------------------------------------------------
    weights = None
    bias = None
    reg_f = False
    reg_c = False
    graph = True

    #-----------hyper parameters
    batch_size = 256
    #note: the coarse model will be 2x this, the fine model will be 4x this
    N = 2
    learn_rate_c = .1
    f_step_c = .1
    learn_rate_f = .05
    f_step_f = .05
    epochs = 1

    alpha_f = 0.001
    alpha_c = 0.00025
    gamma = 0.05

    begin = 0
    end = 1

    dataloader = dl.InMemDataLoader(dataset_name)

    num_features, num_classes, in_channels = dl.getDims(dataset_name)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=True,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=True)

    #batch_size = 64
    func_f = torch.tanh
    func_c = F.softmax
    error_func = nn.CrossEntropyLoss()
    #------------------------------------------------------------------------------

    #init complex network
    complexNet = pa.complexNeuralNetwork(device, M, gpu, conv, in_channels)
    #init sub-neural networks
    complexNet.init_nets(N, num_features, num_classes, func_f, func_c, weights,
                         bias, choice, gamma, multilevel)

    #init SG modules
    complexNet.init_sgs(num_features=num_features, batch_size=batch_size)
    print(
        "-------------------------------- training coarse model ------------------------"
    )
    #train coarse model
    torch.cuda.synchronize()
    coarse_time = time.time()
    complexNet.train_multi_level(loader,
                                 error_func,
                                 learn_rate_c,
                                 epochs,
                                 begin,
                                 end,
                                 f_step_c,
                                 reg_f,
                                 alpha_f,
                                 reg_c,
                                 alpha_c,
                                 graph=False)
    torch.cuda.synchronize()
    coarse_time = time.time() - coarse_time

    coarse_result = complexNet.test(loader,
                                    begin=0,
                                    end=10000,
                                    f_step=f_step_c)

    complexNet.double_complex_net()

    #train fine model
    print(
        "-------------------------------- training fine model ------------------------"
    )
    torch.cuda.synchronize()
    start_time = time.perf_counter()
    train_time = complexNet.train(loader, error_func, learn_rate_f, epochs,
                                  begin, end, f_step_f, reg_f, alpha_f, reg_c,
                                  alpha_c, graph)
    torch.cuda.synchronize()
    end_time = time.perf_counter() - start_time

    print("coarse train results", coarse_result, "\n")
    print("coarse time", coarse_time, "\n")

    print("\ntotal time in series:", end_time)
    #During training, each epoch prints the loss and the MSE of the synthetic gradient

    result_train = complexNet.test(loader, begin=0, end=10000, f_step=f_step_f)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=False,
                                      train=False)

    result_test = complexNet.test(loader, begin=0, end=10000, f_step=f_step_f)

    print("fine train result", result_train, "\n")
    print("fine test result", result_test, "\n")

    print("Total time:", train_time[0], "\ntheoretical time:", train_time[1])
    print("Batch load time adjusted speed up", train_time[3])

    print("\n--------------------- Total theoretical time: ",
          coarse_time + train_time[1], "--------------------------")
Example #7
def main(argv):

    #complex net parameters
    M = 2
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #---------------training data--------------------

    dataset_name = "CIFAR10"  # choose from MNIST, CIFAR10, CIFAR100, ELLIPSE, SWISS
    choice = 'v'
    conv = True
    gpu = True

    #neural net parameters---------------------------------------------------------

    weights = None
    bias = None
    reg_f = True
    reg_c = False
    alpha_f = 0.0001
    alpha_c = 0.00025
    graph = False

    #-----------hyper parameters
    batch_size = 256
    N = 32  #note: the model will be 2x this
    learn_rate = 0.001
    f_step = .05
    epochs = 50

    gamma = 0.02
    begin = 0
    end = 10000

    #batch_size = 64
    func_f = torch.nn.ReLU()
    func_c = F.softmax
    error_func = nn.CrossEntropyLoss()

    multilevel = False
    #------------------------------------------------------------------------------

    if len(argv) > 0:
        #print(argv)
        N = int(argv[0])
        epochs = int(argv[1])
        learn_rate = float(argv[2])
        f_step = float(argv[3])
        choice = argv[4]
        graph = argv[5] == "True"  #argv values are strings; convert the flag
        print("N", N, "epochs", epochs, "lr", learn_rate, "step", f_step,
              "choice", choice, "graph", graph)

    dataloader = dl.InMemDataLoader(dataset_name, conv_sg=conv)

    num_features, num_classes, in_channels = dl.getDims(dataset_name)

    #load training dataset
    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=True,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=True)

    multilevel = False
    #------------------------------------------------------------------------------

    #init complex network
    complexNet = pa.complexNeuralNetwork(device, M, gpu, conv, in_channels)

    #init sub-neural networks
    complexNet.init_nets(N, num_features, num_classes, func_f, func_c, weights,
                         bias, choice, gamma, multilevel)

    #init SG modules
    complexNet.init_sgs(num_features=num_features, batch_size=batch_size)

    #accBefore = complexNet.test(loader, begin = 0, end = 10000, f_step = f_step)

    #train model with distributed algorithm
    train_time = complexNet.distTrain(loader, error_func, learn_rate, epochs,
                                      begin, end, f_step, reg_f, alpha_f,
                                      reg_c, alpha_c, graph, False, M)

    print("survived training")

    # print("-------------------------------------out of function---------------------------:", complexNet.getFirstNetParams())

    result_train = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    #load test dataset
    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=False,
                                      train=False)

    result_test = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    print("fine train result", result_train, "\n")
    print("fine test result", result_test, "\n")

    print("train time", train_time)
Example #8
def main(argv):
    #torch.set_num_threads(2)
    #preliminaries
    np.random.seed(11)
    torch.manual_seed(11)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    dataset_name = "CIFAR10"  # choose from MNIST, CIFAR10, CIFAR100, ELLIPSE, SWISS
    #choose model
    choice = "v"  # "v"
    gpu = True
    conv = True

    batch_size = 256  #1024
    #hyper parameters
    N = 64
    learn_rate = 0.001  #0.05
    step = .01
    epochs = 50
    begin = 0
    end = 10000
    reg_f = True
    reg_c = False
    graph = False

    alpha_f = 0.0001
    alpha_c = 0.01

    error_func = nn.CrossEntropyLoss()
    func_f = torch.nn.ReLU()

    func_c = F.softmax
    #load trainset

    if len(argv) > 0:
        #print(argv)
        N = int(argv[0])
        epochs = int(argv[1])
        learn_rate = float(argv[2])
        step = float(argv[3])
        choice = argv[4]
        graph = argv[5] == "True"  #argv values are strings; convert the flag
        print("N", N, "epochs", epochs, "lr", learn_rate, "step", step,
              "choice", choice, "graph", graph)

    model = chooseModel(dataset_name,
                        device,
                        N,
                        func_f,
                        func_c,
                        gpu,
                        choice,
                        conv=conv,
                        first=True)

    dataloader = dl.InMemDataLoader(dataset_name)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=True,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=True)
    #train
    if gpu:
        model.to(device)
        torch.cuda.synchronize()
    train_time = time.perf_counter()
    model.train(loader, error_func, learn_rate, epochs, begin, end, step,
                reg_f, alpha_f, reg_c, alpha_c, graph)
    torch.cuda.synchronize()
    train_time = time.perf_counter() - train_time

    result_train = model.test(loader, begin=0, end=10000, f_step=step)

    #load testset
    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=False,
                                      train=False)
    #test
    result_test = model.test(loader, begin=0, end=10000, f_step=step)

    print("\nfine train result", result_train)
    print("fine test result", result_test, "\n")

    print("--- %s seconds ---" % (train_time))
def main():
    #assumes the hyper-parameters used below (M, dataset_name, conv, gpu, batch_size,
    #N, weights, bias, choice, gamma, error_func, learn_rate, epochs, begin, end,
    #f_step, reg_f, alpha_f, reg_c, alpha_c, graph, func_f, func_c) are defined at
    #module level, as in the previous examples

    np.random.seed(11)
    torch.manual_seed(11)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    dataloader = dl.InMemDataLoader(dataset_name, conv_sg=conv)

    num_features, num_classes, in_channels = dl.getDims(dataset_name)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=True)

    multilevel = False
    #------------------------------------------------------------------------------
    #-------------------sg parameters--------------------------
    sg_func = syn.sgLoss
    sg_loss = nn.MSELoss
    #initial optimisation parameters for sg modules
    #sg_args = [torch.rand(size=(1,num_features)), torch.rand(size=(3,1)), torch.rand((1))]

    #init complex network
    complexNet = pa.complexNeuralNetwork(device, M, gpu, conv, in_channels)

    #init sub-neural networks
    complexNet.init_nets(N, num_features, num_classes, func_f, func_c, weights,
                         bias, choice, gamma, multilevel)

    #init SG modules
    complexNet.init_sgs(sg_func,
                        sg_loss,
                        num_features=num_features,
                        batch_size=batch_size)

    accBefore = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    #train_network
    #torch.cuda.synchronize()
    #start_time = time.perf_counter()
    train_time = complexNet.distTrain(loader, error_func, learn_rate, epochs,
                                      begin, end, f_step, reg_f, alpha_f,
                                      reg_c, alpha_c, graph, False, M)

    accAfter = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    print("accBefore", accBefore)
    print("accAfter", accAfter)
    #torch.cuda.synchronize()
    #end_time = time.perf_counter() - start_time

    #print("total time in series:" , end_time)
    #During training, each epoch prints the loss and the MSE of the synthetic gradient

    result_train = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=False,
                                      train=False)

    result_test = complexNet.test(loader, begin=0, end=10000, f_step=f_step)

    print("fine train result", result_train, "\n")
    print("fine test result", result_test, "\n")