Example #1
def test_different_layer_architectures(train_data_tuples, valid_data_tuples,
                                       test_data_tuples):
    network_no_hl = Networks.Network([784, 10], [None, sigmoid],
                                     test_data_tuples)
    hl_nodes = [0]
    val_accuracies = []
    train_accuracies = []
    test_accuracies = []

    validation_accuracy, training_accuracy, test_accuracy = network_no_hl.training(
        train_data_tuples, 5, 1, 1.0, valid_data_tuples, 0)
    val_accuracies.append(np.mean(validation_accuracy))
    test_accuracies.append(np.mean(test_accuracy))
    train_accuracies.append(np.mean(training_accuracy))

    for hl in range(5, 25, 5):
        network = Networks.Network([784, hl, 10], [None, sigmoid, sigmoid],
                                   test_data_tuples)
        validation_accuracy, training_accuracy, test_acc = network.training(
            train_data_tuples, 5, 1, 1.0, valid_data_tuples, 0)
        val_accuracies.append(np.mean(validation_accuracy))
        train_accuracies.append(np.mean(training_accuracy))
        test_accuracies.append(np.mean(test_acc))
        hl_nodes.append(hl)

    print('HL nodes', hl_nodes)
    print('Val Accuracy performance', val_accuracies)
    print('Train Accuracy performance', train_accuracies)
    print('Test Accuracy performance', test_accuracies)
Example #2
def get_Solvers():
    '''
    Builds the three Solvers: the feature-extraction Solver, the SVM
    classification solver, and the Reg_Box bounding-box regression Solver.

    :return:
    '''
    weight_outputs = ['train_alexnet', 'fineturn', 'SVM_model', 'Reg_box']
    for weight_output in weight_outputs:
        output_path = os.path.join(cfg.Out_put, weight_output)
        if not os.path.exists(output_path):
            os.makedirs(output_path)

    if len(os.listdir(r'./output/train_alexnet')) == 0:
        Train_alexnet = tf.Graph()
        with Train_alexnet.as_default():
            Train_alexnet_data = process_data.Train_Alexnet_Data()
            Train_alexnet_net = Networks.Alexnet_Net(is_training=True, is_fineturn=False, is_SVM=False)
            Train_alexnet_solver = Solver(Train_alexnet_net, Train_alexnet_data, is_training=True, is_fineturn=False, is_Reg=False)
            Train_alexnet_solver.train()

    if len(os.listdir(r'./output/fineturn')) == 0:
        Fineturn = tf.Graph()
        with Fineturn.as_default():
            Fineturn_data = process_data.FineTun_And_Predict_Data()
            Fineturn_net = Networks.Alexnet_Net(is_training=True, is_fineturn=True, is_SVM=False)
            Fineturn_solver = Solver(Fineturn_net, Fineturn_data, is_training=True, is_fineturn=True, is_Reg=False)
            Fineturn_solver.train()

    Features = tf.Graph()
    with Features.as_default():
        Features_net = Networks.Alexnet_Net(is_training=False, is_fineturn=True, is_SVM=True)
        Features_solver = Solver(Features_net, None, is_training=False, is_fineturn=True, is_Reg=False)
        Features_data = process_data.FineTun_And_Predict_Data(Features_solver, is_svm=True, is_save=True)

    svms = []
    if len(os.listdir(r'./output/SVM_model')) == 0:
        SVM_net = Networks.SVM(Features_data)
        SVM_net.train()
    for file in os.listdir(r'./output/SVM_model'):
        svms.append(joblib.load(os.path.join('./output/SVM_model', file)))

    Reg_box = tf.Graph()
    with Reg_box.as_default():
        Reg_box_data = Features_data
        Reg_box_net = Networks.Reg_Net(is_training=True)
        if len(os.listdir(r'./output/Reg_box')) == 0:
            Reg_box_solver = Solver(Reg_box_net, Reg_box_data, is_training=True, is_fineturn=False, is_Reg=True)
            Reg_box_solver.train()
        else:
            Reg_box_solver = Solver(Reg_box_net, Reg_box_data, is_training=False, is_fineturn=False, is_Reg=True)

    return Features_solver, svms, Reg_box_solver
Example #3
def main():
    training_data, validation_data, test_data = database_loader.load_data()

    # --- Data Preparation ---
    train_data = list(zip(*training_data))
    x, y = train_data
    # list of (input, target) training samples
    train_data_tuples = list(zip(x, y))

    test_data = list(zip(*test_data))
    x_test, y_test = test_data
    # list of (input, target) test samples
    test_data_tuples = list(zip(x_test, y_test))

    valid_data = list(zip(*validation_data))
    x_val, y_val = valid_data
    # list of (input, target) validation samples
    valid_data_tuples = list(zip(x_val, y_val))

    # --- EXPERIMENTS ---
    test_different_layer_architectures(train_data_tuples, valid_data_tuples,
                                       test_data_tuples)
    baseline_network = Networks.Network([784, 20, 10],
                                        [None, sigmoid, sigmoid],
                                        test_data_tuples)

    learning_rate_search(baseline_network, train_data_tuples,
                         valid_data_tuples)
    batch_size_search(baseline_network, train_data_tuples, valid_data_tuples)
    test_regularization_score(baseline_network, train_data_tuples,
                              valid_data_tuples)

    activation_functions_search(baseline_network, train_data_tuples,
                                valid_data_tuples, test_data_tuples)
    advanced_test_different_layer_depth_architectures(train_data_tuples,
                                                      valid_data_tuples,
                                                      test_data_tuples)
    advanced_test_different_layer_width_architectures(train_data_tuples,
                                                      valid_data_tuples,
                                                      test_data_tuples)

    test_baseline_fine_tuned_networks(train_data_tuples, valid_data_tuples,
                                      test_data_tuples)
    # test higher width and depth in network
    deeper_network = Networks.Network([784, 50, 50, 10],
                                      [None, sigmoid, relu, sigmoid],
                                      test_data_tuples)
    validation_accuracy, training_accuracy, test_acc = deeper_network.training(
        train_data_tuples, 30, 40, 5.0, valid_data_tuples, 30)
    print('Validation', np.mean(validation_accuracy), validation_accuracy)
    print('Training', np.mean(training_accuracy), training_accuracy)
    print('Testing', np.mean(test_acc), test_acc)
Example #4
def TestBetweennessSimple():
    """
    Makes a simple graph for which one can calculate the betweenness by 
    hand, to check your algorithm.
    """
    g = Networks.UndirectedGraph()
    g.AddEdge(0,1)
    g.AddEdge(0,2)
    g.AddEdge(1,3)
    g.AddEdge(2,3)
    g.AddEdge(3,4)
    edgeBt, nodeBt = Networks.EdgeAndNodeBetweenness(g)
    return g, edgeBt, nodeBt
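A quick hedged check, assuming Networks.EdgeAndNodeBetweenness returns dictionaries keyed by edge and node (consistent with edgeBt and nodeBt being passed as weights in Example #30): print the values and compare them against the hand calculation.

g, edgeBt, nodeBt = TestBetweennessSimple()
# Assumed dict return type; compare the printed values with the hand calculation.
for edge, bt in sorted(edgeBt.items()):
    print('edge', edge, 'betweenness', bt)
for node, bt in sorted(nodeBt.items()):
    print('node', node, 'betweenness', bt)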
Example #5
def PlotLogLogBeta(L=10, pc=0.5, n=10, repeats=1):
    p = pc + 2.**numpy.arange(-n, 0.1)
    # Storage for totals
    #note: with numpy1.0, default type for zeros is float,
    #      and numpy.Float is no longer defined.
    #      float conversion in array constructors below not necessary
    #      since float is default, but included for illustration
    Ptot_site = numpy.zeros(len(p), float)
    P2tot_site = numpy.zeros(len(p), float)
    Ptot_bond = numpy.zeros(len(p), float)
    P2tot_bond = numpy.zeros(len(p), float)
    # Iterate over repeats
    for r in range(repeats):
        # Site percolation
        P = numpy.zeros(len(p))
        for i in range(len(p)):
            g = MakeTriangularSitePercolation(L, p[i])
            cl = Networks.FindAllClusters(g)
            P[i] = len(cl[0])
        Ptot_site += P
        P2tot_site += (P * P)
        # Bond percolation
        P = numpy.zeros(len(p))
        for i in range(len(p)):
            g = MakeSquareBondPercolation(L, p[i])
            cl = Networks.FindAllClusters(g)
            P[i] = len(cl[0])
        Ptot_bond += P
        P2tot_bond += (P * P)
    # Multiplot needs dictionary of data to plot: labels are curve names
    pmpc = dict([(name, p - pc) for name in ['site', 'bond', 'power']])
    Pbar = {}
    Pbar['site'] = Ptot_site / repeats
    Pbar['bond'] = Ptot_bond / repeats
    Pbar['power'] = L**(91. / 48.) * (p - pc)**(5. / 36.)
    Psig = {}
    if repeats > 1:
        Psig['site'] = numpy.sqrt(
            ((P2tot_site / repeats) - Pbar['site']**2) / (repeats - 1))
        Psig['bond'] = numpy.sqrt(
            ((P2tot_bond / repeats) - Pbar['bond']**2) / (repeats - 1))
        Psig['power'] = 0. * p
    # Plot!
    MultiPlot.MultiPlot(pmpc,
                        Pbar,
                        xlabel='p-pc',
                        ylabel='P',
                        yerrdata=Psig,
                        log='xy',
                        loc='lower left',
                        showIt=True)
Example #6
def setup(args: argparse.Namespace):
    device = torch.device("cpu") if (not torch.cuda.is_available() or args.cpu) else torch.device("cuda")

    net = getattr(Networks, args.network)(len(args.in_channels), args.n_class, in_channels_lower=len(args.lower_in_channels), extractor_net=args.extractor_net, TriD=args.in3D)
    net = net.float()
    to_init = Networks.weights_init(args.network)
    net.apply(to_init)

    start_epoch = 0
    optimizer = optim.Adam(net.parameters(), lr=args.l_rate, betas=(0.9, 0.99), amsgrad=False)
    if args.restore_from != "":
        #restore pretrained and saved network:
        loaded = torch.load(args.restore_from)
        #assert loaded['name'] == args.network
        net.load_state_dict(loaded['state_dict'])
        optimizer.load_state_dict(loaded['optimizer'])
        start_epoch = loaded['epoch']

        # now individually transfer the optimizer parts...
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)
    
    model = net.to(device)
    loss = MultiTaskLoss(args.losses, device, in3d=args.in3D)
    train_loader, val_loader = get_loaders(args.network, args.dataset, args.n_class, args.in_channels, args.lower_in_channels, args.batch_size, args.debug)

    return model, optimizer, loss, train_loader, val_loader, device, start_epoch
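A hypothetical call site for setup; parse_args and the argument fields it supplies (network, n_class, in_channels, and so on) are assumed to be defined in the surrounding script.

if __name__ == "__main__":
    args = parse_args()  # assumed helper in the surrounding script
    model, optimizer, loss, train_loader, val_loader, device, start_epoch = setup(args)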
Example #7
def MakeTriangularSitePercolation(L, p):
    """Triangular lattice is implemented by bonds to neighbors separated
    by [0,1], [1,0], [-1, 1] and their negatives, so we need an edge
    connecting [i,j] to [i,j+1], [i+1,j], [i-1,j+1], for each point
    in the lattice, modulo the lattice size L.
    Either
    (a) add one node at a time, and fill in all the neighbors, or
    (b) use numpy.random.random((L,L)) to
        fill a whole matrix at once to determine which sites are occupied;
      (i) add nodes and edges as appropriate, or
      (ii) dispense with the dictionary and write GetNodes() and
      GetNeighbors functions directly from the array
    Check your answer using
    NetGraphics.DrawTriangularNetworkSites(g, cl)
    (For small L, the graphics may cut off your graph early: use
    NetGraphics.DrawTriangularNetworkSites(g, cl, L) to fix this)
    """
    g = Networks.UndirectedGraph()
    nbrs = [[0, 1], [0, -1], [1, 0], [-1, 0], [1, -1], [-1, 1]]
    for i in range(L):
        for j in range(L):
            if random.random() < p:
                g.AddNode((i, j))
                for nbr in nbrs:
                    site = ((i + nbr[0]) % L, (j + nbr[1]) % L)
                    if g.HasNode(site):
                        g.AddEdge((i, j), site)
    return g
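A minimal sketch of approach (b)(i) from the docstring, assuming the module-level numpy import used by the neighboring snippets and the same UndirectedGraph API: a single numpy.random.random((L, L)) call decides all site occupancies, after which nodes and edges are added.

def MakeTriangularSitePercolationMatrix(L, p):
    # One call fills the whole occupancy matrix at once.
    occupied = numpy.random.random((L, L)) < p
    g = Networks.UndirectedGraph()
    # Half of the six neighbor offsets suffice: edges are undirected,
    # so each bond is still considered exactly once.
    nbrs = [[0, 1], [1, 0], [-1, 1]]
    for i in range(L):
        for j in range(L):
            if occupied[i, j]:
                g.AddNode((i, j))
                for di, dj in nbrs:
                    site = ((i + di) % L, (j + dj) % L)
                    if occupied[site[0], site[1]]:
                        g.AddEdge((i, j), site)
    return g

Like Examples #4 and #24, this relies on AddEdge creating an endpoint that has not been added yet.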
Example #8
def Percolate(graph, deltas):
    sizes = []
    for delta in deltas:
        g = RandomPrune(graph, delta)
        cl = Networks.FindAllClusters(g)
        sizes.append(len(cl[0]))
    return numpy.array(sizes)
Example #9
def get_model(model_name):
    try:
        with open("{}_model.pickle".format(model_name), 'rb') as pickle_in:
            model = pickle.load(pickle_in)

        with open("{}_optimizer.pickle".format(model_name), 'rb') as pickle_in:
            optimizer = pickle.load(pickle_in)

        with open("{}_results.pickle".format(model_name), 'rb') as pickle_in:
            results = pickle.load(pickle_in)
    except FileNotFoundError:
        # Standard classifier that uses softmax_cross_entropy as loss function
        model = Classifier(Networks.Convolutional())
        optimizer = optimizers.SGD()
        optimizer.setup(model)
        print("Model not found! Starting training ...")
        results = train_network(model, optimizer)
        with open('{}_model.pickle'.format(model_name), 'wb') as f:
            pickle.dump(model, f)
        with open('{}_optimizer.pickle'.format(model_name), 'wb') as f:
            pickle.dump(optimizer, f)
        with open('{}_results.pickle'.format(model_name), 'wb') as f:
            pickle.dump(results, f)

    return model, optimizer, results
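An illustrative use of the cache-or-train pattern above: the first call trains and pickles the artifacts, later calls with the same name load them. The model name is arbitrary.

model, optimizer, results = get_model("conv")  # name chosen for illustration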
Example #10
def train(bit_model, input_shape, classes, x_train, x_test, y_train, y_test,
          batch_size):
    """
    Train each model within the generation 

    return 
    [bit_model, acc, model]
    bit_model       String of binary 1/0 for offspring generation 
    Acc             Accuracy of the model for fitness calculation 
    Model           Keras model for saving the json file 
    """
    optimizer = 'adam'  # Can be optimized with GA
    cost = 'mse'
    epochs = 100

    #Find out how to use the learning rate
    num_hidden_layers, num_nodes_per_layer, learning_rate, activation_function_per_layer, dropout_rate, used = TOOLS.ANN_bit_to_model(
        bit_model, MAX_NUM_HIDDEN_LAYERS, MAX_NUM_NODES, classes)

    network = Networks.Model()
    model = network.create_architecture(num_hidden_layers, num_nodes_per_layer,
                                        activation_function_per_layer,
                                        dropout_rate, input_shape, optimizer,
                                        cost)
    acc = network.train(model, x_train, x_test, y_train, y_test, batch_size,
                        epochs)
    #print(model)
    #model.save_weights("model.h5")
    #print("Saved model to disk")
    #exit()
    return [bit_model, acc, model]
Example #11
def RandomPrune(graph, delta):
    g = Networks.UndirectedGraph()
    for node1 in graph.GetNodes():
        for node2 in graph.GetNeighbors(node1):
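            # Note: each undirected edge is seen from both endpoints, so it
            # survives with probability 1 - (1 - delta)**2 rather than delta.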
            if random.random() < delta:
                g.AddEdge(node1, node2)
    return g
Example #12
def Program(nMax=100, GRAPHICS=False):
    primes = Primes.MakePrimeList(nMax)
    primeGraph = Networks.UndirectedGraph()

    for p in primes:
        primeGraph.AddNode(p)
        for p2 in primeGraph.GetNodes():
            if Primes.isPrime(concatenateIntegers(p, p2), primes):
                if Primes.isPrime(concatenateIntegers(p2, p), primes):
                    primeGraph.AddEdge(p, p2)

    for p1 in primes:
        possibles = primeGraph.GetNeighbors(p1)
        for p2 in possibles:
            possibles2 = listIntersection(possibles,
                                          primeGraph.GetNeighbors(p2))
            for p3 in possibles2:
                possibles3 = listIntersection(possibles2,
                                              primeGraph.GetNeighbors(p3))
                for p4 in possibles3:
                    possibles4 = listIntersection(possibles3,
                                                  primeGraph.GetNeighbors(p4))
                    for p5 in possibles4:
                        print(p1, p2, p3, p4, p5, p1 + p2 + p3 + p4 + p5)

    if GRAPHICS:
        NetGraphics.DisplayCircleGraph(primeGraph)

    return primeGraph
Example #13
def PrepareMaskedMLP(data, myseed, initializer, activation, masktype, trainW,
                     trainM, p1, alpha):
    dense_arch = [data[0].shape[-1], 300, 100, data[-1]]
    network = Networks.makeMaskedMLP(dense_arch, activation, myseed,
                                     initializer, masktype, trainW, trainM, p1,
                                     alpha)
    return network
Example #14
def PlotWattsStrogatzFig2(L, Z, numTries,
                          parray=10.**numpy.arange(-4, 0.001, 0.25)):
    """Duplicate Watts and Strogatz Figure 2: rescale vertical axes"""
    clustering, csigmas = GetClustering_vs_p(L, Z, numTries, parray)
    g = MakeSmallWorldNetwork(L, Z, 0)
    c0 = Networks.ComputeClusteringCoefficient(g)
    ell0 = Networks.FindAveragePathLength(g)
    pathlengths, psigmas = GetPathLength_vs_p(L, Z, numTries, parray)
    if numTries > 0:
        pylab.errorbar(parray, clustering/c0, yerr=csigmas/c0)
        pylab.errorbar(parray, pathlengths/ell0, yerr=psigmas/ell0)
    else:
        pylab.plot(parray, clustering/c0)
        pylab.plot(parray, pathlengths/ell0)
    pylab.semilogx()
    pylab.show()
Example #15
    def __init__(self, n_0, average_degreee_0, n, m, degree_only=False):

        # Define the main parameters
        self.n_0 = n_0
        self.average_degreee_0 = average_degreee_0
        self.n = n
        self.m = m

        # Create the initial graph as an Erdos-Renyi network
        self.G = Networks.ErdosRenyi_network(self.n_0, self.average_degreee_0).G
        new_edge = np.zeros(self.m)

        # Add the new nodes
        for j in range(1, self.n-self.n_0+1):

            # print(str(j)+'/'+str(self.n-self.n_0))
            self.G.add_node(self.n_0+j)

            # Get the nodes with their respective degrees
            possible_edges = np.array(self.G.degree, dtype=float)
            # Normalize
            possible_edges[:,1] = possible_edges[:,1]/np.sum(possible_edges[:,1])

            # Make the new connections
            for i in range(self.m):
                # Choose a connection
                new_edge[i] = np.random.choice(possible_edges[:,0], p=possible_edges[:,1])
                # print(new_edge[i])

                # Remove the chosen node
                possible_edges = np.delete(possible_edges, np.where(possible_edges[:,0] == new_edge[i]), axis=0)

                # Renormalize
                possible_edges[:,1] = possible_edges[:,1]/np.sum(possible_edges[:,1])

                self.G.add_edge(self.n_0+j, new_edge[i])

        # Get the mean and standard deviation of the degree
        self.degree = np.array(sorted([d for n, d in self.G.degree()], reverse=True))
        self.degree_mu = self.degree.mean()
        self.degree_sigma = self.degree.std()

        if not degree_only:
            # Get the mean and standard deviation of the clustering coefficient
            self.clustering_coefficient = np.array(sorted([nx.clustering(self.G, n) for n in nx.clustering(self.G)],
                                                          reverse=True))
            self.clustering_coefficient_mu = self.clustering_coefficient.mean()
            self.clustering_coefficient_sigma = self.clustering_coefficient.std()

            # Get the mean and standard deviation of the shortest path length over
            # all pairs of nodes, via the Floyd-Warshall method
            fw_aux = np.asarray(nx.floyd_warshall_numpy(self.G)).reshape(-1)
            self.floyd_warshall = np.array(np.delete(fw_aux, np.where(np.logical_or(fw_aux == 0, fw_aux == float('inf')))), dtype=int)
            self.shortest_path_length_mu = self.floyd_warshall.mean()
            self.shortest_path_length_sigma = self.floyd_warshall.std()

        # Unique identifier for the generated graph
        self.dt_string = datetime.now().strftime("_%d-%m-%Y_%H-%M-%S")
        self.filename = 'img/Barabasi-Albert'+'_n='+str(self.n)+'_m='+str(self.m)+self.dt_string
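A hypothetical usage sketch; the snippet shows only the __init__ body, so the class name BarabasiAlbert is an assumption.

ba = BarabasiAlbert(n_0=10, average_degreee_0=4, n=500, m=3)  # assumed class name
print(ba.degree_mu, ba.degree_sigma)  # mean and std of the degree sequence
print(ba.filename)                    # identifier used when saving figures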
Example #16
def useTriplet(DS_Train,digitData,parameters):
    tripnet = Networks.Network("triplet",digitData)
    print("training triplet")
    results = tripnet.train_custom(DS_Train, parameters)
    saveString = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
    saveString = "triplet" + saveString
    print("saving model to:", saveString)
    torch.save(tripnet,saveString)
    return results
Example #17
def advanced_test_different_layer_depth_architectures(train_data_tuples,
                                                      valid_data_tuples,
                                                      test_data_tuples):
    layers = [1, 2, 3, 4, 5]
    hidden_units = [15]  # , 50, 150, 500, 1000]

    val_statistics = np.zeros(shape=(len(layers), len(hidden_units)))
    train_statistics = np.zeros(shape=(len(layers), len(hidden_units)))
    test_statistics = np.zeros(shape=(len(layers), len(hidden_units)))
    total_seconds = []

    for nr_hidden_layers in layers:
        for nr_hidden_units in hidden_units:
            network_structure = np.empty(shape=(nr_hidden_layers + 2),
                                         dtype=int)
            activations = np.empty(shape=(nr_hidden_layers + 2), dtype=object)
            # print(activations)
            activations[0] = None
            activations[-1] = Networks.sigmoid
            network_structure[0] = 784
            network_structure[-1] = 10

            for layer in range(1, len(network_structure) - 1):
                network_structure[layer] = nr_hidden_units
                activations[layer] = Networks.sigmoid

            final_network_architecture = list(network_structure)

            network = Networks.Network(final_network_architecture, activations,
                                       test_data_tuples)

            start = time.time()
            validation_accuracy, training_accuracy, test_accuracy = network.training(
                train_data_tuples, 40, 50, 10.0, valid_data_tuples, 20)
            end = time.time()
            mins, secs, total_sec = print_time_output(start, end)
            total_seconds.append(total_sec)

            hl_size_case = layers.index(nr_hidden_layers)
            nr_units_case = hidden_units.index(nr_hidden_units)
            val_statistics[hl_size_case][nr_units_case] = np.mean(
                validation_accuracy)
            train_statistics[hl_size_case][nr_units_case] = np.mean(
                training_accuracy)
            test_statistics[hl_size_case][nr_units_case] = np.mean(
                test_accuracy)
    print()
    print('Validation')
    print(val_statistics)
    print('Training')
    print(train_statistics)
    print('Testing')
    print(test_statistics)
    print('Final test data performance')
    print(network.evaluate(test_data_tuples))
    print('Time')
    print(total_seconds)
Example #18
    def __init__(self, args):
        super().__init__()
        self.nclasses = args.nclasses

        # define sizes and perspective transformation
        resize = args.resize
        size = torch.Size([args.batch_size, args.nclasses, args.resize, 2*args.resize])
        M, _ = get_homography(args.resize, args.no_mapping)
        M = torch.from_numpy(M).unsqueeze_(0).expand([args.batch_size, 3, 3]).float()

        # Define network
        out_channels = args.nclasses + int(not args.end_to_end)

        self.net = Networks.define_model(mod=args.mod, layers=args.layers, 
                                         in_channels=args.channels_in,
                                         out_channels=out_channels, 
                                         pretrained=args.pretrained, pool=args.pool)
        # Init activation
        self.activation = activation_layer(args.activation_layer, args.no_cuda)
        # Init grid generator
        self.grid = ProjectiveGridGenerator(size, M, args.no_cuda)
        # Init LS layer
        self.ls_layer = Weighted_least_squares(size, args.nclasses, args.order, 
                args.no_cuda, args.reg_ls, args.use_cholesky)

        # mask configuration
        zero_rows = ceil(args.resize*args.mask_percentage)
        self.idx_row = torch.linspace(0, zero_rows-1, zero_rows).long()
        n_row = 13
        self.idx_col1 = Variable(torch.linspace(1, n_row, n_row+1).long())
        self.idx_col2 = Variable(torch.linspace(0, n_row, n_row+1).long())+2*resize-(n_row+1)
        idx_mask = (np.arange(resize)[:, None] < np.arange(2*resize)-(resize+10))*1
        idx_mask = np.flip(idx_mask, 1).copy() + idx_mask
        self.idx_mask = Variable(torch.from_numpy(idx_mask)) \
                .type(torch.ByteTensor).expand(
                        args.batch_size, args.nclasses, resize, 2*resize)

        self.end_to_end = args.end_to_end
        self.pretrained = args.pretrained
        self.classification_branch = args.clas
        if self.classification_branch:
            size_enc = (32, 64)
            chan = 128
            self.line_classification = Classification('line', size=size_enc, 
                    channels_in=chan, resize=resize)
            self.horizon_estimation = Classification('horizon', size=size_enc, 
                    channels_in=chan, resize=resize)

        # Place on GPU if specified
        if not args.no_cuda:
            self.idx_row = self.idx_row.cuda()
            self.idx_col1 = self.idx_col1.cuda()
            self.idx_col2 = self.idx_col2.cuda()
            self.idx_mask = self.idx_mask.cuda()
            if self.classification_branch:
                self.line_classification = self.line_classification.cuda()
                self.horizon_estimation = self.horizon_estimation.cuda()
Example #19
def MakePathLengthHistograms(L=100, Z=4, p=0.1):
    """
    Plots path length histograms for small world networks.
    Find list of all lengths
    Use pylab.hist(lengths, bins=range(max(lengths)), normed=True) """
    histograms = []
    g = MakeSmallWorldNetwork(L, Z, p)
    lengths = Networks.FindAllPathLengths(g).values()
    pylab.hist(lengths, normed=True, bins=range(max(lengths)))
    pylab.show()
Example #20
def useSiamase(DS_Train,digitData,parameters):
    sia_net = Networks.Network("siamase",digitData)
    print("training sia")
    results = sia_net.train_custom(DS_Train, parameters)
    saveString = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
    saveString = "siamase" + saveString
    print("saving model to:", saveString)
    torch.save(sia_net,saveString)
    
    return results
Example #21
def PlotSitePercolationBiggest(L=10, p=0.5, seed=1, scale=0):
    """
    Uses DrawTriangularNetworkSites to draw only the largest cluster,
    by setting cl to the result of Networks.FindAllClusters (presuming
    it sorts by size) and by passing in [cl[0]] as the cluster list.
    """
    random.seed(seed)
    g = MakeTriangularSitePercolation(L, p)
    cl = Networks.FindAllClusters(g)
    imfile = 'SitePercolationBiggest_%s_%s_%s.tif' % (L, p, seed)
    im = NetGraphics.DrawTriangularNetworkSites(g, [cl[0]], L, imfile, scale)
Example #22
def PlotBondPercolationBonds(L=10, p=0.5, seed=1):
    """
    Uses DrawSquareNetworkBonds in NetGraphics to graph the percolation
    network made by MakeSquareBondPercolation and the clusters returned
    by Networks.FindAllClusters. Best for small networks to debug.
    """
    random.seed(seed)
    g = MakeSquareBondPercolation(L, p)
    cl = Networks.FindAllClusters(g)
    imfile = 'BondPercolation_%s_%s_%s.tif' % (L, p, seed)
    im = NetGraphics.DrawSquareNetworkBonds(g, cl, imfile=imfile)
Example #23
def test():
    args = parse_args()
    model = Networks.ResNet18_ARM___RAF()

    print("Loading pretrained weights...", args.checkpoint)
    checkpoint = torch.load(args.checkpoint)
    model.load_state_dict(checkpoint["model_state_dict"], strict=False)

    data_transforms_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    test_dataset = RafDataSet(args.raf_path,
                              phase='test',
                              transform=data_transforms_test)
    test_size = test_dataset.__len__()
    print('Test set size:', test_size)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              num_workers=args.workers,
                                              shuffle=False,
                                              pin_memory=True)

    model = model.cuda()

    pre_labels = []
    gt_labels = []
    with torch.no_grad():
        bingo_cnt = 0
        model.eval()
        for batch_i, (imgs, targets, _) in enumerate(test_loader):
            outputs, _ = model(imgs.cuda())
            targets = targets.cuda()
            _, predicts = torch.max(outputs, 1)
            correct_or_not = torch.eq(predicts, targets)
            pre_labels += predicts.cpu().tolist()
            gt_labels += targets.cpu().tolist()
            bingo_cnt += correct_or_not.sum().cpu()

        acc = bingo_cnt.float() / float(test_size)
        acc = np.around(acc.numpy(), 4)
        print(f"Test accuracy: {acc:.4f}.")

    if args.plot_cm:
        cm = confusion_matrix(gt_labels, pre_labels)
        cm = np.array(cm)
        labels_name = ['SU', 'FE', 'DI', 'HA', 'SA', 'AN', 'NE']  # axis labels
        plot_confusion_matrix(cm, labels_name, 'RAF-DB', acc)
Example #24
def MakeRingGraph(num_nodes, Z):
    """
    Makes a ring graph with Z neighboring edges per node.
    """
    g = Networks.UndirectedGraph()
    if Z % 2 != 0:
        raise ValueError("must specify even number of edges per node")
    for i in range(num_nodes):
        for di in range(-Z//2, Z//2 + 1):
            if di != 0:
                j = (i + di) % num_nodes
                g.AddEdge(i, j)
    return g
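Several plotting examples (#14, #19, #30) call MakeSmallWorldNetwork(L, Z, p), which does not appear in this listing. Below is a hedged sketch of one common construction, layering roughly p*L*Z/2 random shortcut edges on the ring graph above; the original module may differ in detail.

def MakeSmallWorldNetwork(L, Z, p):
    # Start from the regular ring graph, then add random shortcuts.
    g = MakeRingGraph(L, Z)
    num_shortcuts = int(round(p * L * Z / 2.))
    for _ in range(num_shortcuts):
        # Endpoints drawn uniformly; occasional duplicates or self-loops
        # are left for AddEdge to handle.
        g.AddEdge(random.randrange(L), random.randrange(L))
    return g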
Example #25
def PlotSitePercolation(L=10, p=0.5, seed=1, scale=0):
    """
    Uses DrawTriangularNetworkSites to draw clusters.
    """
    random.seed(seed)
    g = MakeTriangularSitePercolation(L, p)
    cl = Networks.FindAllClusters(g)
    imfile = 'SitePercolation_%s_%s_%s.tif' % (L, p, seed)
    im = NetGraphics.DrawTriangularNetworkSites(g,
                                                cl,
                                                L,
                                                scale=scale,
                                                imfile=imfile)
Example #26
def PrepareConv2(data, myseed, initializer, activation, masktype, trainW,
                 trainM, p1, alpha):
    in_shape = data[0][0].shape

    kernelsize = 3
    cnn_arch = [[kernelsize, kernelsize, 3, 64],
                [kernelsize, kernelsize, 64, 64], []]
    dense_arch = [256, 256, data[-1]]
    network = Networks.makeMaskedCNN(in_shape, cnn_arch, dense_arch,
                                     activation, myseed, initializer, masktype,
                                     trainW, trainM, p1, alpha)

    return network
Example #27
    def update_policy(self,
                      inp_dim,
                      h_layers,
                      reg_coef=0.01,
                      learning_rate=0.01,
                      new_agent_max_memory=50000):
        self.agent_memory.push(self.live_agent)
        new_net = Networks.value_network(inp_dim, h_layers, reg_coef=reg_coef)
        new_net.compile(optimizer=Optimizers.SGD(learning_rate=learning_rate),
                        loss='mean_squared_error')
        new_agent_memory = ValueFunctionMemory(max_memory=new_agent_max_memory)
        new_agent = IndividualValueAgent(qnet=new_net, memory=new_agent_memory)
        self.live_agent = new_agent
Example #28
def run():
    for N in range(1, 4):
        model = Classifier(Networks.FullyConnectedNet(N, 10))
        optimizer = optimizers.SGD()
        optimizer.setup(model)

        train_loss_list, test_loss_list = trainNetwork(model, optimizer)
        plt.plot(test_loss_list, label='Test Loss')
        plt.plot(train_loss_list, label='Train Loss')
        plt.legend()
        plt.ylabel("Loss")
        plt.xlabel("Epoch")
        plt.title("Loss as a function of epochs, N=%s" % N)
        plt.show()
Example #29
def PlotBondPercolation(L=10, p=0.5, seed=1):
    """
    Uses DrawSquareNetworkSites in NetGraphics to graph the percolation
    network made by MakeSquareBondPercolation and the clusters returned
    by Networks.FindAllClusters. Best for large networks to explore
    universality.
    """
    random.seed(seed)
    g = MakeSquareBondPercolation(L, p)
    cl = Networks.FindAllClusters(g)
    imfile = 'BondPercolation_%s_%s_%s.tif' % (L, p, seed)
    im = NetGraphics.DrawSquareNetworkSites(g, cl, imfile=imfile)
    imfile = 'BondPercolationBiggest_%s_%s_%s.tif' % (L, p, seed)
    im = NetGraphics.DrawSquareNetworkSites(g, [cl[0]], imfile=imfile)
Example #30
def SmallWorldBetweenness(L, Z, p, dotscale=4, linescale=2, windowMargin=0.02):
    """
    Display small world network with edge and node betweenness,
    using NetGraphics routine DisplayCircleGraph, passing in arguments
    for edge-weights and node_weights. Passes through the arguments for 
    dotscale, linescale, and windowMargin, to fine-tune the graph
    """
    g = MakeSmallWorldNetwork(L, Z, p)
    edgeBt, nodeBt = Networks.EdgeAndNodeBetweenness(g)
    im = NetGraphics.DisplayCircleGraph(g, edgeBt, nodeBt,
                                        dotscale=dotscale,
                                        linescale=linescale,
                                        windowMargin=windowMargin)
    return g