Example #1
import sys
import tensorflow as tf
# StoreWeights (a custom Keras callback), get_project_paths and `epochs` are
# assumed to be defined elsewhere in the full script.

skip_steps = [
    [7, 3]
]
# Check that the total skipped epochs plus the epoch of the last skip fit within `epochs`.
if sum(j for i, j in skip_steps) + max(skip_steps)[0] > epochs:
    print("Exiting: skip_steps + last skip exceeds total epochs")
    quit()
    
FirstSkip = skip_steps[0][0]                 # epoch at which the first skip starts
TotalSkips = sum(j for i, j in skip_steps)   # total number of skipped epochs

reg_train_steps = 6
clusters = 5
project_paths = get_project_paths(sys.argv[0], to_tmp=False)
    
callback_weights_reg = StoreWeights(project_paths["weights"], reg_train_steps=0, dtw_clusters=0,
                                    file_prefix="Oweights", weight_pred_ind=False,
                                    weighs_dtw_cluster_ind=False, replicate_csvs_at=FirstSkip)

callback_weights_pred = StoreWeights(project_paths["weights"], reg_train_steps, dtw_clusters=0,
                                     file_prefix="Rweights", skip_array=skip_steps, weight_pred_ind=True,
                                     weighs_dtw_cluster_ind=True, replicate_csvs_at=0)

fashion_mnist = tf.keras.datasets.fashion_mnist

(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
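
The snippet above builds the skip schedule, creates the two StoreWeights callbacks and loads Fashion-MNIST, but stops before any training. Below is a minimal sketch of how the pieces could be wired together, assuming a plain classifier and that StoreWeights behaves like a standard tf.keras.callbacks.Callback; the architecture and the reuse of the assumed `epochs` value are illustrative, not taken from the original script.

# Sketch only: a simple Fashion-MNIST classifier with the two callbacks attached.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_images / 255.0, train_labels,
          epochs=epochs,
          validation_data=(test_images / 255.0, test_labels),
          callbacks=[callback_weights_reg, callback_weights_pred])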
Example #2
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
from tensorflow.keras.datasets import cifar100
from tensorflow.keras.utils import to_categorical
# dense_model1, get_project_name and get_project_paths are assumed to be provided
# by the project's own helper modules.


def main(argv):

    batch_size = 500
    epochs = 21
    (x_train, y_train), (x_test,
                         y_test) = cifar100.load_data(label_mode='fine')
    c1 = np.concatenate((x_train, x_test), axis=0)
    c2 = np.concatenate((y_train, y_test), axis=0)
    x_train = c1[:-5000]
    y_train = c2[:-5000]
    x_test = c1[-5000:]
    y_test = c2[-5000:]
    #(x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype('float32') / 255.0
    x_test = x_test.astype('float32') / 255.0
    input_shape = x_train.shape[1:]
    num_classes = len(np.unique(y_train))
    num_train_samples = x_train.shape[0]

    # Convert y to categorical one-hot vectors
    y_train = to_categorical(y_train, num_classes=num_classes)
    y_test = to_categorical(y_test, num_classes=num_classes)

    # Create and compile model

    model = dense_model1(input_shape=input_shape,
                         n_classes=num_classes,
                         dropout=0.5,
                         model_name=get_project_name(argv[0]))

    model.compile(
        # Labels were one-hot encoded above, so use the non-sparse categorical loss.
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'])

    # Resolve the project's output paths; the plot/graph export calls are left disabled.
    project_paths = get_project_paths(argv[0], to_tmp=False)
    #save_graph_plot(model, project_paths["plots"] + "/model.ps")
    #save_graph_json(model, project_paths["weights"] + "/model.json")

    weight = model.get_weights()

    logs = project_paths["weights"] + "/mod_hist_U5_mnist_acc.csv"
    #np.savetxt('weight.csv' , weight , fmt='%s', delimiter=',')
    csv_logger = CSVLogger(logs, append=True)

    # Train model while saving weights as checkpoints after each epoch
    model.fit(
        x_train,
        y_train,
        # Use one fifth of the training data per epoch (5 epochs per full pass over the data).
        steps_per_epoch=num_train_samples // batch_size // 5,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        callbacks=[
            ModelCheckpoint(project_paths["checkpoints"] +
                            "/weights_epoch-{epoch:02d}.hdf5",
                            save_weights_only=True,
                            save_freq='epoch'), csv_logger
        ],
        validation_data=(x_test, y_test))
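
    # Sketch only (not part of the original example): the checkpoints written above can
    # be restored into a freshly built model of the same architecture; the epoch number
    # in the file name below is illustrative.
    restored_model = dense_model1(input_shape=input_shape,
                                  n_classes=num_classes,
                                  dropout=0.5,
                                  model_name=get_project_name(argv[0]))
    restored_model.load_weights(project_paths["checkpoints"] +
                                "/weights_epoch-20.hdf5")
    restored_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                           optimizer=keras.optimizers.Adadelta(),
                           metrics=['accuracy'])
    restored_model.evaluate(x_test, y_test, verbose=1)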