Example #1
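This example reads multitask liblinear data from a working directory, reshapes it for a CNN, trains one model per output label, and writes each model's JSON architecture and HDF5 weights back to the directory. The snippets on this page omit their module-level imports and settings; a plausible sketch follows (cleartk_io and nn_models are project-local modules, and the nb_epoch/batch_size values here are hypothetical placeholders):

import os
import sys

import numpy as np

import cleartk_io as ctk_io  # project-local data I/O helpers (assumed import path)
import nn_models             # project-local Keras model builders (assumed import path)

nb_epoch = 80    # hypothetical values; the real ones are defined at module level
batch_size = 64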
def main(args):
    if len(args) < 1:
        sys.stderr.write("Error - one required argument: <data directory>\n")
        sys.exit(-1)

    working_dir = args[0]

    #    print("Reading data...")
    Y, X = ctk_io.read_multitask_liblinear(
        working_dir)  # ('data_testing/multitask_assertion/train_and_test')
    stopper = nn_models.get_early_stopper()

    num_examples, dimension = X.shape
    num_y_examples, num_labels = Y.shape
    assert num_examples == num_y_examples

    #print("Data has %d examples and dimension %d" % (num_examples, dimension) )
    #print("Output has %d dimensions" % (num_labels) )

    # Integer division so the reshape dimension stays an int under Python 3.
    X = np.reshape(X, (num_examples, 11, dimension // 11))

    Y_adj, indices = ctk_io.flatten_outputs(Y)

    #print("After reshaping the data has shape %s" % (str(X.shape)))

    for label_ind in range(0, Y.shape[1]):

        num_outputs = indices[label_ind + 1] - indices[label_ind]
        model = nn_models.get_cnn_model(X.shape, num_outputs)

        #print("For label ind %d, grabbing indices from %d to %d" % (label_ind, int(indices[label_ind]), int(indices[label_ind+1])))

        train_y = Y_adj[:, int(indices[label_ind]):int(indices[label_ind + 1])]

        #if(train_y.shape[-1] == 1):
        #    print("Number of values=1 is %d" % (train_y.sum()))

        #print("Shape of y is %s, shape of X is %s, max value in y is %f and min is %f" % (str(train_y.shape), str(X.shape), train_y.max(), train_y.min()) )

        model.fit(X,
                  train_y,
                  nb_epoch=nb_epoch,
                  batch_size=batch_size,
                  verbose=1,
                  validation_split=0.2,
                  callbacks=[stopper])

        model.summary()

        json_string = model.to_json()
        # Write the architecture via a context manager so the handle is closed.
        with open(os.path.join(working_dir, 'model_%d.json' % label_ind),
                  'w') as json_file:
            json_file.write(json_string)
        model.save_weights(os.path.join(working_dir,
                                        'model_%d.h5' % label_ind),
                           overwrite=True)

        #print("This model has %d layers and layer 3 has %d weights" % (len(model.layers), len(model.layers[3].get_weights()) ) )
        #print("The weight of the first layer at index 50 is %f" % model.layers[3].get_weights()[50])

    sys.exit(0)
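These main() functions are presumably run as standalone scripts; a minimal sketch of the conventional entry point, which the snippets themselves do not show:

if __name__ == "__main__":
    main(sys.argv[1:])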
Example #3
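This example runs a single hyperparameter evaluation: it builds a CNN from a config dictionary, trains it against an explicit validation set, and returns the final training loss.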
def run_one_eval(epochs, config, train_x, train_y, valid_x, valid_y,
                 vocab_size, num_outputs):
    print("Testing with config: %s" % (config))
    model = nn_models.get_cnn_model(train_x.shape,
                                    vocab_size,
                                    num_outputs,
                                    conv_layers=config['num_filters'],
                                    fc_layers=config['layers'],
                                    embed_dim=config['embed_dim'],
                                    filter_widths=config['filters'])

    history = model.fit(train_x,
                        train_y,
                        nb_epoch=epochs,
                        batch_size=config['batch_size'],
                        verbose=1,
                        validation_data=(valid_x, valid_y))

    return history.history['loss'][-1]
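A minimal sketch of calling run_one_eval; the config keys match the lookups above, but every concrete value below (and the train/valid arrays) is hypothetical:

config = {
    'num_filters': (64,),   # passed as conv_layers
    'layers': (128,),       # passed as fc_layers
    'embed_dim': 50,
    'filters': (2, 3, 4),   # passed as filter_widths
    'batch_size': 32,
}
loss = run_one_eval(10, config, train_x, train_y, valid_x, valid_y,
                    vocab_size=10000, num_outputs=3)
print("Final training loss: %f" % loss)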
Example #5
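This example extends a previously trained model: it unzips the packaged model and alphabets, inspects the old layers to rebuild an equivalent CNN with one extra fully connected layer, reloads as many old weights as possible by layer name, retrains on the original data, and repackages the result. Beyond the imports sketched in Example #1, it also needs pickle, zipfile.ZipFile, and keras.models.load_model.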
def main(args):
    if len(args) < 1:
        sys.stderr.write("Error - one required argument: <working_dir>\n")
        sys.exit(-1)

    working_dir = args[0]
    
    ### Extract existing model:
    print("Extracting existing model")
    with ZipFile(os.path.join(working_dir, 'script.model'), 'r') as myzip:
        myzip.extract('model.h5', working_dir)
        myzip.extract('alphabets.pkl', working_dir)

    # Pickle files must be opened in binary mode under Python 3.
    with open(os.path.join(working_dir, 'alphabets.pkl'), 'rb') as alphabet_file:
        (feature_alphabet, label_alphabet) = pickle.load(alphabet_file)
    label_lookup = {val: key for (key, val) in label_alphabet.items()}
    model = load_model(os.path.join(working_dir, "model.h5"))
    #config = model.get_config()
    
    #model = Container.from_config(config)
    
    ## Find the model params needed by CNN method and get a cnn with one extra FC layer:
    # nn_models.get_cnn_model(X_array.shape, len(feature_alphabet), num_outputs, conv_layers=convs, fc_layers=layers, 
    #                                    embed_dim=embed_dim, filter_widths=width)
    print("Building new model with extra layer")
    convs = []
    dense = []
    for layer in model.layers:
        if 'convolution' in layer.name:
            convs.append(layer)
        if 'dense' in layer.name:
            dense.append(layer)
            
    filters = [x.filter_length for x in convs]
    nb_filters = (convs[0].nb_filter,)
    fc_widths = [x.output_dim for x in dense]
    fc_widths.append(fc_widths[-1] // 2)  # one extra FC layer at half the last width
    
    new_model = nn_models.get_cnn_model(model.layers[0].input_shape,
                                        model.layers[1].input_dim,
                                        model.layers[-1].output_dim,
                                        conv_layers=nb_filters,
                                        fc_layers=fc_widths,
                                        embed_dim=model.layers[1].output_dim,
                                        filter_widths=filters)
    
    ## Just so I don't accidentally try to refer to this later
    del model
    
    ## Change the name of the output layer so that we don't try to read those weights in -- we will have a different number of parameters:
    #new_model.layers[-1].name = "NewOutput"
    
    ## Load as many weights as possible taking advantage of consistently named layers:
    new_model.load_weights(os.path.join(working_dir, "model.h5"), by_name=True)
    

    ## Re-load data and retrain model:
    print("Reading data...")
    Y, label_alphabet, X_array, feature_alphabet = ctk_io.read_token_sequence_data(working_dir)
    
    Y_array = np.array(Y)
    #print("Shape of X is %s and Y is %s" % (str(X.shape), str(Y.shape)))
    
    num_examples, dimension = X_array.shape
    num_outputs = 1 if len(label_alphabet) == 2 else len(label_alphabet)
    num_y_examples = len(Y)
    
    Y_adj, indices = ctk_io.flatten_outputs(Y_array)
    out_counts = Y_adj.sum(0)
        
    stopper = nn_models.get_early_stopper()
    
    print("Retraining model")
    new_model.fit(X_array, Y_adj,
                  nb_epoch=nb_epoch,
                  batch_size=batch_size,
                  verbose=1,
                  validation_split=0.2)
    # Disabled for now: callbacks=[stopper], class_weight=class_weights
                  
    new_model.summary()
    
    new_model.save(os.path.join(working_dir, 'new_model.h5'), overwrite=True)
    
    with ZipFile(os.path.join(working_dir, 'extended.model'), 'w') as myzip:
        myzip.write(os.path.join(working_dir, 'new_model.h5'), 'model.h5')
        myzip.write(os.path.join(working_dir, 'alphabets.pkl'), 'alphabets.pkl')
Example #6
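This example is a near-duplicate of Example #5; the only substantive difference is that the appended fully connected layer keeps the width of the previous layer instead of halving it (compare the fc_widths lines).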
def main(args):
    if len(args) < 1:
        sys.stderr.write("Error - one required argument: <working_dir>\n")
        sys.exit(-1)

    working_dir = args[0]

    ### Extract existing model:
    print("Extracting existing model")
    with ZipFile(os.path.join(working_dir, 'script.model'), 'r') as myzip:
        myzip.extract('model.h5', working_dir)
        myzip.extract('alphabets.pkl', working_dir)

    # Pickle files must be opened in binary mode under Python 3.
    with open(os.path.join(working_dir, 'alphabets.pkl'), 'rb') as alphabet_file:
        (feature_alphabet, label_alphabet) = pickle.load(alphabet_file)
    label_lookup = {val: key for (key, val) in label_alphabet.items()}
    model = load_model(os.path.join(working_dir, "model.h5"))
    #config = model.get_config()

    #model = Container.from_config(config)

    ## Find the model params needed by CNN method and get a cnn with one extra FC layer:
    # nn_models.get_cnn_model(X_array.shape, len(feature_alphabet), num_outputs, conv_layers=convs, fc_layers=layers,
    #                                    embed_dim=embed_dim, filter_widths=width)
    print("Building new model with extra layer")
    convs = []
    dense = []
    for layer in model.layers:
        if 'convolution' in layer.name:
            convs.append(layer)
        if 'dense' in layer.name:
            dense.append(layer)

    filters = [x.filter_length for x in convs]
    nb_filters = (convs[0].nb_filter, )
    fc_widths = [x.output_dim for x in dense]
    #fc_widths.append(fc_widths[-1] //2)
    fc_widths.append(fc_widths[-1])

    new_model = nn_models.get_cnn_model(model.layers[0].input_shape,
                                        model.layers[1].input_dim,
                                        model.layers[-1].output_dim,
                                        conv_layers=nb_filters,
                                        fc_layers=fc_widths,
                                        embed_dim=model.layers[1].output_dim,
                                        filter_widths=filters)

    ## Just so I don't accidentally try to refer to this later
    del model

    ## Change the name of the output layer so that we don't try to read those weights in -- we will have a different number of parameters:
    #new_model.layers[-1].name = "NewOutput"

    ## Load as many weights as possible taking advantage of consistently named layers:
    new_model.load_weights(os.path.join(working_dir, "model.h5"), by_name=True)

    ## Re-load data and retrain model:
    print("Reading data...")
    Y, label_alphabet, X_array, feature_alphabet = ctk_io.read_token_sequence_data(
        working_dir)

    Y_array = np.array(Y)
    #print("Shape of X is %s and Y is %s" % (str(X.shape), str(Y.shape)))

    num_examples, dimension = X_array.shape
    num_outputs = 1 if len(label_alphabet) == 2 else len(label_alphabet)
    num_y_examples = len(Y)

    Y_adj, indices = ctk_io.flatten_outputs(Y_array)
    out_counts = Y_adj.sum(0)

    stopper = nn_models.get_early_stopper()

    print("Retraining model")
    new_model.fit(X_array,
                  Y_adj,
                  nb_epoch=nb_epoch,
                  batch_size=batch_size,
                  verbose=1,
                  validation_split=0.2)
    # Disabled for now: callbacks=[stopper], class_weight=class_weights

    new_model.summary()

    new_model.save(os.path.join(working_dir, 'new_model.h5'), overwrite=True)

    with ZipFile(os.path.join(working_dir, 'extended.model'), 'w') as myzip:
        myzip.write(os.path.join(working_dir, 'new_model.h5'), 'model.h5')
        myzip.write(os.path.join(working_dir, 'alphabets.pkl'),
                    'alphabets.pkl')
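A minimal sketch of consuming the repackaged model, assuming the layout this script wrote into extended.model (working_dir as above):

with ZipFile(os.path.join(working_dir, 'extended.model'), 'r') as myzip:
    myzip.extract('model.h5', working_dir)

retrained = load_model(os.path.join(working_dir, 'model.h5'))
retrained.summary()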