Example #1
def test_admission():
    var_count = 4
    vtree = sdd.sdd_vtree_new(var_count, "balanced")
    mgr = sdd.sdd_manager_new(vtree)

    # WFEG
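    # variables (from the clauses below): 1=w, 2=f, 3=e, 4=g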
    # ( w ^ g )
    alpha = sdd.sdd_conjoin(sdd.sdd_manager_literal(1, mgr),
                            sdd.sdd_manager_literal(4, mgr), mgr)
    # ( w ^ f ^ e )
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(1, mgr),
                           sdd.sdd_manager_literal(2, mgr), mgr)
    beta = sdd.sdd_conjoin(beta, sdd.sdd_manager_literal(3, mgr), mgr)
    # ( f ^ e ^ g )
    gamma = sdd.sdd_conjoin(sdd.sdd_manager_literal(2, mgr),
                            sdd.sdd_manager_literal(3, mgr), mgr)
    gamma = sdd.sdd_conjoin(gamma, sdd.sdd_manager_literal(4, mgr), mgr)
    alpha = sdd.sdd_disjoin(alpha, beta, mgr)
    alpha = sdd.sdd_disjoin(alpha, gamma, mgr)

    alpha = sdd.sdd_negate(alpha, mgr)
    beta, pmgr = primes(alpha, mgr)
    _sanity_check(alpha, mgr, beta, pmgr)
    vtree = sdd.sdd_manager_vtree(mgr)
    pvtree = sdd.sdd_manager_vtree(pmgr)

    import models
    for model in models.models(alpha, vtree):
        print models.str_model(model)

    for model in models.models(beta, pvtree):
        print models.str_model(model)

    for model in models.models(alpha, vtree):
        print "==", models.str_model(model)
        model_list = [model[var] for var in sorted(model.keys())]
        gamma, pmgr = compatible_primes(alpha,
                                        model_list,
                                        mgr,
                                        primes_mgr=(beta, pmgr))
        pvtree = sdd.sdd_manager_vtree(pmgr)
        for prime_model in models.models(gamma, pvtree):
            print models.str_model(prime_model)
            term = prime_to_dict(prime_model, var_count)
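            # render the term over vars 1..var_count:
            # '+' = positive literal, '-' = negative literal, '*' = variable not in the term (don't-care)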
            print " ".join([
                ("*" if var not in term else "+" if term[var] == 1 else "-")
                for var in xrange(1, var_count + 1)
            ])

    print "dead-nodes:", sdd.sdd_manager_dead_count(mgr)
    print "dead-nodes:", sdd.sdd_manager_dead_count(pmgr)
Example #2
def primes_by_length(primes, pmgr, var_count):
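    # bucket each prime implicant term by its length (number of literals it mentions)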
    by_length = defaultdict(list)
    pvtree = sdd.sdd_manager_vtree(pmgr)
    for model in models.models(primes, pvtree):
        term = prime_to_dict(model, var_count)
        by_length[len(term)].append(term)
    return by_length
Example #3
def fineTuningModel(name,
                    num_classes,
                    is_freeze,
                    pretrained=True):  # is_freeze: when True (and pretrained), freeze the backbone

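    # swap the final classification layer for one with num_classes outputs;
    # optionally freeze the pretrained backbone via the architecture-specific helpers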
    model = models(name, pretrained)
    if 'resnet' in name:
        input_features = model.fc.in_features
        model.fc = nn.Linear(in_features=input_features,
                             out_features=num_classes,
                             bias=True)
        if is_freeze and pretrained:
            model = freezeResNet(model)
    elif 'vgg' in name:
        input_features = model.classifier[-1].in_features
        model.classifier[-1] = nn.Linear(in_features=input_features,
                                         out_features=num_classes,
                                         bias=True)
        if is_freeze and pretrained:
            model = freezeVGG(model)
    elif 'densenet' in name:
        input_features = model.classifier.in_features
        model.classifier = nn.Linear(in_features=input_features,
                                     out_features=num_classes,
                                     bias=True)
        if is_freeze and pretrained:
            model = freezeDenseNet(model)

    return model
Example #4
def main(batch_size=64,
         max_epochs=100,
         validation_split=0.2,
         early_stop=EarlyStopping()):
    model_hdf5_path = "./hdf5s/"

    if args.dataset == 'taxi':
        sampler = file_loader.file_loader()
    elif args.dataset == 'bike':
        sampler = file_loader.file_loader(config_path="data_bike.json")
    else:
        raise Exception("Can not recognize dataset, please enter taxi or bike")
    modeler = models.models()

    if args.model_name == "stdn":
        # training
        att_cnnx, att_flow, att_x, cnnx, flow, x, y = sampler.sample_stdn(datatype="train",
                                                                          att_lstm_num=args.att_lstm_num, \
                                                                          long_term_lstm_seq_len=args.long_term_lstm_seq_len,
                                                                          short_term_lstm_seq_len=args.short_term_lstm_seq_len, \
                                                                          nbhd_size=args.nbhd_size,
                                                                          cnn_nbhd_size=args.cnn_nbhd_size)

        print(("Start training {0} with input shape {2} / {1}".format(
            args.model_name, x.shape, cnnx[0].shape)))

        model = modeler.stdn(att_lstm_num=args.att_lstm_num,
                             att_lstm_seq_len=args.long_term_lstm_seq_len, \
                             lstm_seq_len=len(cnnx), feature_vec_len=x.shape[-1], \
                             cnn_flat_size=args.cnn_flat_size, nbhd_size=cnnx[0].shape[1],
                             nbhd_type=cnnx[0].shape[-1])

        model.fit( \
            x=att_cnnx + att_flow + att_x + cnnx + flow + [x, ], \
            y=y, \
            batch_size=batch_size, validation_split=validation_split, epochs=max_epochs,
            callbacks=[early_stop])

        att_cnnx, att_flow, att_x, cnnx, flow, x, y = sampler.sample_stdn(
            datatype="test",
            nbhd_size=args.nbhd_size,
            cnn_nbhd_size=args.cnn_nbhd_size)
        y_pred = model.predict( \
            x=att_cnnx + att_flow + att_x + cnnx + flow + [x, ], )
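        # normalize the raw threshold by the training-set volume maximum before evaluation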
        threshold = float(
            sampler.threshold) / sampler.config["volume_train_max"]
        print(("Evaluating threshold: {0}.".format(threshold)))
        (prmse, pmape), (drmse, dmape) = eval_lstm(y, y_pred, threshold)
        print((
            "Test on model {0}:\npickup rmse = {1}, pickup mape = {2}%\ndropoff rmse = {3}, dropoff mape = {4}%"
            .format(args.model_name, prmse, pmape * 100, drmse, dmape * 100)))

        currTime = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        model.save(model_hdf5_path + args.model_name + currTime + ".hdf5")
        return

    else:
        print("Cannot recognize parameter...")
        return
Example #5
def test():
    var_count = 4
    vtree = sdd.sdd_vtree_new(var_count, "balanced")
    mgr = sdd.sdd_manager_new(vtree)

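    # variables: 1=A, 2=B, 3=C, 4=D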
    # A v B
    alpha = sdd.sdd_disjoin(sdd.sdd_manager_literal(1, mgr),
                            sdd.sdd_manager_literal(2, mgr), mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(-3, mgr),
                           sdd.sdd_manager_literal(-4, mgr), mgr)
    # A v B v ( ~C ^ ~D )
    alpha = sdd.sdd_disjoin(alpha, beta, mgr)

    beta, pmgr = primes(alpha, mgr)
    _sanity_check(alpha, mgr, beta, pmgr)
    pvtree = sdd.sdd_manager_vtree(pmgr)

    import models
    #beta2 = sdd.sdd_global_minimize_cardinality(beta,pmgr)
    beta2 = beta
    for model in models.models(beta2, pvtree):
        print models.str_model(model)

    global cache_hits
    print "cache-hits:", cache_hits

    print "all-ones"
    beta, pmgr = compatible_primes(alpha, [1, 1, 1, 1], mgr)
    pvtree = sdd.sdd_manager_vtree(pmgr)
    for model in models.models(beta, pvtree):
        print models.str_model(model)

    print "all-zeros"
    beta, pmgr = compatible_primes(alpha, [0, 0, 0, 0], mgr)
    pvtree = sdd.sdd_manager_vtree(pmgr)
    for model in models.models(beta, pvtree):
        print models.str_model(model)

    print "blah"
    beta, pmgr = compatible_primes(alpha, [1, 0, 1, 0], mgr)
    pvtree = sdd.sdd_manager_vtree(pmgr)
    for model in models.models(beta, pvtree):
        print models.str_model(model)

    print "dead-nodes:", sdd.sdd_manager_dead_count(mgr)
    print "dead-nodes:", sdd.sdd_manager_dead_count(pmgr)
Example #6
def enumerate_primes(primes, pmgr, var_count):
    pvtree = sdd.sdd_manager_vtree(pmgr)
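    # repeatedly extract the minimum-cardinality models, yield them as terms,
    # then remove those models (conjoin with the negation of mincard) and continue until nothing is left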
    while not sdd.sdd_node_is_false(primes):
        mincard = sdd.sdd_global_minimize_cardinality(primes, pmgr)
        for model in models.models(mincard, pvtree):
            term = prime_to_dict(model, var_count)
            yield term
        primes = sdd.sdd_conjoin(primes, sdd.sdd_negate(mincard, pmgr), pmgr)
Example #7
def annotate(text):
    ann_strings = models(text)
    tokens_soup = []
    comments = {}
    for i, ann in enumerate(ann_strings):
        tokens_soup.append([ann[0], int(ann[1])])
        if ann[2]:
            comments["comment" + str(i)] = ann[2]
    return tokens_soup, comments
Example #8
def test_andy():
    var_count = 3
    vtree = sdd.sdd_vtree_new(var_count, "balanced")
    mgr = sdd.sdd_manager_new(vtree)

    # 100, 101, 111, 001, 011
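    # each bit string above is an assignment to variables (1, 2, 3);
    # the five corresponding terms are built and disjoined below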
    alpha = sdd.sdd_manager_false(mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(1, mgr),
                           sdd.sdd_manager_literal(-2, mgr), mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(-3, mgr), beta, mgr)
    alpha = sdd.sdd_disjoin(alpha, beta, mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(1, mgr),
                           sdd.sdd_manager_literal(-2, mgr), mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(3, mgr), beta, mgr)
    alpha = sdd.sdd_disjoin(alpha, beta, mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(1, mgr),
                           sdd.sdd_manager_literal(2, mgr), mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(3, mgr), beta, mgr)
    alpha = sdd.sdd_disjoin(alpha, beta, mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(-1, mgr),
                           sdd.sdd_manager_literal(-2, mgr), mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(3, mgr), beta, mgr)
    alpha = sdd.sdd_disjoin(alpha, beta, mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(-1, mgr),
                           sdd.sdd_manager_literal(2, mgr), mgr)
    beta = sdd.sdd_conjoin(sdd.sdd_manager_literal(3, mgr), beta, mgr)
    alpha = sdd.sdd_disjoin(alpha, beta, mgr)

    beta, pmgr = primes(alpha, mgr)
    _sanity_check(alpha, mgr, beta, pmgr)
    vtree = sdd.sdd_manager_vtree(mgr)
    pvtree = sdd.sdd_manager_vtree(pmgr)

    import models
    for model in models.models(alpha, vtree):
        print models.str_model(model)

    for model in models.models(beta, pvtree):
        print models.str_model(model)

    print "dead-nodes:", sdd.sdd_manager_dead_count(mgr)
    print "dead-nodes:", sdd.sdd_manager_dead_count(pmgr)
Example #9
def main(att_lstm_num = 3, long_term_lstm_seq_len = 3, short_term_lstm_seq_len = 7, cnn_nbhd_size = 3, nbhd_size = 2, cnn_flat_size = 128,\
     batch_size = 64, max_epochs = 100, validation_split = 0.2, early_stop = EarlyStopping()):
    model_hdf5_path = "./hdf5s/"

    model_name = ""
    if len(sys.argv) == 1:
        print("no parameters")
        return
    else:
        model_name = sys.argv[1]

    sampler = file_loader.file_loader()
    modeler = models.models()

    if model_name[2:] == "stdn":
        #training
        att_cnnx, att_flow, att_x, cnnx, flow, x, y = sampler.sample_stdn(datatype = "train", att_lstm_num = att_lstm_num,\
        long_term_lstm_seq_len = long_term_lstm_seq_len, short_term_lstm_seq_len = short_term_lstm_seq_len,\
        nbhd_size = nbhd_size, cnn_nbhd_size = cnn_nbhd_size)

        print("Start training {0} with input shape {2} / {1}".format(
            model_name[2:], x.shape, cnnx[0].shape))

        model = modeler.stdn(att_lstm_num = att_lstm_num, att_lstm_seq_len = long_term_lstm_seq_len,\
        lstm_seq_len = len(cnnx), feature_vec_len = x.shape[-1],\
        cnn_flat_size = cnn_flat_size, nbhd_size = cnnx[0].shape[1], nbhd_type = cnnx[0].shape[-1])


        model.fit(\
        x = att_cnnx + att_flow + att_x + cnnx + flow + [x,],\
        y = y,\
        batch_size=batch_size, validation_split = validation_split, epochs=max_epochs, callbacks=[early_stop])

        att_cnnx, att_flow, att_x, cnnx, flow, x, y = sampler.sample_stdn(
            datatype="test", nbhd_size=nbhd_size, cnn_nbhd_size=cnn_nbhd_size)
        y_pred = model.predict(\
        x = att_cnnx + att_flow + att_x + cnnx + flow + [x,],)
        threshold = float(
            sampler.threshold) / sampler.config["volume_train_max"]
        print("Evaluating threshold: {0}.".format(threshold))
        (prmse, pmape), (drmse, dmape) = eval_lstm(y, y_pred, threshold)
        print(
            "Test on model {0}:\npickup rmse = {1}, pickup mape = {2}%\ndropoff rmse = {3}, dropoff mape = {4}%"
            .format(model_name[2:], prmse, pmape * 100, drmse, dmape * 100))

        currTime = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        model.save(model_hdf5_path + model_name[2:] + currTime + ".hdf5")
        return

    print("Cannot recognize parameter...")
    return
Example #10
    def main(self):
        #Import all configuration constants

        ROWS = config.ROWS
        COLS = config.COLS
        CH = config.CH

        PATH_FILE = config.PATH_FILE
        BATCH_SIZE = config.BATCH_SIZE
        EPOCHS = config.EPOCHS
        LOG = config.LOG

        DATA = pd.read_csv(config.PATH_FILE + config.LOG)

        #get generators
        gen = generators_funcs()
        #import models
        model = models("Rambo")
        model = model.model

        #Split data into train and test sets, that is 90% training 10% validation
        train, test = train_test_split(DATA, test_size=0.10)
        n_train_samples = len(train)
        n_val_samples = len(test)

        gen_train = gen.train_generator(train, BATCH_SIZE)
        gen_val = gen.val_generator(test, BATCH_SIZE)

        model.fit_generator(generator=gen_train,
                            samples_per_epoch=n_train_samples,
                            validation_data=gen_val,
                            nb_val_samples=n_val_samples,
                            nb_epoch=EPOCHS,
                            verbose=1)

        filepath = "weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max')
        callbacks_list = [checkpoint]
        #Save final model in formats as specified in the Udacity rubric
        model_json = model.to_json()

        with open("rambo_model.json", "w") as json_file:
            json_file.write(model_json)
        model.save('rambo_model.h5')
        print("Saved model to disk")
Example #11
def _sanity_check(f, mgr, g, pmgr):
    """f is original function and g is its prime implicants"""

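    # every enumerated term must be an implicant of f (term ∧ f == term) and must be prime;
    # the disjunction of all terms must match f's model count and equal f itself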
    alpha = sdd.sdd_manager_false(mgr)
    pvtree = sdd.sdd_manager_vtree(pmgr)
    for prime in models.models(g, pvtree):
        term = prime_to_term(prime, mgr)
        beta = sdd.sdd_conjoin(term, f, mgr)
        assert term == beta
        assert _is_prime(prime, f, mgr)
        alpha = sdd.sdd_disjoin(alpha, term, mgr)
    mc1 = sdd.sdd_global_model_count(f, mgr)
    mc2 = sdd.sdd_global_model_count(alpha, mgr)
    print "mc-check:", mc1, mc2, ("ok" if mc1 == mc2 else "NOT OK")
    assert mc1 == mc2
    assert alpha == f
Example #12
def initializer(variables_array=None):
    models_tmp = {}
    for model in MODELS:
        tmp_name = model['name']
        models_tmp[tmp_name] = models(
            load(model['patch'])
        )
        fkey = []
        for x in model['foreignkey']:
            fkey.append(x)
        for key in fkey:
            for x in models_tmp[tmp_name].table:
                x[key['related_name']] = models_tmp[key['model_to']].get(
                    key['col_to'], x[key['col_from']]
                )
            del x[key['col_from']]
        if variables_array is not None:
            variables_array[tmp_name] = models_tmp[tmp_name]
            return variables_array
    return models_tmp
Example #13
def main(dataloc, path_to_weights, model='vgg', batch_size=8, rgb=False):

    # load test data
    test_df = utils.load_test_data(dataloc)

    #     categories = utils.define_categories(test_df)
    categories = [
        'any', 'epidural', 'intraparenchymal', 'intraventricular',
        'subarachnoid', 'subdural'
    ]
    print("CATEGORIES = {}".format(categories))

    # load model
    model = models(model,
                   input_image_size=512,
                   number_of_output_categories=len(categories))

    # load weights
    model.load_weights(path_to_weights)

    # instantiate generator

    test_generator = create_test_generator(test_df, categories, batch_size)

    test_generator.__reset__()  # make sure the generator is starting at index 0!!!
    # predict
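    # steps covers every test row; the +1 picks up the final partial batch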
    y_pred = model.predict_generator(test_generator,
                                     steps=len(test_df) // batch_size + 1,
                                     verbose=1)

    # build submission
    submission_filename = build_submission(test_df, y_pred, dataloc)

    print('uploading submission...')

    # submit response
    upload_submission(submission_filename)
Example #14
    parser.add_argument("--DISCRET_muV", default=4, type=int,\
                        help="discretization of the 3d grid for muV")
    parser.add_argument("--DISCRET_sV", default=8, type=int,\
                        help="discretization of the 3d grid for sV")
    parser.add_argument("--DISCRET_TvN", default=4, type=int,\
                        help="discretization of the 3d grid for TvN")
    parser.add_argument("--RANGE_FOR_3D", type=float, default=[], nargs='+',\
                        help="possibility to explicitely set the 3D range scanned")
    parser.add_argument("--SUFFIX_NAME", default='',\
                        help="suffix to the file name in ../data/MODEL_*.npz")

    args = parser.parse_args()

    print args.RANGE_FOR_3D

    Mlist = models.models(args.MODEL)

    if args.WITH_TM_VARIATIONS:

        def make_sim(MODEL):
            print 'by default we vary Tm !!'
            make_simulation_for_model(MODEL, args, sampling=args.sampling)
            make_simulation_for_model(MODEL + '__minus',
                                      args,
                                      sampling=args.sampling)
            make_simulation_for_model(MODEL + '__plus',
                                      args,
                                      sampling=args.sampling)
    else:

        def make_sim(MODEL):
Example #15
####################### Feature Expansion ################################
if classifier!="nn" and classifier!="bow":
    X_tr = feature_exp(X_tr)
    X_te = feature_exp(X_te)
D = X_tr.shape[1]

print "After Feature Expansion: Training : [Inputs x Features ] = [%d x %d]" % (N_tr,D)
print "After Feature Expansion: Test     : [Inputs x Features ] = [%d x %d]" % (N_te,D)

###################### Normalizing data ##################################
scaler = preprocessing.StandardScaler().fit(X_tr)
X_tr_n = scaler.transform(X_tr)
X_te_n = scaler.transform(X_te)

end = time.time()
print "\nTime taken for Data preparation = %f sec" % (end-start)

start = time.time()
print time.ctime()

y_te_p = models(X_tr_n, y_tr, X_te_n, classifier)

if isinstance(y_te_p,np.ndarray):
    if submission != 1:
        print_accuracy(y_te, y_te_p, "Test")
    else:
        save_out(y_te_p,labels_string,sorted_files_te,submission_fname)

end = time.time()
print "\nTime taken by classifier = %f sec" % (end-start)
Example #16
from models import models
#import ksvm_test

paths = ['./Data3']
sector = [0]

for path in paths:
    print(
        "\n|-------------------------------------------------------------------------------|"
    )
    print("\nRunning for: " + path + ":")
    print(
        "\n|-------------------------------------------------------------------------------|"
    )
    for sector_id in sector:
        run_models = models(path)

        #print("|-------------------------------------------------------------------------------|")

        run_models.run_DNN(run_models.x_train, run_models.y_train,
                           run_models.x_test, run_models.y_test, sector_id)
        #run_models.run_Logistic_regression(run_models.x_train,run_models.y_train,run_models.x_test,run_models.y_test,sector_id)
        #run_models.run_Linear_SVM(run_models.x_train,run_models.y_train,run_models.x_test,run_models.y_test)
Example #17
from keras.preprocessing.image import ImageDataGenerator

from models import models
from param import param

p = param()

vgg = models().vgg_net_v2()
vgg.summary()

train_data_generator = ImageDataGenerator(
    rescale=1./255,
    shear_range=.2,
    zoom_range=.2,
    horizontal_flip=True
)

train_generator = train_data_generator.flow_from_directory(
    p.train_fold,
    target_size=(150, 150),
    batch_size=p.batch_size,
    class_mode='binary'
)

test_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(
    p.test_fold,
    target_size=(150, 150),
    batch_size=p.batch_size,
    class_mode='binary'
)
Example #18
    '''
    ct = 0
    for i in data:
        ct += len(i[0].phrase.split()) + len(i[1].phrase.split())
    data = []
    print ct
    idx = 0
    ct2 = 0
    while ct > 0:
        dd = d[idx]
        data.append(dd)
        v = len(dd[0].phrase.split()) + len(dd[1].phrase.split())
        ct -= v
        ct2 += v
        idx += 1
    print ct2
    '''
if params.wordfile:
    (words, We) = utils.get_wordmap(params.wordfile)

model = models(We, params)

if params.loadmodel:
    base_params = cPickle.load(open(params.loadmodel, 'rb'))
    lasagne.layers.set_all_param_values(model.final_layer, base_params)

print " ".join(sys.argv)
print "Num examples:", len(data)

model.train(data, words, params)
Example #19
    terrors = []
    verrors = []
    feats = []
    precisions = []
    f1s = []
    recalls = []
    minfeat = models[i][4]
    maxfeat = models[i][4] + 1
    step = 1000
    for j in range(minfeat, maxfeat, step):
        models[i][4] = j

        model = md.models(stoplist,
                          stoppunc,
                          ngram=models[i][1],
                          lowercase=models[i][2],
                          stopwords=models[i][3],
                          nfeats=models[i][4],
                          min_df=models[i][5])

        sentences = sentences[:10000]
        Y = Y[:10000]
        model.setpred(sentences_test)
        Terror, Verror, precision, recall, f1 = model.kfoldCV(
            sentences, Y, 5, models[i][7], classes, 1)
        print(models[i])
        print("Training Error = ", Terror)
        print("Validation Error = ", Verror)
        print(models[i][4])
        print("Precision", precision)
        print("Recall", recall)
Example #20
    args.add_argument('--kernel_size', type=int, default=3)
    args.add_argument('--strides', type=int, default=2)

    args.add_argument('--max_features', type=int, default=251)

    args.add_argument('--batch_size', type=int, default=64)

    config = args.parse_args()

    DATA_PATH = '../data/nsmc-master/'
    DATASET_PATH = os.path.join(DATA_PATH, 'ratings_train.txt')
    TESTSET_PATH = os.path.join(DATA_PATH, 'ratings_test.txt')

    #model
    print("model creating...")
    model_builder = models()
    model = model_builder.get_model(config)
    model_name = config.model
    model.summary()

    # Train Mode
    if config.mode == 'train':
        print("data loading...")
        dataset = MovieReviewDataset(DATASET_PATH, config.strmaxlen)
        x_trn = np.array(dataset.reviews)
        y_trn = np.array(dataset.labels)

        x_tr, y_tr = shuffle(x_trn, y_trn, random_state=1991)

        # callbacks
        checkpoint = ModelCheckpoint('./{}_best.hdf5'.format(model_name),
Example #21
from flask import Flask, json, render_template, request, redirect, Response, url_for
from models import models

app = Flask(__name__, template_folder="templates")

obj_mod = models()


@app.route("/")
def main():
    return render_template('page1.html')


@app.route("/page2", methods=['GET'])
def to_page2():
    datals = obj_mod.process_detail()
    return render_template('page2.html', size=len(datals), prod_list=datals)


@app.route("/page3", methods=['POST', 'GET'])
def to_page3():
    app.logger.debug("Masuk to page3: ")
    return render_template(('page3.html'))


@app.route('/submitProductLink', methods=['POST', 'GET'])
def submitProduct():
    app.logger.debug("Isi request form: {}".format(request.form))
    if request.method == 'POST':
        product_link = request.form['productLink']
        if product_link:
Example #22
from keras.preprocessing.image import ImageDataGenerator

from models import models
from param import param

p = param()

scratch = models().scratch_net()
scratch.summary()

# train_data_generator = ImageDataGenerator(
#     rotation_range=40,
#     width_shift_range=.2,
#     height_shift_range=.2,
#     shear_range=.2,
#     zoom_range=.2,
#     horizontal_flip=True,
#     fill_mode='nearest'
# )

train_data_generator = ImageDataGenerator(
    rescale=1./255,
    shear_range=.2,
    zoom_range=.2,
    horizontal_flip=True
)

train_generator = train_data_generator.flow_from_directory(
    p.train_fold,
    target_size=(150, 150),
    batch_size=p.batch_size,
Example #23
from rest import app
from flask import Flask, request
from models import atomizer,models,sensor,nozzle
import jsonpickle,datetime

app = Flask(__name__)

access_tokens = [None,None]

model = models()

model.atomizer = atomizer(False,1000,datetime.datetime.now(),True,100)

@app.route('/sensors', methods=['GET'])
def get_list():
    json = request.json

    return jsonpickle.encode(model, unpicklable=False)


@app.route('/sensors', methods=['POST'], )
def update_list():
    global model
    json = request.json

    print(json)
    print(json['time'])
    model.add_sensor(sensor(json['id'], json['type'], json['value'], str(datetime.datetime.now()).replace(' ', 'T')))

    return jsonpickle.encode(model, unpicklable=False)
Example #24
import pandas as pd
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
from preprocessing import Preprocessing
Preprocessing = Preprocessing()
from models import models
models = models()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, auc, roc_curve, normalized_mutual_info_score


data = pd.read_csv('./data.csv',index_col=0)
data = data.drop(['patkey', 'index_date', 'MATCHID'], axis=1)
data['age_at_index'] = data['age_at_index']-5
data = Preprocessing.FeatureEncoding(data)
data = Preprocessing.MissingData(data)
data.to_csv('data_complete.csv')
data = pd.read_csv('./data_complete.csv',index_col=0)
#==========================================================================================
#After using the KNN to deal with missing data, count and plot the histogram of features
'''
print(data.loc[:,'Smoking_status'].value_counts())
print(data.loc[:,'BMI_group'].value_counts())
print(data.loc[:,'Alcohol_status'].value_counts())
    
def autolabel(rects):
    for rect in rects:
        height = rect.get_height()
        plt.text(rect.get_x()+rect.get_width()/2.-0.2, 1.03*height, '%s' % int(height))
Example #25
                                 params,
                                 We_initial_lstm=We_lstm)
        else:
            print "Please enter a valid combination type. Exiting."
            sys.exit(0)

    print "Num examples:", len(data)
    print "Num n-grams:", len(words_3grams)
    print "Num words:", len(words_words)

    model.train(data, words_3grams, words_words, params)
else:
    if params.loadmodel:
        saved_params = cPickle.load(open(params.loadmodel, 'rb'))
        words = saved_params.pop(-1)
        model = models(saved_params[0], params)
        lasagne.layers.set_all_param_values(model.final_layer, saved_params)
    else:
        if params.wordtype == "words":
            if params.random_embs:
                words, We = utils.get_words(data, params)
            else:
                words, We = utils.get_wordmap(args.wordfile)
        else:
            words, We = utils.get_ngrams(data, params)

        model = models(We, params)

    print "Num examples:", len(data)
    print "Num words:", len(words)
    parser.add_argument("--DISCRET_muV", default=4, type=int,\
                        help="discretization of the 3d grid for muV")
    parser.add_argument("--DISCRET_sV", default=8, type=int,\
                        help="discretization of the 3d grid for sV")
    parser.add_argument("--DISCRET_TvN", default=4, type=int,\
                        help="discretization of the 3d grid for TvN")
    parser.add_argument("--RANGE_FOR_3D", type=float, default=[], nargs='+',\
                        help="possibility to explicitely set the 3D range scanned")
    parser.add_argument("--SUFFIX_NAME", default='',\
                        help="suffix to the file name in ../data/MODEL_*.npz")

    args = parser.parse_args()

    print args.RANGE_FOR_3D
    
    Mlist = models.models(args.MODEL)

    if args.WITH_TM_VARIATIONS:
        def make_sim(MODEL):
            print 'by default we vary Tm !!'
            make_simulation_for_model(MODEL, args, sampling=args.sampling)
            make_simulation_for_model(MODEL+'__minus', args, sampling=args.sampling)
            make_simulation_for_model(MODEL+'__plus', args, sampling=args.sampling)
    else:
        def make_sim(MODEL):
            make_simulation_for_model(MODEL, args, sampling=args.sampling)
        
    if Mlist is None: # means it is a single model
        make_sim(args.MODEL)
    else:
        for m in Mlist:
Example #27
import models as mod
import knn as kk
cc = cs.csvclass()
ser = ser.services()
stop_wrods = ser.get_stop_wrods()
cc.load_csv_file('file.csv')
ans = cc.train_build()
pp = ex.explor(ans, stop_wrods)
pp.print_total()
pp.print_gender("male")
pp.print_gender("female")
pp.print_language_length("male")
pp.print_language_length("female")
tf_buid = tf.tfidfmodel(ans, stop_wrods)
top = tf_buid.extractTf()
run_mod = mod.models(top[0], top[1], top[2], top[3], top[4])
run_mod.start()
kkk = kk.knn(ans)
kkk.runKNN()
ff = cc.twitt_build("football_tweets.csv")
twit_diction = tf_buid.twit_build(ff)
tup = tf_buid.extracttwitttf(twit_diction)
cccc = run_mod.run_twiit_file(tup[1], "football_tweets.csv")
kkk.runKnnOnTwiit(ff, "football_tweets.csv")
ff = cc.twitt_build("baking_tweets.csv")
twit_diction = tf_buid.twit_build(ff)
tup = tf_buid.extracttwitttf(twit_diction)
cccc = run_mod.run_twiit_file(tup[1], "baking_tweets.csv")
kkk.runKnnOnTwiit(ff, "baking_tweets.csv")
ff = cc.twitt_build("no_subject_tweets.csv")
twit_diction = tf_buid.twit_build(ff)
Example #28
File: main.py Project: db434/EWC
        4. permute: Start with the full dataset, and add permutations of the
                    pixels. Permutations are shared for each split.
    
    `splits` controls how many subsets to split the dataset into, with the given
    `epochs` being distributed evenly across them."""),
        formatter_class=argparse.RawTextHelpFormatter
    )

    training = parser.add_argument_group(title="Normal training")
    training.add_argument("--batch-size", type=int, default=256, metavar="N",
                          help="Number of inputs to process simultaneously")
    training.add_argument("--epochs", type=int, default=30, metavar="N",
                          help="Number of iterations through training dataset")
    training.add_argument("--learning-rate", type=float, default=0.001,
                          metavar="LR", help="Initial learning rate for Adam")
    training.add_argument("--model", type=str, choices=models.models(),
                          default="mlp", help="Neural network to train")
    training.add_argument("--dataset", type=str, choices=datasets.datasets(),
                          help="Dataset to use (default: specified by model)")

    inc = parser.add_argument_group(title="Incremental learning")
    inc.add_argument("--dataset-update", type=str,
                     choices=trainer.increment_options(), default="full",
                     help="How to change the dataset during training")
    inc.add_argument("--splits", type=int, default=5, metavar="N",
                     help="Number of dataset partitions (equally sized)")

    ewc = parser.add_argument_group(title="Elastic weight consolidation")
    ewc.add_argument("--ewc", action="store_true",
                     help="Use EWC to preserve accuracy on previous datasets")
    ewc.add_argument("--ewc-lambda", type=float, default=0.1, metavar="L",
        ax.plot(1e3*t, 1e3*va1, 'k:')
        ax.plot(1e3*t, 1e3*va2+30, 'k:')
    ax.plot([1e3*t[0], 1e3*t[0]], [1e3*v1[0], 1e3*v2[0]+30], 'k>', ms=10)
    ax.annotate('-70mV', (.1,.3), xycoords='axes fraction')


    # plt.tight_layout()
    ax.plot([10,10],[-25,-15], 'gray', lw=3)
    ax.plot([10,60],[-25,-25], 'gray', lw=3)
    ax.annotate('10mV', (16,-10), textcoords='data', size=13)
    ax.annotate(str(int(abs(1e12*I0)))+'pA', (16,-20), textcoords='data',size=13)
    ax.annotate('50ms', (17,-40), textcoords='data', size=13)
    ax.annotate('-70mV', (-10, -70), textcoords='data', size=13)


if __name__=='__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('MODEL', default='LIF')
    parser.add_argument('--I0', type=float, default=100., help='value of current input')
    args = parser.parse_args()

    if args.MODEL!='full':
        fig = make_model_figure(args.MODEL, I0=args.I0*1e-12, savefig=False)
        plt.show()
    else:
        for m in models.models('all_models'):
            fig = make_model_figure(m, I0=args.I0*1e-12, savefig=True)


    ax.plot(1e3 * t, 1e3 * v2 + 30, color=color2)
    if (MODEL1.split("-")[0] == "iAdExp") or (MODEL1.split("-")[0] == "iLIF"):
        ax.plot(1e3 * t, 1e3 * va1, "k:")
        ax.plot(1e3 * t, 1e3 * va2 + 30, "k:")
    ax.plot([1e3 * t[0], 1e3 * t[0]], [1e3 * v1[0], 1e3 * v2[0] + 30], "k>", ms=10)
    ax.annotate("-70mV", (0.1, 0.3), xycoords="axes fraction")

    # plt.tight_layout()
    ax.plot([10, 10], [-25, -15], "gray", lw=3)
    ax.plot([10, 60], [-25, -25], "gray", lw=3)
    ax.annotate("10mV", (16, -10), textcoords="data", size=13)
    ax.annotate(str(int(abs(1e12 * I0))) + "pA", (16, -20), textcoords="data", size=13)
    ax.annotate("50ms", (17, -40), textcoords="data", size=13)
    ax.annotate("-70mV", (-10, -70), textcoords="data", size=13)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("MODEL", default="LIF")
    parser.add_argument("--I0", type=float, default=100.0, help="value of current input")
    args = parser.parse_args()

    if args.MODEL != "full":
        fig = make_model_figure(args.MODEL, I0=args.I0 * 1e-12, savefig=False)
        plt.show()
    else:
        for m in models.models("all_models"):
            fig = make_model_figure(m, I0=args.I0 * 1e-12, savefig=True)
    def build(self):

        # load training df
        self.tdf = utils.load_training_data(self.dataloc, stage=2)

        # drop missing image
        drop_idx = [
            i for i, row in self.tdf['filename'].iteritems()
            if fnmatch.fnmatch(row, '*ID_33fcf4d99*')
        ]
        self.tdf = self.tdf.drop(drop_idx)

        # set up training fraction
        ## train and validate dataframes
        shuff = self.tdf.sample(frac=self.training_fraction,
                                random_state=self.random_state)
        self.train_df = shuff.iloc[:int(0.90 * len(shuff))]
        self.validate_df = shuff.iloc[int(0.90 * len(shuff)):]
        len(shuff), len(self.train_df), len(self.validate_df)

        # set up generators
        self.categories = utils.define_categories(self.train_df)

        self.train_generator = utils.Three_Channel_Generator(
            self.train_df.reset_index(),
            ycols=self.categories,
            desired_size=self.img_size,
            batch_size=self.batch_size,
            random_transform=self.random_transform,
            rgb=True)

        self.validate_generator = utils.Three_Channel_Generator(
            self.validate_df.reset_index(),
            ycols=self.categories,
            desired_size=self.img_size,
            batch_size=self.batch_size,
            random_transform=False,
            rgb=True)

        # load model
        self.model = models(self.model_name,
                            input_image_size=self.img_size,
                            number_of_output_categories=len(self.categories))

        if self.weights_path is not None:
            self.model.load_weights(self.weights_path)

        # setup callbacks
        earlystop = EarlyStopping(patience=10)

        learning_rate_reduction = ReduceLROnPlateau(
            monitor='categorical_accuracy',
            patience=2,
            verbose=1,
            factor=0.5,
            min_lr=0.00001)

        checkpoint_name = "model_weights_vgg19_{}.h5".format(self.datestamp)
        checkpoint = ModelCheckpoint(checkpoint_name,
                                     monitor='val_acc',
                                     verbose=0,
                                     save_best_only=True,
                                     save_weights_only=True,
                                     mode='auto')

        self.callbacks = [earlystop, checkpoint]
	terrors=[]
	verrors=[]
	feats=[]
	precisions=[]
	f1s=[]
	recalls=[]
	minfeat=1
	maxfeat=6
	lr=0.1
	step=1
	for j in range(minfeat,maxfeat,step):
		#hiddenl = "(100"
		#for h in range(1,j):
		#	hiddenl = hiddenl + ",100"
		#hiddenl = hiddenl + ")"
		model=md.models()
		Terror,Verror,precision,recall,f1 = model.kfoldCV(X,Y,models[i][2],classes,0)
		print(models[i])
		print("Training Error = ",Terror)
		print("Validation Error = ",Verror)
		#precision=np.array(precision)
		#recall=np.array(recall)		
		print("Avg Precision",np.mean(precision))
		print("Avg Recall",np.mean(recall))
		print("Avg F1",np.mean(f1))
		terrors.append(Terror)
		verrors.append(Verror)
		precisions.append(precision)
		recalls.append(recall)
		f1s.append(f1)
		models[i][1]=lr