Example 1
def cli():
    """Dispatch command-line arguments to the requested service."""
    if args.recommend:
        recommendation.start(args.meme)
    if args.force_index:
        index_data.start(args.force_index)
    if args.search:
        searchp.start(args)
    if args.preprocess:
        preprocess.start(args)
    if args.generate:
        meme_generator.start(args)
Example 2
def cli():
    # Uses command-line args to invoke services.
    if args.recommend:
        recommendation.start(args.meme)
    if args.force_index:
        index_data.start(args.force_index)
    if args.search:
        searchp.start(args)
    if args.preprocess:
        preprocess.start(args)
    if args.generate:
        meme_generator.start(args)
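Both versions of cli() read flags from an args namespace built elsewhere. A minimal sketch of the argparse setup they imply (flag names, value types, and help strings are inferred from the attributes the function reads, not taken from the original source):

import argparse

def parse_args():
    # Everything below is inferred from what cli() accesses; treat it
    # as an assumption, not the project's actual interface.
    parser = argparse.ArgumentParser(description="command-line front end")
    parser.add_argument("--recommend", action="store_true",
                        help="recommend content similar to --meme")
    parser.add_argument("--meme", help="item to base recommendations on")
    parser.add_argument("--force-index",
                        help="rebuild the index from this data source")
    parser.add_argument("--search", help="search query")
    parser.add_argument("--preprocess", action="store_true",
                        help="run the preprocessing pipeline")
    parser.add_argument("--generate", action="store_true",
                        help="generate new output")
    return parser.parse_args()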
Example 3
def run_experiment(args):
    """run training and save to wandb"""
    wandb.init(project=args.project_name)
    sample_rate = args.sampling_rate
    X, y_labels = preprocess.start("Intra",
                                   sample_rate,
                                   test_path_folder=None,
                                   generator=False)

    model = build_model(X[0].shape,
                        conv_1_size=args.conv_1_size,
                        batch_norm=args.batch_norm,
                        conv_2_size=args.conv_2_size,
                        kernel_size=args.kernel_size,
                        layer_1_size=args.layer_1_size,
                        dropout=args.dropout,
                        layer_2_size=args.layer_2_size,
                        optimizer=args.optimizer,
                        learning_rate=args.learning_rate,
                        weight_initializers=args.weight_initializers)
    # log all values of interest to wandb
    wandb.config.update(args)

    dictionary_metrics = {name: [] for name in model.metrics_names}
    # One-hot encode once; the conversion does not depend on the fold split.
    y = preprocess.convert_y_labels_to_hot_vectors(y_labels)
    skf = StratifiedKFold(n_splits=5)
    for train_index, test_index in skf.split(X, y_labels):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        model.fit(X_train,
                  y_train,
                  epochs=args.epochs,
                  batch_size=args.batch_size,
                  verbose=1)
        metrics = model.evaluate(X_test, y_test)
        for name, value in zip(model.metrics_names, metrics):
            dictionary_metrics[name].append(value)

    # Average each metric across the five folds before logging.
    for key in dictionary_metrics:
        dictionary_metrics[key] = np.mean(dictionary_metrics[key])

    wandb.log(dictionary_metrics)
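One caveat in the fold loop above: build_model is called once, so weights trained in one fold carry into the evaluation of the next. A minimal sketch of a per-fold rebuild, assuming only that a zero-argument factory (here called build_fn, a hypothetical name) returns a freshly compiled Keras model:

import numpy as np
from sklearn.model_selection import StratifiedKFold

def cross_validate(build_fn, X, y_onehot, y_labels, epochs, batch_size, n_splits=5):
    """Stratified k-fold evaluation that rebuilds the model for every fold."""
    scores = {}
    skf = StratifiedKFold(n_splits=n_splits)
    for train_idx, test_idx in skf.split(X, y_labels):
        model = build_fn()  # fresh, untrained model for this fold
        model.fit(X[train_idx], y_onehot[train_idx],
                  epochs=epochs, batch_size=batch_size, verbose=0)
        fold_metrics = model.evaluate(X[test_idx], y_onehot[test_idx], verbose=0)
        for name, value in zip(model.metrics_names, fold_metrics):
            scores.setdefault(name, []).append(value)
    # Average each metric across folds, mirroring the loop above.
    return {name: float(np.mean(values)) for name, values in scores.items()}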
Example 4
def run_experiment(args):
    """Run training and log results to wandb."""
    wandb.init(project=args.project_name)

    sample_rate = args.sampling_rate
    X, y_labels = preprocess.start(
        "Cross", sample_rate, test_path_folder=None, generator=False)
    X_train, X_val, y_train, y_val = train_test_split(
        X, y_labels, test_size=0.06, random_state=42, stratify=y_labels)
    y_train = preprocess.convert_y_labels_to_hot_vectors(y_train)
    y_val = preprocess.convert_y_labels_to_hot_vectors(y_val)
    X_train = preprocess.outliers_removal(X_train)
    # First head configuration (kernel ~5% of the sequence length)
    kernel_size_head_1 = int(X_train.shape[1] * 0.05)
    strides_head_1 = int(kernel_size_head_1 / 2)

    # Second head configuration (kernel ~25% of the sequence length)
    kernel_size_head_2 = int(X_train.shape[1] * 0.25)
    strides_head_2 = int(kernel_size_head_2 / 2)

    wandb.config.update(args)
    indices_train = np.arange(X_train.shape[0])
    indices_val = np.arange(X_val.shape[0])
    np.random.shuffle(indices_train)
    np.random.shuffle(indices_val)

    X_train = X_train[indices_train]
    y_train = y_train[indices_train]

    X_val = X_val[indices_val]
    y_val = y_val[indices_val]
    model = build_model(X_train, y_train, X_val, y_val,
                        conv_1_size=args.conv_1_size, conv_2_size=args.conv_2_size,
                        kernel_size_head_1=kernel_size_head_1, strides_head_1=strides_head_1,
                        kernel_size_head_2=kernel_size_head_2, strides_head_2=strides_head_2,
                        layer_1_size=args.layer_1_size, dropout_conv=args.dropout_conv,
                        dropout_dense=args.dropout_dense, layer_2_size=args.layer_2_size,
                        optimizer=args.optimizer, learning_rate=args.learning_rate,
                        weight_initializers=args.weight_initializers)
    # Both heads receive the same input; WandbCallback tracks validation loss.
    model.fit([X_train, X_train], y_train,
              validation_data=([X_val, X_val], y_val),
              epochs=args.epochs, batch_size=args.batch_size,
              callbacks=[WandbCallback(monitor="val_loss", mode="min")],
              verbose=1)
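The fit call above feeds the same array to two model inputs, which points to a two-head convolutional network (one head per kernel scale). A minimal self-contained sketch of that pattern; the filter counts, pooling choices, and the build_two_head_model name are illustrative assumptions, not the original build_model:

from tensorflow import keras
from tensorflow.keras import layers

def build_two_head_model(input_shape, n_classes,
                         kernel_size_head_1, strides_head_1,
                         kernel_size_head_2, strides_head_2):
    # Two inputs: fit() passes the same array to both heads.
    in_1 = keras.Input(shape=input_shape)
    in_2 = keras.Input(shape=input_shape)
    # Head 1: short kernel, fine-grained temporal features.
    h1 = layers.Conv1D(32, kernel_size_head_1, strides=strides_head_1,
                       activation="relu")(in_1)
    h1 = layers.GlobalMaxPooling1D()(h1)
    # Head 2: long kernel, coarse temporal features.
    h2 = layers.Conv1D(32, kernel_size_head_2, strides=strides_head_2,
                       activation="relu")(in_2)
    h2 = layers.GlobalMaxPooling1D()(h2)
    merged = layers.concatenate([h1, h2])
    out = layers.Dense(n_classes, activation="softmax")(merged)
    model = keras.Model(inputs=[in_1, in_2], outputs=out)
    model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model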
Example 5
			camera.capture(image_path[count])
			print("mengambil gambar", count+1, " selesai")
			GPIO.output(flash, GPIO.LOW)
			count += 1
			start = False

	print('Image capture finished')
	bunyi(buzz, 2)
	stop = False

	waktu_mulai = time.perf_counter()  # time.clock() was removed in Python 3.8
	pic = 0
	text = ""
	while pic < count:
		im = Image.open(image_path[pic])
		im = preprocess.start(im)

		temp = pytesseract.image_to_string(im, lang='ind', config='--psm 6')
		temp = temp.replace('-', ' ')
		temp = temp.replace('\n', ' ')
		temp = re.sub(r'[^a-zA-Z0-9 ]','', temp)
		text = text + temp
		#im.close()
		pic += 1

	print(text)

	bunyi(buzz, 3)
	print("INFO:  Saying Text")
	print('Total huruf : ', len(text))
	selesai = time.clock() - waktu_mulai
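The capture-and-read loop above condenses into a small helper. In this sketch, preprocess.start is assumed, as in the snippet, to return a PIL image ready for Tesseract; ocr_images is a hypothetical name:

import re
import pytesseract
from PIL import Image

def ocr_images(paths, lang='ind'):
    """OCR each image and return the cleaned, concatenated text."""
    text = ""
    for p in paths:
        im = preprocess.start(Image.open(p))  # assumed to return a cleaned PIL image
        chunk = pytesseract.image_to_string(im, lang=lang, config='--psm 6')
        chunk = chunk.replace('-', ' ').replace('\n', ' ')
        text += re.sub(r'[^a-zA-Z0-9 ]', '', chunk)
    return text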
Example 6
        feature_kappas_LDAC[feature] = calculate_kappa(grade_dict,
                                                       LDAC_grades[feature])

    return (feature_kappas_SVM, feature_kappas_LDAC)


if __name__ == '__main__':
    #if not path.exists("cache\\SVM_features_kappas.pickle") or not path.exists("cache\\LDAC_features_kappas.pickle"):
    #    experiment.start_experiment(teacher_grades=grade_dict, essays=essay_collection)
    if not path.exists("cache\\SVM_model.pickle") or not path.exists(
            "cache\\LDAC_model.pickle"):
        if path.exists("cache\\essays.pickle"):
            essay_collection = cache_manager.read_essays()
        else:
            essay_collection = preprocess.start()
            cache_manager.cache_essays(essay_collection)
        if path.exists("cache\\grades.pickle"):
            grade_dict = cache_manager.read_grades()
        else:
            grade_dict = cache_manager.input_grades(essay_collection)
        train_all_features()
        feature_grades = train_each_feature()
        feature_kappas = evaluate_each_feature(feature_grades[0],
                                               feature_grades[1])
        printing.print_feature_kappas(feature_kappas[0], feature_kappas[1])
    else:
        svm = cache_manager.read_classifier()
        ldac = cache_manager.read_classifier("LDAC_model.pickle")
        essay_vector = preprocess.start(normalise=True, path="essays_to_test/")
        if len(essay_vector) >= 1:
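The main block relies on cache_manager to round-trip pickles under cache\. A minimal sketch of that load-or-build pattern; these helpers are illustrative stand-ins for cache_manager, not its actual implementation, and preprocess.start is assumed importable as in the snippet:

import pickle
from os import path

def cache_essays(essays, filename="cache\\essays.pickle"):
    with open(filename, "wb") as f:
        pickle.dump(essays, f)

def read_essays(filename="cache\\essays.pickle"):
    with open(filename, "rb") as f:
        return pickle.load(f)

def load_or_build_essays():
    # Reuse the cached pickle when present; otherwise preprocess and cache.
    if path.exists("cache\\essays.pickle"):
        return read_essays()
    essays = preprocess.start()
    cache_essays(essays)
    return essays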