Code example #1
File: main.py Project: Obo-project/obo-project
def callback(recognizer, audio):
	try:
		# Transcribe the captured audio, normalize the text with precompute(),
		# then run every supported relation extractor over it and post the results.
		sents = recognizer.recognize_google(audio, language="en")
		print(sents)
		sents = precompute(sents)
		print(sents)
		for relation in listeRelation:
			rels = relation.extract(sents)

			for rel in rels:
				rel.post()

	except sr.UnknownValueError:
		print("Google Speech Recognition could not understand audio")
	except sr.RequestError:
		print("Could not request results from Google Speech Recognition service")
Code example #2
import speech_recognition as sr
import nltk

from server import post_request
from precompute import precompute
from supportedRelations import *
from supportedRelations import listeRelation, dic


# sents = "The capital of France is Paris."
# sents = "France's capital is Paris."
# sents = "France's capital is Palaiseau."
# sents = "Paris is the capital of France."
# sents = "There are 70 million people in France."
# sents = "There are more than 10 million people in France."
# sents = "There are less than 10 million people in France."
# sents = "population density of France is 100 inhabitants per square kilometer"
# sents = "The gdp in France is 10 billion dollars"
# sents = "France's import is 10 billion dollars"
sents = "France has population density greater than 100 people per kilometer square"

print("Analysed sentence : ", sents)
sents = precompute(sents)
print("Precomputed sentence : ", sents)

for relation in listeRelation:
    rels = relation.extract(sents)

    for rel in rels:
        rel.post()
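The loop above assumes that every entry of listeRelation exposes an extract(sentence) method returning objects with a post() method. The real classes live in supportedRelations and are not shown on this page; the following is only a hypothetical sketch of that interface, with illustrative names and a toy pattern match.

class ExtractedRelation:
    # Hypothetical result object; the real one presumably wraps server.post_request.
    def __init__(self, subject, predicate, value):
        self.subject = subject
        self.predicate = predicate
        self.value = value

    def post(self):
        # Illustrative only: print instead of posting to the server.
        print("POST", self.subject, self.predicate, self.value)


class CapitalRelation:
    # Hypothetical extractor for sentences like "The capital of France is Paris."
    def extract(self, sentence):
        words = sentence.rstrip(".").split()
        if "capital" in words and "of" in words and "is" in words:
            country = words[words.index("of") + 1]
            city = words[words.index("is") + 1]
            return [ExtractedRelation(country, "capital", city)]
        return []


listeRelation = [CapitalRelation()]  # illustrative stand-in for the imported list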
Code example #3
    params_file_name = "{}_params_{}_{}_{}.npy".format(file_name, NUM_FOV, NUM_H, NUM_V)

    t1 = time.time()
    # Load the cached parameter array if it exists; otherwise compute and cache it.
    if os.path.exists(params_file_name):
        params = np.load(params_file_name)
    else:
        params = get_params(MIN_FOV, MAX_FOV, NUM_FOV, NUM_H, NUM_V)
        np.save(params_file_name, params)
    t2 = time.time()
    print(params.shape)
    print("Got params")
    table_file_name = "{}_table_{}_{}_{}_{}_{}.npy".format(file_name, table_type, SAMPLE_RATE, NUM_FOV, NUM_H, NUM_V)
    if os.path.exists(table_file_name):
        heat_table = np.load(table_file_name)
    elif table_type == 0:
        heat_table = precompute(data, file_name, SAMPLE_RATE, params, write_masks=True)
        # Cache under table_file_name so the existence check above finds it next run.
        np.save(table_file_name, heat_table)
    elif table_type == 1:
        temp_name = "{}_table_{}_{}_{}_{}_{}.npy".format(file_name, REGULAR, SAMPLE_RATE, NUM_FOV, NUM_H, NUM_V)
        if os.path.exists(temp_name):
            print("triggered")
            heat_table = np.load(temp_name)
        else:
            heat_table = precompute(data, file_name, SAMPLE_RATE, params, write_masks=True)
            np.save(temp_name, heat_table)
        heat_table = normalize_table(heat_table)
        np.save(table_file_name, heat_table)
    
    t3 = time.time()

    print("Parameter time: {}".format(t2 - t1))
Code example #4
  if process_type == "OPF+OPF" or process_type == "OPF+SVM":
    distance_type = str(data[7])
    distance_param = str(data[8])

  print "Num K: ", num_k
  print "Thumbnail size: ", thumbnail_size
  print "Feature type: ", feature_type
  print "Descriptor type: ", descriptor_type
  print "Precomputing descriptors"

  # Use a single worker process for OVERFEAT; otherwise one worker per CPU core.
  if feature_type == "OVERFEAT" or descriptor_type == "OVERFEAT":
    pool = multiprocessing.Pool(processes=1)
  else:
    pool = multiprocessing.Pool()
  precompute.precompute(train_path, test_path, feature_type, descriptor_type, thumbnail_size, pool)
  pool.close()
  pool.terminate()
  
  if process_type == "KMeans+SVM":
     process_kmeans_svm(num_k=num_k, n_sample_images=n_sample_images, 
      n_sample_descriptors=n_sample_descriptors, thumbnail_size=thumbnail_size,
      feature_type=feature_type, descriptor_type=descriptor_type)
  elif process_type == "KMeans+OPF":
    process_kmeans_opf(num_k=num_k, n_sample_images=n_sample_images, 
        n_sample_descriptors=n_sample_descriptors, thumbnail_size=thumbnail_size,
        feature_type=feature_type, descriptor_type=descriptor_type)
  elif process_type == "OPF+OPF":
    process_opf_opf(num_k=num_k, n_sample_images=n_sample_images, 
        n_sample_descriptors=n_sample_descriptors, thumbnail_size=thumbnail_size,
        feature_type=feature_type, descriptor_type=descriptor_type,
Code example #5
    if process_type == "OPF+OPF" or process_type == "OPF+SVM":
        distance_type = str(data[7])
        distance_param = str(data[8])

    print "Num K: ", num_k
    print "Thumbnail size: ", thumbnail_size
    print "Feature type: ", feature_type
    print "Descriptor type: ", descriptor_type
    print "Precomputing descriptors"

    pool = None
    if feature_type == "OVERFEAT" or descriptor_type == "OVERFEAT":
        pool = multiprocessing.Pool(processes=1)
    else:
        pool = multiprocessing.Pool()
    precompute.precompute(train_path, test_path, feature_type, descriptor_type,
                          thumbnail_size, pool)
    pool.close()
    pool.terminate()

    if process_type == "KMeans+SVM":
        process_kmeans_svm(num_k=num_k,
                           n_sample_images=n_sample_images,
                           n_sample_descriptors=n_sample_descriptors,
                           thumbnail_size=thumbnail_size,
                           feature_type=feature_type,
                           descriptor_type=descriptor_type)
    elif process_type == "KMeans+OPF":
        process_kmeans_opf(num_k=num_k,
                           n_sample_images=n_sample_images,
                           n_sample_descriptors=n_sample_descriptors,
                           thumbnail_size=thumbnail_size,
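Code examples #4 and #5 share the same pattern: the worker pool is sized by feature type (a single process for OVERFEAT, otherwise one process per CPU core), handed to precompute.precompute(), and then shut down. Below is a minimal, self-contained sketch of that pool pattern; the worker function is an illustrative stand-in for the descriptor work.

import multiprocessing


def _work(item):
    # Illustrative stand-in for per-image descriptor computation.
    return item * item


def run(single_process=False):
    # A single worker for the heavyweight back end, otherwise one per CPU core
    # (processes=None lets multiprocessing use os.cpu_count()).
    pool = multiprocessing.Pool(processes=1 if single_process else None)
    try:
        return pool.map(_work, range(8))
    finally:
        pool.close()
        pool.join()


if __name__ == "__main__":
    print(run(single_process=False))

The originals call pool.terminate() immediately after pool.close(); the sketch instead uses close() followed by join(), the more conventional shutdown, which waits for any outstanding work before the workers exit.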