Example #1
    def __init__(self, inputargs):
        self.graph = Graph(InputParser(inputargs.inputfile).graph_output())
        self.k = inputargs.k[0]

        if inputargs.all:
            self.simplify = True
            self.communities = True
            self.hubs = True
        else:
            self.simplify = inputargs.simplify
            self.communities = inputargs.communities
            self.hubs = inputargs.hubs

        self._launch_test()
Example #2
    def addToFamily(self, line):
        try:
            parsed = InputParser.parse_line(line)
            name1 = parsed[0]
            relation = parsed[1]
            gender = parsed[2]
            name2 = parsed[3] if parsed[3] else None
            if self.family_tree.addToFamily(name1, relation, gender, name2):
                return Status.ADD_SUCCESS
            else:
                return Status.ADD_FAILED
        except Exception as e:
            return Status.EXCEPTION + str(e)
Example #3
def main():
    value = input(
        'You can type command: \n'
        '@{name} - to get events from provided category. Example: @update\n'
        '#{name} - to get events intended to some recipient. Example: #john\n'
        'latest{N} - to get latest N events\n'
        '------------------------------------------\n')

    try:
        event_or_filter = InputParser.parse(value)
    except Exception as e:
        print(str(e))
        print('----------------------------------')
        return main()

    if isinstance(event_or_filter, Filter):
        events = manager.get_by(event_or_filter)
        print('-----------------------------')
        print(events)
        print('-----------------------------')
    else:
        manager.append(event_or_filter)

    return main()
Example #4
from graph import Graph
from parser import InputParser


if __name__ == "__main__":
    print("-"*100)
    print("Testing Graph Simplification: ")

    graph1 = Graph(InputParser("oursimplificationtest1.txt").graph_output())
    print("-"*50)
    print("Test 1: ")
    print("Original Graph:")
    graph1.print_graph()
    graph1.simplify_debts()
    print("\nGraph after simplification:")
    graph1.print_graph()
    print()

    graph1 = Graph(InputParser("oursimplificationtest2.txt").graph_output())
    print("-"*50)
    print("Test 2: ")
    print("Original Graph:")
    graph1.print_graph()
    graph1.simplify_debts()
    print("\nGraph after simplification:")
    graph1.print_graph()
    print("\n\n")



Example #5
from hmm_builder import HMMBuilder
from parser import InputParser

path = "./metu.txt"

def split_training_data_to_test_data():
    data_set = list()
    with open(path) as input_file:
        for line in input_file:
            data_set.append(line.strip())

    # random.shuffle(data_set)
    return data_set[:3960], data_set[3960:]


if __name__ == '__main__':

    train_set, test_set = split_training_data_to_test_data()

    parser = InputParser(train_set)
    transition_counts = parser.get_transition_counts()
    emission_counts, corpus = parser.get_emission_counts()

    hmm_builder = HMMBuilder(transition_counts, emission_counts)
    transition_probability = hmm_builder.build_transition_probability()
    # emission probabilities were calculated by smoothing manually in the Viterbi class.

    # words that appear only once in the training data
    once_words = hmm_builder.get_only_once_words()
    # total number of tags (HMM states)
    state_size = len(transition_probability.keys())
    # tag labels, kept for backtracking
    tag_labels = list(transition_probability.keys())
    # smoothing constant (often called k or alpha) used for add-k smoothing
    alpha = 0.5
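# The snippet above only defines the add-k constant; as a minimal sketch (not part
# of the original project), applying add-k smoothing to an emission probability
# could look like this, assuming emission_counts maps tag -> {word: count}:
def smoothed_emission_probability(emission_counts, tag, word, alpha, vocabulary_size):
    # P(word | tag) = (count(tag, word) + alpha) / (count(tag) + alpha * V)
    tag_counts = emission_counts.get(tag, {})
    total_for_tag = sum(tag_counts.values())
    return (tag_counts.get(word, 0) + alpha) / (total_for_tag + alpha * vocabulary_size)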
Example #6
    token_increment = 10
    token_init = 10
    batch_size = 1024
    epochs = 100
    train_test_ratio = 0.9
    use_lm = False
    lstm = True

    physical_devices = tf.config.list_physical_devices('GPU')
    try:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    except (IndexError, RuntimeError):
        # no GPU in the device list, or memory growth could not be set
        print('No GPU found by TensorFlow')
        exit(0)

    parser = InputParser('data_nmr/HA_nmr.inp',
                         input_type=input_type, token_increment=token_increment, token_init=token_init)
    data = parser.parse_input()
    norm = DataNormalizer(data)
    print(data[:10])
    norm_data = norm.get_normalized_dateset()
    X, Y = parser.split_input_and_output(data)
    X_train, Y_train, X_test, Y_test = split_train_test(X, Y, ratio=train_test_ratio)

    print('Data sample')
    for i in range(20):
        print(f'Input: {X[i]} ### Output {Y[i]}')
    print(f'Train data count: {len(X_train)}\nTest data count: {len(X_test)}')
    
    start = time.time()
    if lstm:
        X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
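# utils.split_train_test is not shown in this example; a minimal sketch matching the
# call signature used above (illustrative only, the project's real helper may differ):
import numpy as np

def split_train_test(X, Y, ratio=0.9):
    # keep the first `ratio` fraction of samples for training, the rest for testing
    X, Y = np.asarray(X, dtype=np.float32), np.asarray(Y, dtype=np.float32)
    cut = int(len(X) * ratio)
    return X[:cut], Y[:cut], X[cut:], Y[cut:]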
Example #7
import operator
import numpy as np
from hmm_builder import HMMBuilder
from parser import InputParser
from viterbi import Viterbi
from test_handler import TestHandler
import random

input_path = "./TrainingDataset.txt"

if __name__ == '__main__':

    parser = InputParser(input_path)
    parser.parse_data()

    transition_counts = parser.get_transition_counts()

    emission_counts, corpus = parser.get_emission_counts()

    print("Transition_counts:", transition_counts)
    print("\nEmission_counts:", emission_counts)

    print("\nCorpus:", corpus)

    hmm_builder = HMMBuilder(transition_counts, emission_counts)
    transition_probability = hmm_builder.build_transition_probability()

    transition_probability = hmm_builder.normalize(transition_probability)

    print(transition_probability)
    # emission probabilities were calculated by smoothing manually in the Viterbi class.
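# HMMBuilder.normalize is not shown here; as an illustrative sketch (assumed, not the
# project's actual implementation), row-normalizing transition counts into
# probabilities could look like this:
def normalize_transitions(transition_counts):
    # assumes transition_counts maps prev_tag -> {next_tag: count}
    probabilities = {}
    for prev_tag, next_counts in transition_counts.items():
        total = sum(next_counts.values())
        if total:
            probabilities[prev_tag] = {tag: count / total for tag, count in next_counts.items()}
        else:
            probabilities[prev_tag] = {}
    return probabilities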
Example #8
from parser import InputParser
from tensorflow import keras
from time import time
import numpy as np

if __name__ == "__main__":
    start = time()

    input_type = 'B'
    token_increment = 10
    token_init = 10
    parser = InputParser('proteins/4r3o' + 'HB',
                         input_type=input_type,
                         token_increment=token_increment,
                         token_init=token_init)
    data_ = parser.parse_input()
    data = [x for x in data_ if x != []]

    X, _ = parser.split_input_and_output(data)

    X = np.array(X, dtype=np.float32)
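    # Keras LSTM layers expect 3-D input of shape (samples, timesteps, features); timesteps is 1 here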
    X = np.reshape(X, (X.shape[0], 1, X.shape[1]))

    data_load_time = time() - start
    start = time()

    model = keras.models.load_model('models/B_LSTM.h5')

    model_load_time = time() - start
    start = time()
Example #9
from parser import InputParser
from model import model_A, model_lstm
from utils import split_train_test
from tensorflow import keras
import numpy as np
   
if __name__ == "__main__":
    input_type = 'A'
    token_increment = 10
    token_init = 10
    train_test_ratio = 0.9
    parser = InputParser('data_extended/HA_All.inp',
                         input_type=input_type, token_increment=token_increment, token_init=token_init)
    data_ = parser.parse_input()
    data = [x for x in data_ if x != []]

    X, Y = parser.split_input_and_output(data)

    Y = np.array(Y, dtype=np.float32)
    X = np.array(X, dtype=np.float32)

    model = keras.models.load_model('models/B_FF.h5')

    predicted = model.predict(X)

    with open('results/B_FF.txt', 'w') as f:
        for i in range(len(X)):
            f.write(f'{Y[i][0]} {Y[i][1]} {Y[i][2]} {predicted[i][0]} {predicted[i][1]} {predicted[i][2]}\n')