def create_sample_file(number_of_events=100):
    """Dump composed temporal-relation rows for random events to 'data.csv~'.

    Generates `number_of_events` random temporal events, then for every
    ordered triple (A, B, C) over that set writes one CSV row of
    (A*B).to_list() + (B*C).to_list() + (A*C).to_list().  The whole triple
    sweep is repeated 10 times, so the file holds 10 * n**3 rows.

    The file is created next to this module, overwriting any previous copy.
    """
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data.csv~')
    temporal_events = generate_random_events(number_of_events)
    # 'with' guarantees the handle is flushed and closed even on error; the
    # original passed a bare open(...) to csv.writer and leaked the handle.
    with open(path, 'w+') as csv_file:
        csv_writer = csv.writer(csv_file)
        for _ in xrange(10):
            for A in temporal_events:
                for B in temporal_events:
                    for C in temporal_events:
                        csv_writer.writerow(
                            (A * B).to_list() + (B * C).to_list() + (A * C).to_list())
def create_sample_file(number_of_events=100):
    """Dump composed temporal-relation rows for random events to 'data.csv~'.

    Generates `number_of_events` random temporal events, then for every
    ordered triple (A, B, C) over that set writes one CSV row of
    (A*B).to_list() + (B*C).to_list() + (A*C).to_list().  The whole triple
    sweep is repeated 10 times, so the file holds 10 * n**3 rows.

    The file is created next to this module, overwriting any previous copy.
    """
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data.csv~')
    temporal_events = generate_random_events(number_of_events)
    # 'with' guarantees the handle is flushed and closed even on error; the
    # original passed a bare open(...) to csv.writer and leaked the handle.
    with open(path, 'w+') as csv_file:
        csv_writer = csv.writer(csv_file)
        for _ in xrange(10):
            for A in temporal_events:
                for B in temporal_events:
                    for C in temporal_events:
                        csv_writer.writerow(
                            (A * B).to_list() + (B * C).to_list() + (A * C).to_list())
def create_sample_file(size=100000):
    """Write `size` CSV rows of composed relations for random event triples.

    Each row comes from a fresh random triple (A, B, C):
    (A*B).to_list() + (B*C).to_list() + (A*C).to_list().
    Progress is printed every 0.5% of `size`.  The file 'data.csv~' is
    created next to this module, overwriting any previous copy.
    """
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data.csv~')
    # loop-invariant: one progress line every 0.5% of the run.
    # NOTE(review): float modulo only hits exactly 0 when size is a multiple
    # of 200 — behaviour kept as in the original.
    report_interval = size / 200.0
    # 'with' guarantees the handle is flushed and closed even on error; the
    # original passed a bare open(...) to csv.writer and leaked the handle.
    with open(path, 'w+') as csv_file:
        csv_writer = csv.writer(csv_file)
        for i in xrange(size):
            if i % report_interval == 0:
                print('%{0} complete'.format(float(i) / size * 100))
            A, B, C = generate_random_events(3)
            # the original del'd A/B/C/row each pass; the names are rebound
            # on the next iteration anyway, so the dels were dropped
            csv_writer.writerow(
                (A * B).to_list() + (B * C).to_list() + (A * C).to_list())
def create_sample_file(size=100000):
    """Write `size` CSV rows of composed relations for random event triples.

    Each row comes from a fresh random triple (A, B, C):
    (A*B).to_list() + (B*C).to_list() + (A*C).to_list().
    Progress is printed every 0.5% of `size`.  The file 'data.csv~' is
    created next to this module, overwriting any previous copy.
    """
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data.csv~')
    # loop-invariant: one progress line every 0.5% of the run.
    # NOTE(review): float modulo only hits exactly 0 when size is a multiple
    # of 200 — behaviour kept as in the original.
    report_interval = size / 200.0
    # 'with' guarantees the handle is flushed and closed even on error; the
    # original passed a bare open(...) to csv.writer and leaked the handle.
    with open(path, 'w+') as csv_file:
        csv_writer = csv.writer(csv_file)
        for i in xrange(size):
            if i % report_interval == 0:
                print('%{0} complete'.format(float(i) / size * 100))
            A, B, C = generate_random_events(3)
            # the original del'd A/B/C/row each pass; the names are rebound
            # on the next iteration anyway, so the dels were dropped
            csv_writer.writerow(
                (A * B).to_list() + (B * C).to_list() + (A * C).to_list())
def learn(size=10000): train_x, train_y = read_data(size) from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from random import randrange predicate_index = TemporalRelation.all_relations.index('p') clf = KNeighborsRegressor() clf.fit(train_x, train_y[:, predicate_index]) for i in xrange(10): A, B, C = generate_random_events(3) print 'learning', clf.predict((A * B).to_list() + (B * C).to_list()) print 'actual', (A * C).to_list()[predicate_index], '\n-------------\n'
def learn(size=10000): train_x, train_y = read_data(size) from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from random import randrange predicate_index = TemporalRelation.all_relations.index('p') clf = KNeighborsRegressor() clf.fit(train_x, train_y[:, predicate_index]) for i in xrange(10): A, B, C = generate_random_events(3) print 'learning', clf.predict((A * B).to_list() + (B * C).to_list()) print 'actual', (A * C).to_list()[predicate_index], '\n-------------\n'
# NOTE(review): the two defs below take `self` and read self.combinations /
# self.compare — they look like methods of DecompositionFitter (used in the
# __main__ block), but the class header is not visible in this chunk.
def get_composition_data(self):
    """Flatten compare() results over self.combinations into one flat list.

    For each key only `before` and `same` are kept; `after` is unpacked but
    deliberately not appended (presumably derivable from the other two —
    TODO confirm against compare()).
    """
    data = []
    for key in self.combinations:
        before, same, after = self.compare(*key)
        data.append(before)
        data.append(same)
    return data


def check(self):
    """Print the fitted data next to the relations re-derived by FormulaCreator."""
    from spatiotemporal.temporal_events import FormulaCreator
    print self.data
    print FormulaCreator(self).calculate_relations().to_vector()
    print


if __name__ == "__main__":
    from spatiotemporal.temporal_events import FormulaCreator
    from spatiotemporal.temporal_events.trapezium import generate_random_events
    # smoke test: round-trip 50 random event pairs through the fitter and
    # print the residual between the actual and estimated relation vectors
    for i in xrange(50):
        A, B = generate_random_events(2)
        relations = A * B
        formula = FormulaCreator(DecompositionFitter(relations))
        print relations.to_list()
        relations_estimate = formula.calculate_relations()
        print relations_estimate.to_list()
        print relations.to_vector() - relations_estimate.to_vector()
        print
# If not, we return the solutions with their corresponding truth value # Some solution might be repeated, we only pass one instance of these solution but # add up their truth values for that single instance, we do this by some dict tricks for railway_system in solutions: railway_system.compress() A = convert_rail_to_trapezium_event(railway_system, 'A') C = convert_rail_to_trapezium_event(railway_system, 'C') solution = A * C if solution in strengths_by_solution: strengths_by_solution[solution] += strength_per_solution else: strengths_by_solution[solution] = strength_per_solution result = [] for solution in strengths_by_solution: strength = strengths_by_solution[solution] result.append((solution, strength)) return result, strength_total if __name__ == '__main__': A, B, C = generate_random_events(3) print 'actual', (A * C).to_list() print solutions, strength_total = compose(A * B, B * C) print 'Truth Value Total:', strength_total for relation_a_c, strength in solutions: print relation_a_c.to_list(), 'Truth Value:', strength
# NOTE(review): fragment — the first four lines are the tail of a helper that
# builds a TemporalEventTrapezium from a railway system; its def line and the
# binding of `a` are not visible in this chunk, so indentation is reconstructed.
    beginning = railway_system[rail_key][0].b
    ending = railway_system[rail_key][1].a
    b = railway_system[rail_key][1].b
    return TemporalEventTrapezium(a, b, beginning, ending)


if __name__ == "__main__":
    from spatiotemporal.temporal_events.trapezium import generate_random_events
    from spatiotemporal.temporal_events.util import compute_railway_strength
    import numpy
    from spatiotemporal.temporal_events import RelationFormulaConvolution

    search_tree = DepthFirstSearchComposition()
    formula = RelationFormulaConvolution()
    A, B, C = generate_random_events(3)
    # print each event's four key points normalised against A's rise
    # interval (A.a .. A.beginning)
    for event in [A, B, C]:
        p = ""
        for point in [event.a, event.b, event.beginning, event.ending]:
            p += str((point - A.a) / (A.beginning - A.a)) + ", "
        print p
    # A = TemporalEventTrapezium(0, 30, 10, 20)
    # B = TemporalEventTrapezium(1, 9, 2, 8)
    # C = TemporalEventTrapezium(0, 30, 10, 20)
    actual_solution = (A * C).to_vector()
    print "Actual\n", actual_solution
    goal = []
    events = {"A": A, "B": B, "C": C}
    # NOTE(review): the __main__ script continues beyond this chunk
    # (search_tree, goal and events are populated but not yet used here)
# NOTE(review): fragment — the lines through `return data` are the body of a
# get_composition_data(self)-style method whose def line is not visible in
# this chunk; `check` likewise takes `self`, so both appear to be methods of
# DecompositionFitter (used in __main__).  Indentation reconstructed.
    data = []
    for key in self.combinations:
        before, same, after = self.compare(*key)
        # only before/same are stored; after is unpacked but not kept
        data.append(before)
        data.append(same)
    return data


def check(self):
    """Print self.data next to the relations re-derived by FormulaCreator."""
    from spatiotemporal.temporal_events import FormulaCreator
    print self.data
    print FormulaCreator(self).calculate_relations().to_vector()
    print


if __name__ == '__main__':
    from spatiotemporal.temporal_events import FormulaCreator
    from spatiotemporal.temporal_events.trapezium import generate_random_events
    # smoke test: round-trip 50 random event pairs through the fitter and
    # print the residual between the actual and estimated relation vectors
    for i in xrange(50):
        A, B = generate_random_events(2)
        relations = A * B
        print relations.to_list()
        # from the 13 relations, learns parameters for all combinations of the
        # before, same, and after relationships between the beginning and
        # ending distributions of the two intervals
        formula = FormulaCreator(DecompositionFitter(relations))
        # from these relationships, computes the 13 relations again
        relations_estimate = formula.calculate_relations()
        print relations_estimate.to_list()
        print relations.to_vector() - relations_estimate.to_vector()
        print