Example 1
def __init__(self, coordinates, radius):
  self.coordinates = coordinates
  self.radius = radius
  self.dates = Dates(coordinates)
  print("before_init")
  self.dates.make_table_with_coordinates_and_dates()
  print("finish_init")
Example 2
    def __init__(self, config):
        self.preferred_teams = config.preferred_teams
        self.include_mlb = config.news_ticker_mlb_news
        self.include_preferred = config.news_ticker_preferred_teams
        self.include_traderumors = config.news_ticker_traderumors
        self.include_countdowns = config.news_ticker_countdowns
        self.include_date = config.news_ticker_date
        self.date_format = config.news_ticker_date_format
        self.feed_urls = []
        self.feed_data = None
        self.starttime = time.time()
        self.important_dates = Dates()

        self.__compile_feed_list()
        self.update(True)
Example 3
class Headlines:
    def __init__(self, config):
        self.preferred_teams = config.preferred_teams
        self.include_mlb = config.news_ticker_mlb_news
        self.include_preferred = config.news_ticker_preferred_teams
        self.include_traderumors = config.news_ticker_traderumors
        self.include_countdowns = config.news_ticker_countdowns
        self.include_date = config.news_ticker_date
        self.date_format = config.news_ticker_date_format
        self.feed_urls = []
        self.feed_data = None
        self.starttime = time.time()
        self.important_dates = Dates()

        self.__compile_feed_list()
        self.update(True)

    def update(self, force=False):
        if force or self.__should_update():
            debug.log("Headlines should update!")
            self.starttime = time.time()
            feeds = []
            debug.log("{} feeds to update...".format(len(self.feed_urls)))
            feedparser.USER_AGENT = "mlb-led-scoreboard/3.0 +https://github.com/MLB-LED-Scoreboard/mlb-led-scoreboard"
            if len(self.feed_urls) > 0:
                debug.log("Feed URLs found...")
                for idx, url in enumerate(self.feed_urls):
                    if idx < HEADLINE_MAX_FEEDS:  # Only parse MAX teams to prevent potential hangs
                        debug.log("Fetching {}".format(url))
                        f = feedparser.parse(url)
                        try:
                            title = f.feed.title.encode("ascii", "ignore")
                            debug.log(
                                "Fetched feed '{}' with {} entries.".format(
                                    title, len(f.entries)))
                            feeds.append(f)
                        except AttributeError:
                            debug.warning(
                                "There was a problem fetching {}".format(url))
                self.feed_data = feeds

    def ticker_string(self, max_entries=HEADLINE_MAX_ENTRIES):
        ticker = ""
        if self.include_date:
            date_string = datetime.now().strftime(self.date_format)
            ticker = self.__add_string_to_ticker(ticker, date_string)

        if self.include_countdowns:
            countdown_string = self.important_dates.next_important_date_string(
            )

            # If we get None back from this method, we don't have an important date coming soon
            if countdown_string is not None:
                ticker = self.__add_string_to_ticker(ticker, countdown_string)

        if self.feed_data is not None:
            ticker = self.__add_string_to_ticker(ticker, "")
            for feed in self.feed_data:
                ticker += self.__strings_for_feed(feed, max_entries)

        # In case all of the ticker options are turned off and there's no data, return the date
        return datetime.now().strftime(
            FALLBACK_DATE_FORMAT) if len(ticker) < 1 else ticker

    def __add_string_to_ticker(self, ticker, text_to_add):
        t = ticker
        if len(t) > 0:
            t += (" " * HEADLINE_SPACER_SIZE)
        return (t + text_to_add)

    def available(self):
        return self.feed_data is not None

    def __strings_for_feed(self, feed, max_entries):
        spaces = " " * HEADLINE_SPACER_SIZE
        title = feed.feed.title.encode("ascii", "ignore")
        headlines = ""

        for idx, entry in enumerate(feed.entries):
            if idx < max_entries:
                h = HTMLParser()
                text = h.unescape(entry.title.encode("ascii", "ignore"))
                headlines += text + spaces
        return title + spaces + headlines

    def __compile_feed_list(self):
        if self.include_mlb:
            self.feed_urls.append(self.__mlb_url_for_team("MLB"))

        if self.include_preferred:
            if len(self.preferred_teams) > 0:
                for team in self.preferred_teams:
                    self.feed_urls.append(self.__mlb_url_for_team(team))

        if self.include_traderumors:
            if len(self.preferred_teams) > 0:
                for team in self.preferred_teams:
                    self.feed_urls.append(
                        self.__traderumors_url_for_team(team))

    def __mlb_url_for_team(self, team_name):
        return "{}/{}.xml".format(MLB_BASE, MLB_FEEDS[team_name])

    def __traderumors_url_for_team(self, team_name):
        return "{}/{}/{}".format(TRADE_BASE, TRADE_FEEDS[team_name],
                                 TRADE_PATH)

    def __should_update(self):
        endtime = time.time()
        time_delta = endtime - self.starttime
        return time_delta >= HEADLINE_UPDATE_RATE
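
A minimal usage sketch for the class above, assuming a config object exposing the news_ticker_* fields read in __init__ and the HEADLINE_* constants from the surrounding module:

# Hypothetical driver; `config` is an assumption mirroring the attributes
# __init__ reads above.
headlines = Headlines(config)          # compiles the feed list and forces update(True)
if headlines.available():              # True once update() has stored fetched feeds
    print(headlines.ticker_string())   # date, countdowns, then headline text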
Example 4
class Data:
  def __init__(self, coordinates, radius):
    self.coordinates = coordinates
    self.radius = radius
    self.dates = Dates(coordinates)
    print("before_int")
    self.dates.make_table_with_coordinates_and_dates()
    print("finish_init")

  def add_feature_to_coordinates(self, feature, type):
      global Final
      for file in feature:
          X = pd.read_csv('features/' + file + '.csv')
          X = X.values
          Y = pd.read_csv("junctions.csv")
          Y = Y.values
          New = [['x', 'y', file]]
          # Build the ball tree on the coordinate columns only; the third
          # column of the feature file holds the value being aggregated.
          Z = pd.read_csv('features/' + file + '.csv')
          Z.drop(Z.columns[[2]], axis=1, inplace=True)
          ball_tree = neighbors.BallTree(Z, leaf_size=2)
          for i in range(len(Y)):
              if type == 'count' or type == 'avg':
                  count = ball_tree.query_radius([Y[i]], r=self.radius * 1000, count_only=True)
                  x = [Y[i][0], Y[i][1], int(count)]
              if type == 'sum' or type == 'avg':
                  ind = ball_tree.query_radius([Y[i]], r=self.radius * 1000)
                  # Sum the value of each neighbour returned by the radius
                  # query, indexing X by neighbour row rather than loop position.
                  total = 0
                  for j in ind[0]:
                      total += X[j][2]
                  if type == 'sum':
                      x = [Y[i][0], Y[i][1], total]
                  if type == 'avg':
                      if int(count) == 0:
                          x = [Y[i][0], Y[i][1], 0]
                      else:
                          x = [Y[i][0], Y[i][1], total / int(count)]
              New.append(x)
          with open("out" + file + '.csv', "w", newline="") as f:
              writer = csv.writer(f)
              writer.writerows(New)
          b = pd.read_csv("out" + file + '.csv')
          Final = Final.merge(b, on=('x', 'y'))
          # Min-max normalise the newly merged column to [0, 1].
          lo, hi = Final[file].min(), Final[file].max()
          Final[file] = round((Final[file] - lo) / (hi - lo), 3)
          os.remove("out" + file + '.csv')

  # Build "AccRadius.csv": for every junction, list the accident nodes that fall within its radius.
  def fits_coordinate_to_accident_nodes_within_its_radius(self, police_data):
      output = []
      title = ['x', 'y', 'year', 'month', 'day', 'humra']
      output.append(title)
      for fileAcc in police_data:
          X = pd.read_csv('acc/' + fileAcc + '.csv')
          C = pd.read_csv('acc/' + fileAcc + '.csv')
          C.drop(C.loc[:, 'pk_teuna_fikt':'STATUS_IGUN'].columns, axis=1, inplace=True)
          C['X'].replace('', np.nan, inplace=True)
          C.dropna(subset=['X'], inplace=True)
          C = C.values
          X.drop(X.loc[:, 'sug_tik':'YEHIDA'].columns, axis=1, inplace=True)
          X.drop(X.loc[:, 'SHAA':'RAMZOR'].columns, axis=1, inplace=True)
          X.drop(X.loc[:, 'SUG_TEUNA':'STATUS_IGUN'].columns, axis=1, inplace=True)
          X['X'].replace('', np.nan, inplace=True)
          X.dropna(subset=['X'], inplace=True)
          X = X.values
          Y = pd.read_csv("junctions.csv")
          Y = Y.values
          ball_tree = neighbors.BallTree(C, leaf_size=2)
          for i in range(len(Y)):
              ind = ball_tree.query_radius([Y[i]], r=1000)
              for j in (ind[0]):
                  x = [Y[i][0], Y[i][1], int(X[j][1]), int(X[j][2]), int(X[j][3]), int(4-X[j][4])]
                  output.append(x)
      with open("AccRadius.csv", "w", newline="") as f:
          writer = csv.writer(f)
          writer.writerows(output)

  def calculate_classification(self):
      d = {'x': [], 'y': [], 'year': [], 'month': [], 'day': [], 'humra': []}
      with open('AccRadius.csv', 'r') as csvFile:
          data = csv.reader(csvFile)
          next(data)  # skip the header row written by the previous step
          for row in data:
              d['x'].append(float(row[0]))
              d['y'].append(float(row[1]))
              d['year'].append(row[2])
              d['month'].append(row[3])
              d['day'].append(row[4])
              d['humra'].append(float(row[5]))
          df = pd.DataFrame(d)
          sum_of_humra = df.groupby(['x', 'y', 'year', 'month', 'day'])['humra'].sum().reset_index()
          count_of_humra = df.groupby(['x', 'y', 'year', 'month', 'day'])['humra'].count().reset_index()
          df1 = df.groupby(['x', 'y', 'year', 'month', 'day'])['humra'].apply(lambda x: (x == 1).sum()).reset_index(name='count1')
          df2 = df.groupby(['x', 'y', 'year', 'month', 'day'])['humra'].apply(lambda x: (x == 2).sum()).reset_index(name='count2')
          df3 = df.groupby(['x', 'y', 'year', 'month', 'day'])['humra'].apply(lambda x: (x == 3).sum()).reset_index(name='count3')

          s = sum_of_humra['humra'].values
          c = count_of_humra['humra'].values
          x = sum_of_humra['x'].values
          y = sum_of_humra['y'].values
          year = sum_of_humra['year'].values
          month = sum_of_humra['month'].values
          day = sum_of_humra['day'].values
          c1 = df1['count1']
          c2 = df2['count2']
          c3 = df3['count3']

          arr = []
          arr.append(['x', 'y', 'day', 'month', 'year', 'sum_humra', 'count_humra', 'c1','c2','c3'])
          for j in range(len(s)):  # no header group to drop; the header is skipped when AccRadius.csv is read
              arr.append([x[j], y[j], day[j], month[j], float(year[j]), s[j], c[j], c1[j], c2[j], c3[j]])

          with open('data_temp.csv', 'w', newline='') as csvFile1:
              writer = csv.writer(csvFile1)
              writer.writerows(arr)

          with open('data_temp.csv') as in_file:
              with open('data.csv', 'w', newline='') as out_file:
                  writer = csv.writer(out_file)
                  for row in csv.reader(in_file):
                      if any(row):
                          writer.writerow(row)
          os.remove('data_temp.csv')
          print("finish calculate_classification")

  def find_the_most_closest_junction(self, file, col_name, num):
      New = []
      x = ['x', 'y']
      x.extend(col_name)
      New.append(x)
      X = pd.read_csv(file)
      C = pd.read_csv(file)
      # Keep only the coordinate columns for the tree; the feature columns
      # listed in col_name are dropped.
      for name in col_name:
          C.drop(name, axis=1, inplace=True)
      X = X.values
      Y = pd.read_csv("junctions.csv")
      Y = Y.values
      kdt = KDTree(C, leaf_size=30, metric='euclidean')
      for i in range(len(Y)):
          result = kdt.query([Y[i]], k=num, return_distance=False)
          for m in range(num):
              Yind = result[0][m]
              x = [Y[i][0], Y[i][1]]
              # Assumes the source file stores its feature columns first
              # and the x, y coordinates in the last two columns.
              for f in range(len(X[Yind]) - 2):
                  x.append(float(X[Yind][f]))
              New.append(x)
      return New

  def fit_centrelized_junctions_to_table(self):
      self.find_min_and_merge_to_big_table(False, 1, 'centerlizeToRoad.csv', ['center'], ('x', 'y'))
      print("finish fit_centrelized_junctions_to_table")

  def fit_traffic_signals_junctions_to_table(self):
      global Final
      b = pd.read_csv('traffic_signals.csv')
      Final = Final.merge(b, on=('x', 'y'), how='left')
      print("finish fit_traffic_signals_junctions_to_table")

  def merge_files_to_big_table(self, file, fields=('x', 'y')):
      b = pd.read_csv(file)
      a = pd.read_csv('data.csv')
      merge_file = a.merge(b, on=fields, how="left")
      merge_file.to_csv('data.csv', index=False)

  def fit_ages_to_table(self):
      self.find_min_and_merge_to_big_table(True, 3, 'gil.csv', ['Elders', 'Adults', 'Teen'], ('x', 'y'))
      print("finish fit_ages_to_table")

  def fit_ped_to_table(self):
      self.find_min_and_merge_to_big_table(True, 1, 'ped.csv', ['ped', 'year'], ('x', 'y', 'year'), 10)
      print("finish fit_ped_to_table")

  def fit_weather(self):
      fields_to_merge = ('x', 'y', 'day', 'month', 'year')
      self.find_min_and_merge_to_big_table(True, 1, 'Weather/rain.csv', ['rain', 'day', 'month', 'year'], fields_to_merge, 3650)
      print("finish rain")
      self.find_min_and_merge_to_big_table(True, 1, 'Weather/wind.csv', ['speed', 'day', 'month', 'year'], fields_to_merge, 3650)
      print("finish wind")
      self.find_min_and_merge_to_big_table(True, 2, 'Weather/temp.csv', ['MAX_TEMP', 'MIN_TEMP', 'day', 'month', 'year'], fields_to_merge, 3650)
      print("finish temp")

  def find_min_and_merge_to_big_table(self, normalize, j, file, col_names, fields_to_merge, num=1):
      global Final
      new_data = self.find_the_most_closest_junction(file, col_names, num)
      with open("tmp.csv", "w", newline="") as f:
          writer = csv.writer(f)
          writer.writerows(new_data)
      b = pd.read_csv("tmp.csv")
      Final = Final.merge(b, on=fields_to_merge, how='left')
      if normalize:
          # Min-max normalise the first j merged columns to [0, 1].
          for i in range(j):
              lo, hi = Final[col_names[i]].min(), Final[col_names[i]].max()
              Final[col_names[i]] = round((Final[col_names[i]] - lo) / (hi - lo), 3)
      os.remove("tmp.csv")
Example 5
date_funcs = [
    Dates.weeks,
    Dates.days
]

query_template = """SELECT message, direction FROM Texts
WHERE
    conversant_two = '%s'
AND
    timestamp >= '%s' and timestamp <= '%s'"""

api = pennebaker.Api()

for (conversant_one, conversant_two, start, end) in text_aggregates:
    for (start_date, end_date) in Dates.weeks(start, end):
        text_1 = ""
        text_2 = ""
        num_exchanged_texts = 0

        # Concatenate the week's messages by direction: text_1 collects what
        # was sent, text_2 what was received.
        for row in DB.query(query_template % (conversant_two, start_date, end_date)):
            direction = int(row[1])

            if direction == Direction.Sent:
                text_1 += " " + row[0]
            if direction == Direction.Received:
                text_2 += " " + row[0]

            num_exchanged_texts += 1

        # Language style matching score for this week's exchange.
        lsm = api.compare(text_1, text_2)
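
The script above treats Dates.weeks (and Dates.days) as generators of consecutive (start, end) windows over a date range. A minimal sketch consistent with that call pattern, assuming datetime.date arguments rather than the library's actual implementation:

from datetime import timedelta

# Hypothetical implementation of the Dates.weeks interface used above:
# yields consecutive (window_start, window_end) pairs covering [start, end].
class Dates:
    @staticmethod
    def weeks(start, end):
        cursor = start
        while cursor < end:
            window_end = min(cursor + timedelta(days=7), end)
            yield (cursor, window_end)
            cursor = window_end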