Example #1
0
def merge_json():
    """Merge per-year department and category counts into one nested JSON.

    Reads "year_dept.json" and "year_category.json" (each assumed to be a
    list of records with "CREATION YEAR", "ZIP CODE", "COUNT" and either
    "DEPARTMENT" or "CATEGORY" fields -- TODO confirm against the upstream
    writer) and writes "final.json" shaped as:

        {year: {zip: {"DEPARTMENT": [...], "CATEGORY": [...]}}}
    """
    final_dict = {}

    def _merge(records, field):
        # Group each record under its (year, zip) bucket and append a
        # {"name", "count"} entry to the bucket's `field` list.
        for each in records:
            year = str(each["CREATION YEAR"])
            # ZIP CODE may arrive as a float (e.g. 64131.0); normalise
            # through int() before stringifying, as the original code did.
            zip_code = str(int(each["ZIP CODE"]))
            # setdefault replaces the original chain of
            # `key not in d.keys()` membership checks.
            bucket = final_dict.setdefault(year, {}).setdefault(zip_code, {})
            bucket.setdefault(field, []).append(
                {"name": each[field], "count": each["COUNT"]})

    _merge(read_json("year_dept.json"), "DEPARTMENT")
    _merge(read_json("year_category.json"), "CATEGORY")
    write_json(final_dict, "final.json")
Example #2
0
def analyze_predictions():
    """Replace numeric label columns with category names and save a merged CSV.

    Reads the prediction CSV and the master CSV (paths come from module-level
    globals), maps DEPT_LABEL/PROB_LABEL ids to names via the JSON lookup
    tables, joins on 'CASE ID' and writes the result.
    """
    # Lookup tables mapping stringified label ids to human-readable names.
    dept_lookup = read_json(dept_category_file)
    prob_lookup = read_json(prob_category_file)

    predictions = pd.read_csv(merged_file)
    predictions['DEPT_LABEL'] = predictions['DEPT_LABEL'].apply(
        lambda label: dept_lookup[str(label)])
    predictions['PROB_LABEL'] = predictions['PROB_LABEL'].apply(
        lambda label: prob_lookup[str(label)])

    combined = pd.read_csv(master_dataframe_file).merge(predictions,
                                                        on='CASE ID')
    # Drop index columns left behind by earlier to_csv round-trips.
    combined.drop(columns=['Unnamed: 0', 'Unnamed: 0.1'], inplace=True)
    combined.to_csv(master_dataframe_file_pred, header=True, index=False)
 def remove_item(self, *args):
     """Remove the entry equal to ``list(args)`` from the JSON list file.

     Removal stays best-effort: a missing entry is logged, not raised, and
     the (possibly unchanged) list is always rewritten to ``self.filename``.
     """
     data = read_json(self.filename) or []
     try:
         data.remove(list(args))
     except ValueError:
         # list.remove raises ValueError when the entry is absent; catching
         # it specifically replaces the original bare `except:`, which also
         # hid unrelated bugs (e.g. data not being a list).
         log.err("Can't remove " + str(list(args)) + " from " + str(data))
     with open(self.filename, 'w+') as file:
         file.write(json.dumps(data))
Example #4
0
def testFeatures():
  """Classify every recipe in ``test_file`` and print "id,cuisine" per line."""
  data = read_json(test_file)
  # Test recipes carry no cuisine label; the classifier must supply it.
  data = [Recipe(id=recipe["id"],
                 cuisine=None,
                 ingredients=recipe["ingredients"]) for recipe in data]
  for r in data:
    c = getPerceptronCuisineFeatures(r)
    # Parenthesised single-argument print behaves identically on Python 2
    # and 3, fixing the Python-2-only statement form.
    print("{},{}".format(r.id, c))
Example #5
0
 def remove_item(self, *args):
     """Best-effort removal of the entry ``list(args)`` from the JSON list.

     Logs (rather than raises) when the entry is not present, then rewrites
     the file either way.
     """
     data = read_json(self.filename) or []
     try:
         data.remove(list(args))
     except ValueError:
         # Narrowed from a bare `except:`: list.remove only raises
         # ValueError for a missing value; other failures should surface.
         log.err("Can't remove " + str(list(args)) + " from " + str(data))
     with open(self.filename, 'w+') as file:
         file.write(json.dumps(data))
 def remove(self, index):
     """Delete the element at ``index`` from the JSON list file.

     Out-of-range indexes are logged and ignored; the file is only
     rewritten when an element was actually deleted.
     """
     data = read_json(self.filename) or []
     if index < len(data):
         del data[index]
         with open(self.filename, 'w+') as file:
             file.write(json.dumps(data))
     else:
         log.err("Trying to remove something out of index? size: {}, index: {}".format(len(data), index))
Example #7
0
 def remove(self, index):
     """Drop the item stored at position ``index`` in this JSON-backed list."""
     data = read_json(self.filename) or []
     if index >= len(data):
         # Nothing to delete; record the bad request and leave the file
         # untouched.
         msg = "Trying to remove something out of index? size: {}, index: {}"
         log.err(msg.format(len(data), index))
         return
     del data[index]
     with open(self.filename, 'w+') as file:
         file.write(json.dumps(data))
 def read(self, callback):
     """Replay each stored entry through ``callback``.

     The data file is first renamed to "<filename>.backup" so a crash during
     replay cannot corrupt the live file; entries are then loaded from the
     backup and passed to ``callback`` as positional arguments. Per-entry
     errors are logged and replay continues.
     """
     if os.path.isfile(self.filename):
         BACKUP = self.filename + ".backup"
         # On Linux, os.rename will silently overwrite, but that is not the
         # case on Windows, so just remove the old backup if it exists:
         if os.path.isfile(BACKUP):
             os.remove(BACKUP)
         os.rename(self.filename, BACKUP)
         if os.path.isfile(self.filename):
             log.err("os.rename did not move file, removing it manually")
             os.remove(self.filename)
         data = read_json(BACKUP) or []
         for line in data:
             try:
                 callback(*line)
             except Exception as e:
                 # `except ... as e` is valid on Python 2.6+ and required on
                 # Python 3, unlike the old `except Exception, e` form.
                 log.err("Error while reading: " + str(e))
Example #9
0
 def read(self, callback):
     """Feed every logged entry to ``callback``, reading from a safety backup.

     Moves the live file aside to "<filename>.backup" before reading, so an
     interrupted replay never corrupts the original; each entry is unpacked
     as positional arguments to ``callback`` and failures are logged.
     """
     if os.path.isfile(self.filename):
         BACKUP = self.filename + ".backup"
         # On Linux, os.rename will silently overwrite, but that is not the
         # case on Windows, so just remove the old backup if it exists:
         if os.path.isfile(BACKUP):
             os.remove(BACKUP)
         os.rename(self.filename, BACKUP)
         if os.path.isfile(self.filename):
             log.err("os.rename did not move file, removing it manually")
             os.remove(self.filename)
         data = read_json(BACKUP) or []
         for line in data:
             try:
                 callback(*line)
             except Exception as e:
                 # Fixed Python-2-only `except Exception, e` syntax; the
                 # `as` form works on Python 2.6+ and 3.
                 log.err("Error while reading: " + str(e))
Example #10
0
    def get_gantt_tasks_by_week(self, week_number: int):
        """Return the tasks that start or end in ``week_number``.

        Tasks are read from 'data/tasks.json' and appear to be assumed
        sorted by start date: the loop stops at the first task whose start
        week is after the requested week (TODO confirm the file is ordered).
        """
        tasks = read_json('data/tasks.json')
        queried_tasks = []
        for task in tasks:
            start = DATE_HELPER.get_datetime_from_task(task["START DATE"])
            end = DATE_HELPER.get_datetime_from_task(task["END DATE"])
            start_week = DATE_HELPER.get_week(start,
                                              output_week_number=True)[1]
            end_week = DATE_HELPER.get_week(end, output_week_number=True)[1]

            if start_week == week_number or end_week == week_number:
                queried_tasks.append(task)

            # Reuse start_week instead of recomputing it with a second
            # DATE_HELPER.get_week call, as the original code did.
            if start_week > week_number:
                break
        return queried_tasks
Example #11
0
def process_json():
    """Pivot time-series records into {year: {zip: {"DEPARTMENT": [...]}}}.

    Reads "time_series.json" (a list of records with "CREATION YEAR",
    "CREATION MONTH", "ZIP CODE", "DEPARTMENT" and "DAYS TO CLOSE" fields)
    and writes the nested result to "time_series_final.json".
    """
    final_dict = {}
    for each in read_json("time_series.json"):
        year = str(each["CREATION YEAR"])
        # ZIP CODE may be parsed as a float; go through int() first,
        # matching the original normalisation.
        zip_code = str(int(each["ZIP CODE"]))
        month = str(each["CREATION MONTH"])

        # setdefault replaces the original chain of
        # `key not in d.keys()` membership checks.
        bucket = final_dict.setdefault(year, {}).setdefault(zip_code, {})
        bucket.setdefault("DEPARTMENT", []).append({
            "CREATION MONTH": month,
            "NAME": each["DEPARTMENT"],
            # NOTE(review): the value stored under "COUNT" is actually
            # "DAYS TO CLOSE", mirroring the original code -- confirm naming.
            "COUNT": each["DAYS TO CLOSE"]
        })
    write_json(final_dict, "time_series_final.json")
Example #12
0
import os
from utils.json_utils import read_json
import pandas as pd

# Project data root; machine-specific absolute path (TODO: make configurable).
root_dir_path = "/home/charan/Documents/workspaces/python_workspaces/Data/ADL_Project"
prob_category = os.path.join(root_dir_path, "problem/prob_category.json")
dept_category = os.path.join(root_dir_path, "dept/dept_category.json")
# The two path variables above are immediately rebound to the loaded JSON
# mappings, so read_json runs (and may fail) at import time.
prob_category = read_json(prob_category)
dept_category = read_json(dept_category)

def apply_dept(input_string):
    """Return the positional index of ``input_string`` among the values of
    the module-level ``dept_category`` mapping (raises ValueError if absent).
    """
    known_departments = list(dept_category.values())
    return known_departments.index(input_string)


# Normalisation table for raw department names: merges spelling variants of
# "Parks and Rec" and maps departments to discard to the sentinel "DROP"
# (presumably filtered out downstream -- TODO confirm against the consumer).
re_map = {
    "Parks & Rec": "Parks and Rec",
    "Parks & Recreation": "Parks and Rec",
    "Information Technology": "DROP",
    "NCS": "DROP",
    "Housing Community Dev": "DROP",
    "IT": "DROP",
    "Municipal Court": "DROP"
}


def apply_process_dept(input_string):
    """Map a raw department name through the module-level ``re_map`` table.

    Returns the remapped name (possibly the sentinel "DROP") when a mapping
    exists, otherwise ``input_string`` unchanged.
    """
    # dict.get replaces the original `in list(re_map.keys())` membership
    # test, which built a throwaway list on every call.
    return re_map.get(input_string, input_string)
Example #13
0
import pandas as pd
import os
from utils.json_utils import write_json, read_json

# Project data root; machine-specific absolute path (TODO: make configurable).
root_dir = "/home/charan/Documents/workspaces/python_workspaces/Data/ADL_Project"
prob_file = os.path.join(root_dir, "problem/prob_classification_df.csv")
dept_file = os.path.join(root_dir, "dept/dept_classification_df.csv")
final_df = os.path.join(root_dir, "final_data/311_Cases_master_with_desc.csv")
dept_json = os.path.join(root_dir, "dept/dept_category.json")
prob_json = os.path.join(root_dir, "problem/prob_category.json")
map_prob_json = os.path.join(
    root_dir,
    "problem/new_category.json")  ## Used for mapping to parent category.
# Rebound immediately from path to loaded mapping, so this I/O happens at
# import time.
map_prob_json = read_json(map_prob_json)
master_dataframe_file_pred = os.path.join(root_dir,
                                          "final_data/Data_with_no_desc.csv")
timeseries_data = os.path.join(root_dir, 'time-series/time_series.csv')

# Master dataset is loaded eagerly at import time as well.
final_data = pd.read_csv(final_df)

# Label lookup tables; start empty and are presumably populated elsewhere
# before apply_*_label is used -- TODO confirm.
prob_category = {}
dept_category = {}


def apply_dept_label(input_text):
    """Return the position of ``input_text`` among ``dept_category`` values
    (ValueError if not present)."""
    labels = list(dept_category.values())
    return labels.index(input_text)


def apply_prob_label(input_string):
    """Return the position of ``input_string`` among ``prob_category`` values
    (ValueError if not present)."""
    labels = list(prob_category.values())
    return labels.index(input_string)
Example #14
0
def get_settings(settings_file=DEFAULT_SETTINGS_FILE):
    """Load settings from ``settings_file``, creating the default file first
    when it does not exist yet."""
    if not path.isfile(settings_file):
        create_default_settings(settings_file)
    return read_json(settings_file)
Example #15
0
    def load_from_file(self, filename: str = ""):
        """Rebuild a logged simulation (environment, drones, trajectories)
        from a JSON log file.

        The file must contain a "drones" mapping (id -> spec with "info"
        and "trajectory") and an "environment" section, either inline under
        "obstacles" or referenced indirectly via a "path" key. Drone and
        sensor classes are imported dynamically from the module/class names
        recorded in each "info" block. Exits the process if the file is
        missing.
        """
        try:
            data = read_json(filename)

            # NOTE(review): assert is stripped under `python -O`; explicit
            # exceptions would make these validations robust. The "(unknown)"
            # text in the messages looks like a placeholder -- confirm.
            drones_dict = data.get("drones", None)
            assert not drones_dict is None, f"Drones not specified in file (unknown). Failed loading from file."

            env_dict = data.get("environment", None)
            assert not env_dict is None, f"Environment not specified in file (unknown). Failed loading from file."

            # Only the first drone is checked for a "trajectory" key; the
            # dict comprehension below assumes every drone has one.
            assert "trajectory" in dict(next(iter(drones_dict.values()))).keys(
            ), f"Trajectories not specified in file (unknown). Failed loading from file."
            # id -> {time: {"state": ..., "measurements": ...}} (presumed
            # shape, based on the replay loop below).
            traj_dict = {
                id: drone_dict["trajectory"]
                for id, drone_dict in drones_dict.items()
            }

            # An environment may be stored out-of-line; "path" points at a
            # second JSON file that replaces the inline dict.
            if "path" in env_dict.keys():
                env_dict = read_json(env_dict["path"])
            self.log["environment"] = Environment([
                Obstacle(obj["shape"], np.array(obj["points"]))
                for obj in env_dict["obstacles"]
            ])

            drones = []
            for id, drone in drones_dict.items():
                # Resolve the drone class from its recorded module/class name.
                d_class = getattr(
                    importlib.import_module(drone["info"]["module"]),
                    drone["info"]["cls"])
                # Remaining "info" entries (minus bookkeeping keys) become
                # constructor keyword arguments.
                d_args = dict(
                    filter(
                        lambda elem: elem[0] != "module" and elem[0] != "cls"
                        and elem[0] != "id", drone["info"].items()))

                # Sensors are reconstructed the same way; list-valued args
                # are promoted to numpy arrays.
                sensors = []
                for s in drone["info"]["sensors"]:
                    s_class = getattr(importlib.import_module(s["module"]),
                                      s["cls"])
                    s_args = {
                        k: np.array(v) if isinstance(v, list) else v
                        for k, v in dict(
                            filter(
                                lambda elem: elem[0] != "module" and elem[0] !=
                                "cls", s.items())).items()
                    }
                    sensors.append(s_class(**s_args))

                # NOTE(review): `sensors` is built but never passed to
                # d_class below -- confirm whether this is intentional.
                # The initial state is taken from the first trajectory entry.
                drones.append(
                    d_class(id=id,
                            state=np.array(
                                next(iter(
                                    drone["trajectory"].values()))["state"]),
                            **d_args))
            self.__log_info(drones)

            # Replay every (time, state, measurements) entry into the log.
            for id, drone_traj in traj_dict.items():
                for time, state_meas_dict in drone_traj.items():
                    # Linear scan to find the drone by id for each entry.
                    drone = list(filter(lambda d: d.id == id, drones))[0]
                    self.log_time_step(
                        drone.generate_time_entry(
                            np.array(state_meas_dict["state"]), [
                                float(m["measurement"])
                                for m in state_meas_dict["measurements"]
                            ]), float(time))

        except FileNotFoundError:
            # NOTE(review): exit(0) reports success to the shell despite the
            # failure; a nonzero code (or re-raise) would be more accurate.
            print(
                f"Could not find file named (unknown). Loading log file failed. Exiting."
            )
            exit(0)
Example #16
0
 def save(self, *args):
     """Append ``args`` (serialised as a JSON array) to the list stored in
     ``self.filename``, then rewrite the whole file."""
     existing = read_json(self.filename) or []
     existing.append(args)
     with open(self.filename, 'w+') as file:
         file.write(json.dumps(existing))
Example #17
0
def get_settings(settings_file=DEFAULT_SETTINGS_FILE):
    """Return the settings dict, writing a default settings file first when
    none exists at ``settings_file``."""
    settings_missing = not path.isfile(settings_file)
    if settings_missing:
        create_default_settings(settings_file)
    settings = read_json(settings_file)
    return settings
Example #18
0
 def save(self, *args):
     """Persist ``args`` as one more entry of the JSON list file."""
     entries = read_json(self.filename) or []
     entries.append(args)
     serialized = json.dumps(entries)
     with open(self.filename, 'w+') as file:
         file.write(serialized)
Example #19
0
    ws = {}
    for ing in recipe.ingredients:
      for w in ing.split():
        ws[w] = ws.get(w, 0.0) + 1.0
    for k, v in ws.iteritems():
      features.append(["w_{}".format(k), v])
  return features

def testFeatures():
  """Run the classifier over ``test_file`` recipes, printing "id,cuisine"."""
  data = read_json(test_file)
  # Unlabelled test recipes: cuisine is left for the classifier to predict.
  data = [Recipe(id=recipe["id"],
                 cuisine=None,
                 ingredients=recipe["ingredients"]) for recipe in data]
  for r in data:
    c = getPerceptronCuisineFeatures(r)
    # Parenthesised single-argument print is valid on Python 2 and 3,
    # replacing the Python-2-only print statement.
    print("{},{}".format(r.id, c))

def participate(data):
  """Train the perceptron on ``data`` for ``iterations`` epochs.

  Each epoch shuffles the data, runs one training pass, then evaluates on
  the test set via testFeatures().
  """
  ingest(data)
  initWeights()
  for x in range(0, iterations):
    random.shuffle(data)
    perceptronFeatures(data)
    # Parenthesised single-argument print works on both Python 2 and 3,
    # replacing the Python-2-only print statement.
    print("@@@ Iteration: " + str(x + 1) + " @@@")
    testFeatures()

if __name__ == '__main__':
  # Load labelled training recipes and kick off the training loop.
  raw_data = read_json(training_file)
  data = [
      Recipe(id=recipe["id"],
             cuisine=recipe["cuisine"],
             ingredients=recipe["ingredients"]) for recipe in raw_data
  ]
  participate(data)