Exemplo n.º 1
0
def main2() -> None:
    """Build per-regulation module listings as JSON files.

    Downloads (or reads from the local JSON cache) the TUCaN course
    catalogue and the inferno regulation data, joins courses with
    modules, and writes one '<prefix>-<short_regulation>.json' file per
    regulation, plus a FB-13 test file.
    """
    def json_or(x: str, f: Callable[[], X]) -> X:
        # Cache helper: return the cached JSON under `prefix+x` if it
        # exists, otherwise call `f` to produce the data and cache it.
        return utils.json_read_or(prefix+x, f)

    # pre-lookup: regulation data and the three TUCaN course listings
    current_semester = json_or("current_semester.json", get_current_semester)
    inferno = json_or("pre-inferno.json", lambda: download_inferno([]))
    pflicht = json_or("pre-tucan-pflicht.json", download_tucan_vv_pflicht)
    wahl    = json_or("pre-tucan-wahl.json", download_tucan_vv_wahl)
    search  = json_or("pre-tucan-search.json", lambda: download_tucan_vv_search(current_semester))

    # get courses: merge the listings, dedupe on the first two fields
    # (tuples are hashable; the raw JSON entries are lists and are not)
    course_ids = pflicht + wahl + search
    course_ids = list(sorted(set((i[0], i[1]) for i in course_ids)))
    courses    = json_or("tucan.json", lambda: download_from_tucan(course_ids))

#    # three alternative ways to get list of courses:
#    get_fbs = lambda: download_tucan_vv_catalogue(
#      ("01", "02", "03", "04", "05", "11", "13", "16", "18", "20",))
#    get_fb20 = lambda: download_tucan_vv_catalogue(("20",))
#    get_anmeldung = lambda: download_tucan_anmeldung()
#    courses2 = json_or('tucan-FBs.json',       get_fbs)
#    courses3 = json_or('tucan-FB20.json',      get_fb20)
#    courses4 = json_or('tucan-anmeldung.json', get_anmeldung)

    # get modules: ids referenced by any course plus ids listed in any regulation
    module_ids  = {module_id for course in courses
                             for module_id in course['modules']}
    module_ids |= {module_id for regulation in inferno.values()
                             for module_id in regulation.keys()}
    modules = json_or("inferno.json", lambda: download_from_inferno(module_ids))

    # computer science regulations
    modules = inner_join(courses, modules)
    for regulation in inferno.keys():
        module_part = dict()
        for k,v in modules.items():
          # Reasons a module may belong to this regulation (any one suffices):
          bsc                     = regulation.startswith("B.Sc.")
          listed_in_inferno       = k[0:10] in inferno[regulation]
          listed_in_tucan_wahl    = any(title.startswith(k) for title,url in wahl)
          listed_in_tucan_pflicht = any(title.startswith(k) for title,url in pflicht)
          linked_over_inferno     = regulation in str(v['regulations'])
          title_contains_for_inf  = any(item["title"]=="Titel" and "f\u00fcr Inf" in item["details"] for item in v["details"])

          # pflicht/title criteria only count for B.Sc. regulations
          if (listed_in_inferno
          or  listed_in_tucan_wahl
          or  linked_over_inferno
          or  (bsc and listed_in_tucan_pflicht)
          or  (bsc and title_contains_for_inf)
          ):
            module_part[k] = v
        # File name: regulation title reduced to its alphanumeric characters.
        short_regulation = "".join(c for c in regulation if c.isalnum())
        utils.json_write(prefix+'-'+short_regulation+'.json', module_part)

    if True:
        # test support for other FBs, here FB 13:
        module_part = {k:v for k,v in modules.items() if k.startswith("13-") }
        utils.json_write(prefix+'-BauUmwelt.json', module_part)

    print()
Exemplo n.º 2
0
def main2():
    """Build per-regulation module listings as JSON files.

    Reads (or downloads and caches) the TUCaN course lists and the
    inferno regulation data, joins courses with modules, and writes one
    '<prefix>-<short_regulation>.json' file per regulation plus a FB-13
    test file.
    """
    inferno = utils.json_read_or(prefix + "pre-inferno.json",
                                 lambda: download_inferno([]))
    regulations = list(inferno.keys())

    # Course ids from all three TUCaN listings.
    course_ids = utils.json_read_or(prefix + "pre-tucan-pflicht.json",
                                    download_tucan_vv_pflicht)
    course_ids += utils.json_read_or(prefix + "pre-tucan-wahl.json",
                                     download_tucan_vv_wahl)
    course_ids += utils.json_read_or(prefix + "pre-tucan-search.json",
                                     download_tucan_vv_search)
    # JSON yields lists, which are unhashable; convert to tuples to dedupe.
    course_ids = sorted(set(tuple(i) for i in course_ids))
    courses = utils.json_read_or(prefix + "tucan.json",
                                 lambda: download_from_tucan(course_ids))

    # Modules referenced by any course, plus those listed in any regulation.
    module_ids = {
        module_id
        for course in courses for module_id in course['modules']
    }
    module_ids |= {
        key
        for regulation in regulations for key in inferno[regulation].keys()
    }
    modules = utils.json_read_or(prefix + "inferno.json",
                                 lambda: download_from_inferno(module_ids))

    modules = inner_join(courses, modules)
    pflicht = utils.json_read(prefix + "pre-tucan-pflicht.json")
    # NOTE(review): `wahl` is read but never used in the filter below,
    # unlike the sibling implementation of this function — confirm whether
    # the "listed in wahl" criterion was dropped intentionally.
    wahl = utils.json_read(prefix + "pre-tucan-wahl.json")
    for regulation in regulations:
        # A module belongs to a regulation when it is linked from inferno,
        # listed in the regulation, or (for B.Sc. only) appears in the
        # pflicht listing or carries a "fuer Inf" title marker.
        module_part = {
            k: v
            for k, v in modules.items() if regulation in str(v['regulations'])
            or k[0:10] in inferno[regulation] or
            (regulation.startswith("B.Sc.") and
             (any(title.startswith(k) for title, url in pflicht) or any(
                 item["title"] == "Titel" and "f\u00fcr Inf" in item["details"]
                 for item in v["details"])))
        }
        # File name: regulation title reduced to its alphanumeric characters.
        short_regulation = "".join(c for c in regulation if c.isalnum())
        utils.json_write(prefix + '-' + short_regulation + '.json',
                         module_part)

    # test support for other FBs, here FB 13:
    module_part = {k: v for k, v in modules.items() if k.startswith("13-")}
    utils.json_write(prefix + '-BauUmwelt.json', module_part)
    print()
 def get_server_settings(self):
    """Load this server's settings file, creating it from defaults if absent.

    Also rewrites the file whenever the stored server name has drifted
    from the live server object's name.
    """
    try:
       settings = utils.json_read(self._settings_filepath)
    except FileNotFoundError:
       # First run: materialise the default settings on disk.
       settings = copy.deepcopy(self.DEFAULT_SETTINGS)
       utils.json_write(self._settings_filepath, data=settings)
    # Keep the stored name in sync with the live server object.
    if settings["Server Name"] != self._server.name:
       settings["Server Name"] = self._server.name
       utils.json_write(self._settings_filepath, data=settings)
    # TODO: Add additional data verification with jsonschema
    return settings
Exemplo n.º 4
0
    def add_ignore(self, value):
        """Append ``value`` to the persisted list of ignored options.

        Returns '[pause]' when the '[done]' exit sentinel is selected,
        otherwise an empty string.
        """
        if self.arg == '[done]':
            return '[pause]'
        store = self.wf.datafile('user_ignore.json')
        ignored = utils.json_read(store)
        if ignored:
            # Deduplicate after appending the new entry.
            ignored.append(value)
            utils.json_write(list(set(ignored)), store)
        else:
            utils.json_write([value], store)
        return ''
Exemplo n.º 5
0
    def add_default(self, value):
        """Persist ``value`` into the user's default-options list.

        Returns '[pause]' when the '[done]' exit sentinel is selected,
        otherwise an empty string.
        """
        if self.arg == '[done]':
            return '[pause]'
        path = self.wf.datafile('user_defaults.json')
        # Treat a missing/empty file as an empty list, then dedupe.
        current = utils.json_read(path) or []
        current.append(value)
        utils.json_write(list(set(current)), path)
        return ''
Exemplo n.º 6
0
    def add_ignore(self, value):
        """Record ``value`` in the persisted ignore list (user_ignore.json).

        Returns '[pause]' for the '[done]' exit item, '' otherwise.
        """
        arg_out = ''
        if self.arg == '[done]':
            arg_out = '[pause]'
        else:
            data_path = self.wf.datafile('user_ignore.json')
            existing = utils.json_read(data_path)
            # Merge with any stored entries, deduplicating via a set.
            updated = list(set(existing + [value])) if existing else [value]
            utils.json_write(updated, data_path)
        return arg_out
Exemplo n.º 7
0
    def add_default(self, value):
        """Add ``value`` to the stored user defaults (user_defaults.json).

        Returns '[pause]' for the '[done]' exit item, '' otherwise.
        """
        if self.arg == '[done]':
            # '[done]' is the exit sentinel; ask the caller to pause.
            return '[pause]'
        target = self.wf.datafile('user_defaults.json')
        stored = utils.json_read(target)
        if not stored:
            utils.json_write([value], target)
        else:
            # Set union removes any duplicate of the new value.
            utils.json_write(list({*stored, value}), target)
        return ''
Exemplo n.º 8
0
    def _store_template_info(self, key, value):
        """Accumulate one ``key: value`` field of a user template.

        A 'name' key always starts a fresh template entry; any other key
        is attached to the first template that is still incomplete
        (fewer than 3 keys).  Always returns True.
        """
        path = self.wf.datafile('user_templates.json')
        templates = utils.json_read(path)
        if not templates:
            templates = [{key: value}]
        elif key == 'name':
            templates.append({key: value})
        else:
            # Attach the field to the first still-incomplete template.
            for template in templates:
                if len(template) != 3:
                    template[key] = value
                    break
        utils.json_write(templates, path)
        return True
Exemplo n.º 9
0
    def _store_template_info(self, key, value):
        """Store one field of a new user template in user_templates.json.

        'name' opens a new template dict; other keys fill the first
        template with fewer than 3 keys.  Always returns True.
        """
        datafile = self.wf.datafile('user_templates.json')
        stored = utils.json_read(datafile)
        if stored:
            if key == 'name':
                stored = stored + [{key: value}]
            else:
                # First template that still has room for another field.
                incomplete = next(
                    (t for t in stored if len(t.keys()) != 3), None)
                if incomplete is not None:
                    incomplete.update({key: value})
            updated = stored
        else:
            updated = [{key: value}]
        utils.json_write(updated, datafile)
        return True
Exemplo n.º 10
0
def MODEL_HEAD(model):
    """Start a training run of `model` for the request's train_id.

    Expects a JSON body with 'train_id' and 'parameter'.  On the first
    request for a train_id it creates the status file and empty log
    files, then dispatches to the matching `*_api` trainer; if the
    status file already exists the run is assumed started and nothing
    is launched.  Always responds with {'status': 'ok'}.
    """
    print('model is : ' + model)
    request_data = json.loads(request.data.decode('utf-8'))
    train_id = request_data['train_id']
    print(request_data)
    # A shallow copy suffices; values are passed through unchanged
    # (replaces the original manual key-by-key copy loop).
    parameter = dict(request_data['parameter'])
    file_name = ROOT_PATH + 'matchzoo_temp_files/data/' + train_id + '.json'
    dataset_path = ROOT_PATH + 'matchzoo_temp_files/files/' + train_id + '.train'
    init_dict = {
        'state': 'run',
        'data': {
            'loss': [[]],
            'accuracy': [[], [], []]
        }
    }
    # Dispatch table: model name -> trainer entry point (replaces the
    # two parallel lists indexed in lockstep).
    model_apis = {
        'dssm': dssm_api,
        'drmm': drmm_api,
        'cdssm': cdssm_api,
        'arcii': arcii_api,
        'matchpyramid': matchpyramid_api,
        'duet': duet_api,
        'mvlstm': mvlstm_api,
        'arci': arci_api,
        'krnm': krnm_api,
        'conv_knrm': conv_knrm_api,
    }
    if not os.path.exists(file_name):
        json_write(file_name, init_dict)
        # Create/truncate the run's log files.
        log_base = ROOT_PATH + 'matchzoo_temp_files/logger/' + train_id
        with open(log_base + '.log', 'w') as f:
            f.write('')
        with open(log_base + '.preprocess_log', 'w') as f:
            f.write('')
        qpool = IOpool()
        logdir = LogDir()
        api = model_apis.get(model)
        if api is not None:
            api(qpool, logdir, dataset_path, train_id, parameter)
    return jsonify({'status': 'ok'})
Exemplo n.º 11
0
def tune():
    """Launch a hyper-parameter tuning run for the request's train_id.

    Expects a JSON body with 'train_id', 'parameter' and 'epochs'.
    Initialises the status and preprocess-log files on the first call
    for a train_id and hands off to `tune_api`; subsequent calls are
    no-ops.  Always responds with {'status': 'ok'}.
    """
    payload = json.loads(request.data.decode('utf-8'))
    train_id = payload['train_id']
    parameter = payload['parameter']
    epochs = payload['epochs']
    status_file = ROOT_PATH + 'matchzoo_temp_files/data/' + train_id + '.json'
    dataset_path = ROOT_PATH + 'matchzoo_temp_files/files/' + train_id + '.train'
    initial_status = {
        'state': 'run',
        'data': {
            'loss': [[]],
            'accuracy': [[], [], []]
        }
    }
    # Guard clause: an existing status file means the run already started.
    if os.path.exists(status_file):
        return jsonify({'status': 'ok'})
    json_write(status_file, initial_status)
    preprocess_log = (ROOT_PATH + 'matchzoo_temp_files/logger/' + train_id +
                      '.preprocess_log')
    with open(preprocess_log, 'w') as f:
        f.write('')
    qpool = IOpool()
    logdir = LogDir()
    tune_api(qpool, logdir, dataset_path, train_id, parameter, epochs)
    return jsonify({'status': 'ok'})
 def save_server_settings(self, data):
    """Persist ``data`` as this server's settings file."""
    utils.json_write(self._settings_filepath, data=data)
Exemplo n.º 13
0
   async def _fill_buffers(self):
      """Prime the in-memory message cache for every server/channel.

      For each text channel, reads the channel's cached metadata from
      disk, then pulls every message newer than the stored
      "last message timestamp" into an in-memory buffer, flushing to
      disk in progressively smaller batches (5000, 1000, then 200).
      """
      await self._client.set_temp_game_status("filling cache buffers.")
      for server in self._client.servers:
         # Per-server mapping of channel id -> buffered message dicts.
         ch_dict = None
         try:
            ch_dict = self._data[server.id]
         except KeyError:
            ch_dict = {}
            self._data[server.id] = ch_dict

         for ch in server.channels:
            # Voice channels carry no message history to cache.
            if ch.type is discord.ChannelType.voice:
               continue
            print("MessageCache caching messages in #" + ch.name)

            # TODO: Rename these variable names.
            # TODO: Turn this into a function? (see duplicated code...)
            ch_dir = self._get_ch_dir(server.id, ch.id)
            ch_json_filepath = ch_dir + self._CH_JSON_FILENAME
            
            # Channel metadata; a missing file simply means "no cache yet".
            ch_json_data = None
            try:
               ch_json_data = utils.json_read(ch_json_filepath)
            except FileNotFoundError:
               ch_json_data = {}

            # TODO: Turn this into a function? (see duplicated code...)
            # Timestamp of the newest message already on disk; initialise
            # it (and rewrite the metadata file) with a minimal value if
            # the key is absent.
            ch_stored_timestamp = None
            try:
               ch_stored_timestamp = dateutil.parser.parse(ch_json_data["last message timestamp"])
            except KeyError:
               ch_stored_timestamp = datetime.datetime(datetime.MINYEAR, 1, 1)
               ch_json_data["last message timestamp"] = ch_stored_timestamp.isoformat()
               utils.json_write(ch_json_filepath, data=ch_json_data)

            # This will now fill a buffer all messages of a channel.
            # TODO: Consider filling a file, then reading off the file.
            msg_buffer = []
            try:
               # logs_from yields newest-first; stop at the first message
               # we have already cached.
               async for msg in self._client.logs_from(ch, limit=ARBITRARILY_LARGE_NUMBER):
                  if msg.timestamp <= ch_stored_timestamp:
                     break
                  # Insert in front since we're reading messages starting from most recent.
                  msg_buffer.insert(0, self._message_dict(msg))
            except discord.errors.Forbidden:
               # No read permission for this channel; skip it.
               print("MessageCache unable to read #" + ch.name)
               continue

            ch_dict[ch.id] = msg_buffer

            # Flush in shrinking batch sizes so most data hits disk early.
            # Move every 5000 messages to disk.
            while len(ch_dict[ch.id]) >= 5000:
               self._move_to_disk(server.id, ch.id, messages=5000)

            # Move every 1000 messages to disk.
            while len(ch_dict[ch.id]) >= 1000:
               self._move_to_disk(server.id, ch.id, messages=1000)

            # Now move every 200 messages to disk.
            while len(ch_dict[ch.id]) >= 200:
               self._move_to_disk(server.id, ch.id, messages=200)

      await self._client.remove_temp_game_status()
      return
Exemplo n.º 14
0
   def _move_to_disk(self, server_id, ch_id, messages=None):
      """Flush buffered messages for one channel into a numbered json file.

      messages: number of oldest buffered messages to flush, or None to
      flush the whole buffer.  Updates the channel metadata file's
      "last message timestamp" to the newest flushed message.  Flushing
      an empty buffer is a no-op.
      """
      print("MessageCache moving messages to disk.")
      ch_dir = self._get_ch_dir(server_id, ch_id)
      ch_json_filepath = ch_dir + self._CH_JSON_FILENAME

      # TODO: Turn this into a function? (see duplicated code...)
      # Channel metadata; a missing file simply means "no cache yet".
      try:
         ch_json_data = utils.json_read(ch_json_filepath)
      except FileNotFoundError:
         ch_json_data = {}

      # TODO: Turn this into a function? (see duplicated code...)
      # Ensure the metadata has a "last message timestamp" key, creating
      # it (and rewriting the file) with a minimal timestamp if absent.
      try:
         dateutil.parser.parse(ch_json_data["last message timestamp"])
      except KeyError:
         ch_stored_timestamp = datetime.datetime(datetime.MINYEAR, 1, 1)
         ch_json_data["last message timestamp"] = ch_stored_timestamp.isoformat()
         utils.json_write(ch_json_filepath, data=ch_json_data)

      ch_dict = self._data[server_id]

      # Split off the messages to be stored.
      if messages is None:
         to_store = ch_dict[ch_id]
         ch_dict[ch_id] = []
      else:
         to_store = ch_dict[ch_id][:messages]
         ch_dict[ch_id] = ch_dict[ch_id][messages:]

      # Bug fix: the original indexed `to_store[-1:][0]`, which raised
      # IndexError when the buffer was empty (e.g. messages=None on an
      # empty buffer).  Nothing to flush means nothing to do.
      if not to_store:
         return

      latest_message = to_store[-1]
      # NOTE(review): "t" may already be a string here (see the warning
      # loop below); .isoformat() would then raise, as in the original.
      ch_json_data["last message timestamp"] = latest_message["t"].isoformat()

      for msg_dict in to_store:
         # TODO: I still don't know what's causing this to be a string...
         # This temporary fix will have to do for now.
         if isinstance(msg_dict["t"], str):
            print("WARNING: msg_dict[t] is already a string. contents: " + msg_dict["t"])
         else:
            msg_dict["t"] = msg_dict["t"].isoformat() # Make serializable

      # Find the highest numbered "<n>.json" file already in the directory.
      highest_json_file_number = 0
      for file_name in os.listdir(ch_dir):
         if not file_name.endswith(".json"):
            continue
         try:
            file_number = int(file_name[:-5])
         except ValueError:
            # Not a numbered data file (e.g. the metadata json); skip.
            continue
         if file_number > highest_json_file_number:
            highest_json_file_number = file_number

      # Store data in the next available json file number.
      file_name = str(highest_json_file_number + 1) + ".json"
      utils.json_write(ch_dir + file_name, data=to_store)

      # Save latest message timestamp.
      utils.json_write(ch_json_filepath, data=ch_json_data)