Example #1
    def get_cluster_scheduler(self):
        """
        Get Hadoop cluster information.
        :param file: path where the output file is saved
        """
        url = self.hadoop_url + "scheduler"
        scheduler_file = os.path.join(self.file_path, "scheduler.csv")
        scheduler_file2 = os.path.join(self.file_path, "scheduler2.csv")

        try:
            results = urlopen(url, timeout=2000).read()
            results = json.loads(results)
            results = results['scheduler']['schedulerInfo']['queues']['queue']
            print(self.memcpu_info)
            for scheduler_info in results:
                results_copy = scheduler_info.copy()
                for key, value in results_copy['resourcesUsed'].items():
                    scheduler_info[key] = value / self.memcpu_info[key]
        except KeyError as error:
            logger.error("key error {0}".format(error))
            return
        except Exception as error:
            logger.error(error)
            return

        write_header = True
        if FileOperator.file_exits(scheduler_file):
            write_header = False
        headers = results[0].keys()
        FileOperator.write_to_csv(results, scheduler_file,
                                  headers=headers, write_header=write_header, model="a+")
        FileOperator.write_to_csv(results, scheduler_file2,
                                  headers=headers, write_header=write_header, model="w+")
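The snippet assumes a FileOperator helper whose implementation is not shown in this listing. A minimal sketch of what file_exits and write_to_csv might look like, with the signatures inferred from the call sites above (the headers, write_header and model arguments, and using model as the open() mode, are assumptions, not the project's actual code):

import csv
import os


class FileOperator(object):

    @staticmethod
    def file_exits(path):
        # True if the output file already exists, so the CSV header can be skipped.
        return os.path.isfile(path)

    @staticmethod
    def write_to_csv(rows, path, headers=None, write_header=True, model="a+"):
        # Write a list of dicts as CSV rows; `model` is used as the open() mode.
        with open(path, model, newline="") as handle:
            writer = csv.DictWriter(handle, fieldnames=headers, extrasaction="ignore")
            if write_header:
                writer.writeheader()
            writer.writerows(rows)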
Example #2
def main():
    log_file = "main_log.log"
    create_timed_rotating_log("log/" + log_file)
    logger = logging.getLogger("BasicLogger")
    logger.info("----- Starting Logging Session -----")
    config = configparser.ConfigParser()
    config.read('config.ini')

    # How to use the config values. REMOVE when done with setup of this!
    print(config.sections())
    print('scale of this whole thing from config file is: ' + config['grid']['scale'])
    # To get the values as integers:
    i = int(config['grid']['max_position_z'])
    print(i+2)

    # subprocess.call("../gcodepull.sh", shell=True)

    # opens the file named in the variables file
    length = range(FileOperator.OpenFile() - 3)
    Motor.setup()
    start = 2
    for row in length:
        # for the appropriate length, each row is worked through
        # and the needed steps are sent to the stepper motors
        next_row = row + start
        delta_step = FileOperator.NextMove(next_row)
        corrected_coords = FileOperator.MoveCorrect(delta_step)
        Motor.move(corrected_coords)
        print('finished')
    GPIO.cleanup()
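main() reads its settings from config.ini via configparser; a hypothetical file that would satisfy the two keys used above (the section and key names come from the snippet, the values are made up):

[grid]
scale = 10
max_position_z = 200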
Example #3
    def get_scheduler_info(self, running_application):
        logger.info("start get_scheduler_info")
        apps = running_application.copy(deep=True)

        apps = apps.groupby('queue')[['allocatedMB', 'allocatedVCores']].sum()
        apps['queueName'] = apps.index
        apps.insert(0, 'totalMemory', self.memcpu_info['memory'])
        apps.insert(0, 'totalCpu', self.memcpu_info['vCores'])
        apps.insert(0, 'memory', apps['allocatedMB'] / apps['totalMemory'])
        apps.insert(0, 'vCores', apps['allocatedVCores'] / apps['totalCpu'])

        scheduler_file = os.path.join(self.file_path, "scheduler_summary.csv")
        write_header = True
        if FileOperator.file_exits(scheduler_file):
            write_header = False
        apps.to_csv(scheduler_file,
                    header=write_header,
                    index=False,
                    mode="a+")

        logger.info("start get_cluster_scheduler")
        url = self.hadoop_url + "scheduler"
        scheduler_file2 = os.path.join(self.file_path, "scheduler_metric.csv")

        results = urlopen(url, timeout=2000).read()
        results = json.loads(results)
        results = results['scheduler']['schedulerInfo']['queues']['queue']
        headers = results[0].keys()
        for j in results:
            if 'queues' in j:
                del j['queues']
        FileOperator.write_to_csv(results,
                                  scheduler_file2,
                                  headers=headers,
                                  model="w+")
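get_scheduler_info boils down to a groupby-sum over the running applications followed by normalization against the cluster totals held in self.memcpu_info. A self-contained sketch of that pattern on made-up numbers:

import pandas as pd

# Toy application data; column names match the snippet, values are invented.
apps = pd.DataFrame({
    "queue": ["default", "default", "prod"],
    "allocatedMB": [2048, 1024, 4096],
    "allocatedVCores": [2, 1, 4],
})
memcpu_info = {"memory": 8192, "vCores": 8}

summary = apps.groupby("queue")[["allocatedMB", "allocatedVCores"]].sum()
summary["memory"] = summary["allocatedMB"] / memcpu_info["memory"]      # share of cluster memory
summary["vCores"] = summary["allocatedVCores"] / memcpu_info["vCores"]  # share of cluster cores
print(summary)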
Example #4
    def get_cluster_information(self):
        """
        get cluster information
        """
        url = self.hadoop_url + "metrics"
        write_header = True
        cluster_file = os.path.join(self.file_path, "cluster.csv")
        cluster_file2 = os.path.join(self.file_path, "cluster2.csv")
        if FileOperator.file_exits(cluster_file):
            write_header = False
        try:
            results = urlopen(url, timeout=2000).read()
            results = [json.loads(results)["clusterMetrics"]]
        except Exception as error:
            logger.error(error)
            return

        self.memcpu_info["memory"] = results[0].get('totalMB', 0)
        self.memcpu_info["vCores"] = results[0].get('totalVirtualCores', 0)
        self.get_cluster_scheduler()
        headers = results[0].keys()

        FileOperator.write_to_csv(results, cluster_file,
                                  headers=headers, write_header=write_header, model="a+")
        FileOperator.write_to_csv(results, cluster_file2,
                                  headers=headers, model="w")
Example #5
 def get_sparkjobs_information(self, applications):
     """
     get each application's jobs information
     :param applications: list contains applications information
     """
     app_jobs = []
     spark_job_file = os.path.join(self.file_path, "sparkjob.json")
     self.job_metrics = self.job_metrics.replace("\n", "").split(',')
     for application_items in applications:
         application_id = application_items["id"]
         application_rest_url = self.application_url + application_id + "/1/jobs"
         try:
             application_jobs_list = HadoopUtil.request_url(application_rest_url)
             application_jobs_list = json.loads(application_jobs_list)
         except urlerror:
             logger.warning("this application {0} is not "
                         "a spark type application".format(application_items["id"]))
         else:
             for application_job in application_jobs_list:
                 apps = {key: value for key, value in application_job.items()
                         if key in self.job_metrics}
                 app_jobs.append(dict(apps, **application_items))
     headers = app_jobs[0].keys()
     FileOperator.write_to_json(app_jobs, spark_job_file)
     FileOperator.write_to_csv(app_jobs, spark_job_file, headers=headers)
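The dict comprehension only keeps the keys listed in self.job_metrics; the same filtering pattern on a made-up job record:

job = {"jobId": 1, "status": "SUCCEEDED", "numTasks": 8, "name": "collect"}
job_metrics = ["jobId", "status"]

# Keep only the metrics of interest before merging with the application info.
filtered = {key: value for key, value in job.items() if key in job_metrics}
print(filtered)  # {'jobId': 1, 'status': 'SUCCEEDED'}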
Example #6
 def check_uniqeid_exist(uniqeid):
     uniqeid = uniqeid.upper()
     if FileOperator.csv_or_db() == "db":
         server_name, user_name, user_password, database_name = FileOperator.app_config_reader(
         )
         db = sql.connect(host=server_name,
                          user=user_name,
                          password=user_password,
                          charset='utf8',
                          use_unicode=True,
                          autocommit=True)
         cursor = db.cursor()
         query_in_users_exist = "SELECT unique_identifier FROM BloodDonationStorage.Donor WHERE unique_identifier = '" + uniqeid + "';"
         cursor.execute(query_in_users_exist)
         allusers = cursor.fetchall()
         if allusers:
             print("Already in the database.")
             return False
         else:
             return True
     else:
         file = open("Data/donors.csv", "r", encoding="utf-8")
         reader = csv.reader(file)
         for line in reader:
             if len(line) != 0 and uniqeid == line[6].upper():
                 print("Already in the database.")
                 file.close()
                 return False
         file.close()
         return True
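check_uniqeid_exist builds its SQL by string concatenation. Assuming `sql` is a DB-API driver such as PyMySQL, the same membership test can be written with a bound parameter instead (a sketch, not the project's code):

def uniqeid_is_new(cursor, uniqeid):
    # Parameterized variant of the lookup above; the driver escapes the value.
    query = ("SELECT unique_identifier FROM BloodDonationStorage.Donor "
             "WHERE unique_identifier = %s")
    cursor.execute(query, (uniqeid.upper(),))
    return not cursor.fetchall()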
Example #7
    def get_applications_information(self, query_parametes=None):
        """
        :param query_parametes: dict of filter conditions (e.g. successfully finished apps); if omitted, all applications are collected
          * state [deprecated] - state of the application
          * states - applications matching the given application states,
                specified as a comma-separated list.
          * finalStatus - the final status of the application -
                reported by the application itself
          * user - user name
          * queue - queue name
          * limit - total number of app objects to be returned
          * startedTimeBegin -
                applications with start time beginning with this time,
                specified in ms since epoch
          * startedTimeEnd -
                applications with start time ending with this time,
                specified in ms since epoch
          * finishedTimeBegin -
                applications with finish time beginning with this time,
                specified in ms since epoch
          * finishedTimeEnd -
                applications with finish time ending with this time,
                specified in ms since epoch
          * applicationTypes -
                applications matching the given application types,
                specified as a comma-separated list.
          * applicationTags -
                applications matching any of the given application tags,
                specified as a comma-separated list.
        :param file: path where the output file is saved
        example:
           query_parametes = {"finalStatus": "SUCCEEDED"}
           get_job(query_parametes=query_parametes)
        """
        hadoop_rest_url = self.hadoop_url + "apps?"
        app_file = os.path.join(self.file_path, "app.csv")

        try:
            for key, value in query_parametes.items():
                hadoop_rest_url += key + "=" + str(value) + "&"
        except AttributeError:
            logger.warning("didn't get any query_parametes, so collect all apps")

        json_result = HadoopUtil.request_url(hadoop_rest_url)
        try:
            list_result = json.loads(json_result)['apps']['app']
            headers = list_result[0].keys()
        except KeyError as error:
            logger.error("key error {0}".format(error))
        except TypeError:
            logger.warning("did not get any data from parameters "
                           "{0}".format(query_parametes))
        except Exception as error:
            logger.error(error)
        else:
            FileOperator.write_to_csv(list_result, app_file, headers=headers)
            self.get_sparkjobs_information(list_result)
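A usage sketch for the method above: with the documented parameters, the call below would collect only succeeded Spark applications (the collector instance name is hypothetical):

query_parametes = {"finalStatus": "SUCCEEDED", "applicationTypes": "SPARK", "limit": 100}
collector.get_applications_information(query_parametes=query_parametes)
# builds <hadoop_url>apps?finalStatus=SUCCEEDED&applicationTypes=SPARK&limit=100&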
Example #8
    def get_commonjobs_information(self):
        commonjob_file = os.path.join(self.file_path, "commonjob.csv")

        result = HadoopUtil.request_url(self.job_url)

        result = json.loads(result)["jobs"]
        if not result:
            return
        result = result["job"]
        headers = result[0].keys()
        FileOperator.write_to_csv(result, commonjob_file, headers=headers)
Example #9
 def delete_menu(which_file):
     os.system('cls')
     welcome_message = "Please give the id you would like to delete in the " + which_file[
         5:] + " file!"
     print("*" * len(welcome_message))
     print(welcome_message + "\n")
     deleting_data = input(">> ")
     if which_file == "Data/donors.csv":
         FileOperator.delete_from_database(which_file, "unique_identifier",
                                           deleting_data)
     elif which_file == "Data/donations.csv":
         FileOperator.delete_from_database(which_file, "id", deleting_data)
Example #10
def thread_main():
    """
    for each queue, train the model and predict
    """

    cluster_df = pd.read_csv(CLUSTER_INFILE)
    # total_mem = cluster_df["totalMB"].values[0]
    # total_cpu = cluster_df["totalVirtualCores"].values[0]

    scheduler_df = pd.read_csv(SCHEDULER_INFILE)
    scheduler_df = scheduler_df.dropna(how="any", axis=0)

    # scheduler_df["memory"] = scheduler_df["memory"] / total_mem
    # scheduler_df["vCores"] = scheduler_df["vCores"] / total_cpu

    queue_names = set(scheduler_df["queueName"].values)
    scheduler_df = scheduler_df.set_index("queueName")

    FileOperator.makesure_file_exits("model_input")
    FileOperator.makesure_file_exits("model_out")
    FileOperator.makesure_file_exits("model")

    # empty the pre_file
    FileOperator.write_list_tocsv([], PRE_FILE)

    for queue_name in queue_names:
        queue_information = scheduler_df.loc[queue_name, ["memory", "vCores"]]
        queue_information.insert(0, "times", range(queue_information.shape[0]))
        model_input_file = "./model_input/{0}.csv".format(queue_name)
        queue_information.to_csv(model_input_file, index=False, header=False)
        model_dir = "./model/{0}".format(queue_name)

        train(queue_name, model_input_file, PRE_FILE, model_dir,
              FLAGS.train_step, FLAGS.predict_step)
Example #11
    def event_data():
        print("Please enter the following informations!")
        date_of_event = Switch.general_data_inputer(["Date of the event", "Date of the event (YYYY.MM.DD)"])
        start_time = Switch.general_data_inputer(["Start time", "Start time (hh:mm)"])
        end_time = Switch.general_data_inputer(["End time", "End time (hh:mm)", start_time])
        zip_code = Switch.general_data_inputer(["Zip code", "Zip code"])
        city = Switch.general_data_inputer(["City", "City"])
        address = Switch.general_data_inputer(["Address", "Address"])
        available_beds = Switch.general_data_inputer(["Available beds", "Available beds"])
        planned_donor_number = Switch.general_data_inputer(["Planned donor number", "Planned donor number"])
        event_duration_time = EventCalculations.duration_in_time(start_time, end_time)
        colon_in_duration_time = str(event_duration_time).find(":")
        final_donor_number = EventCalculations.maximum_donor_number(available_beds, start_time, end_time)
        success_rate = EventCalculations.success_rate(planned_donor_number, final_donor_number)
        success_text = EventCalculations.success_text(success_rate)

        os.system('cls')

        print("\n" + "-" * 32 + "\n")
        print("Details of the planned event:\n")
        print("Date of the event:", date_of_event)
        print("Start time:", start_time)
        print("End time:", end_time)
        print("Event duration time: %s hour(s) %s minute(s)" % (str(event_duration_time)[:colon_in_duration_time],
                str(event_duration_time)[colon_in_duration_time+1:colon_in_duration_time+3]))
        print("Zip code:", zip_code)
        print("City:", city)
        print("Address:", address)
        print("Available beds:", available_beds)
        print("Planned donor number:", planned_donor_number)
        print("Maximum donor number:", final_donor_number)
        print("Percent of success:", success_rate, "%")
        print("Efficiency:", success_text)
        print("\n" + "-" * 32)

        if os.path.isfile("C:/Users/" + user_name + "/AppData/Local/Programs/Python/Python35-32/Lib/site-packages/colorama-0.3.3-py3.5.egg"):
            save = SaveMenu.save_menu(2, 21)
        else:
            save = SaveMenuOldFashioned.save_menu(2, "Do you want to save?")
            print()
        if save:
            every_file_data = [str(date_of_event).replace("-", "."), str(start_time)[:len(str(start_time))-3],\
                               str(end_time)[:len(str(end_time))-3], zip_code, city, address, available_beds, \
                               planned_donor_number, final_donor_number]
            header = "id,date_of_event,start_time,end_time,zip_code,city,address,number_of_available_beds," + \
                     "planned_donor_number,final_donor_number\n"
            FileOperator.save_new_data(every_file_data, header, 'Data/donations.csv')
            print("Save was successful!")
            time.sleep(2)
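EventCalculations.duration_in_time is not shown in this listing; one way such a helper might turn two "hh:mm" strings into the duration used above (purely illustrative, not the project's implementation):

from datetime import datetime


def duration_in_time(start_time, end_time):
    # Difference between two "hh:mm" strings as a timedelta.
    fmt = "%H:%M"
    return datetime.strptime(end_time, fmt) - datetime.strptime(start_time, fmt)


print(duration_in_time("09:30", "14:00"))  # 4:30:00, so str(...).find(":") == 1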
Example #12
 def import_modified_ios_resource(self):
     repository.load()
     fileOperator = FileOperator()
     # update the keyword entries
     for f in os.listdir(self.appConfig.ios_resources_root_directory):
         language = self.__get_ios_file_language(f)
         if len(language) <= 0:
             continue
         path = os.path.join(self.appConfig.ios_resources_root_directory, f,
                             "Localizable.strings")
         dist = fileOperator.read_ios_keywords(path)
         for k, v in dist.items():
             repository.try_ro_modify_keyword(k, v, language)
     # rewrite the repo json
     repository.rewrite_repo_json()
Example #13
 def listing_database_db(which_file):
     server_name, user_name, user_password, database_name = FileOperator.app_config_reader()
     if 'donors' in which_file:
         table_name = '.Donor'
     else:
         table_name = '.Event'
     sql_command, result, header = [], [], []
     sql_command.append("SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE " + \
                  "`TABLE_SCHEMA`='" + database_name + "' AND `TABLE_NAME`='" + table_name[1:] + "';")
     sql_command.append("SELECT * FROM " + database_name + table_name)
     dbcon = mysql.connector.connect(user=user_name, password=user_password, host=server_name, database=database_name)
     cursor = dbcon.cursor()
     for i, one_command in enumerate(sql_command):
         cursor.execute(one_command)
         for cursor_message in cursor:
             if i == 0:
                 header.append(cursor_message[0])
             else:
                 result.append(cursor_message)
     if len(result) != 0:
         if len(result) == 1:
             print("There is only one result:")
         elif len(result) > 1:
             print("There are " + str(len(result)) + " results:")
         print("-" * 52)
         for i in range(len(result)):
             ListingDataBase.printer(i + 1, header, result[i])
     else:
         print("There is no data corresponding to this query...")
     dbcon.close()
     getch()
Example #14
 def search_in_file(which_file):
     search_term = input("Search term: ")
     print("-" * 52)
     if FileOperator.csv_or_db() == 'db':
         Search.search_in_file_db(which_file, search_term)
     else:
         Search.search_in_file_csv(which_file, search_term)
Example #15
 def search_in_file_db(which_file, search_term):
     num_of_find = 1
     server_name, user_name, user_password, database_name = FileOperator.app_config_reader()
     sql_search_term = " LIKE '%" + search_term + "%' OR "
     if 'donors' in which_file:
         table_name = '.Donor'
     else:
         table_name = '.Event'
     sql_command, result, header = [], [], []
     sql_command.append("SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE " + \
                  "`TABLE_SCHEMA`='" + database_name + "' AND `TABLE_NAME`='" + table_name[1:] + "';")
     sql_command.append("SELECT * FROM " + database_name + table_name + " WHERE ")
     dbcon = mysql.connector.connect(user=user_name, password=user_password, host=server_name, database=database_name)
     cursor = dbcon.cursor()
     cursor.execute(sql_command[0])
     for cursor_message in cursor:
         header.append(cursor_message[0])
         for one_message in cursor_message:
             sql_command[1] += one_message + sql_search_term
     sql_command[1] = sql_command[1][0:len(sql_command[1]) - 4]
     cursor.execute(sql_command[1])
     for cursor_message in cursor:
         if Search.search_and_print(search_term, header, cursor_message, num_of_find):
             num_of_find += 1
     if num_of_find == 1:
         print("There is no data corresponding to this query...")
     dbcon.close()
Example #16
def update_cluster_info(rmq, cfg):
    cluster_file = cfg.get_cluster_metric_path()
    if FileOperator.file_exits(cluster_file):
        total_mb = datainput.read_cluster_csv(cluster_file)
        if total_mb == 0:
            return
        queue = rmq.get_queue('root')
        queue.data.add_totalMb(total_mb)
Example #17
 def get_cluster_information(self):
     logger.info("start get_cluster_information")
     url = self.hadoop_url + "metrics"
     write_header = True
     cluster_file = os.path.join(self.file_path, "cluster.csv")
     if FileOperator.file_exits(cluster_file):
         write_header = False
     results = urlopen(url, timeout=2000).read()
     results = [json.loads(results)["clusterMetrics"]]
     self.memcpu_info["memory"] = results[0].get('totalMB', 0)
     self.memcpu_info["vCores"] = results[0].get('totalVirtualCores', 0)
     headers = results[0].keys()
     FileOperator.write_to_csv(results,
                               cluster_file,
                               headers=headers,
                               write_header=write_header,
                               model="a+")
     self.get_applications_information()
Example #18
def update_predict_info(rmq, cfg):
    prediction_file = cfg.get_prediction_path()
    if FileOperator.file_exits(prediction_file):
        queue_wishes = datainput.read_prediction_csv(prediction_file)
        for wish in queue_wishes:
            queue = rmq.get_queue(wish.name)
            if queue is None:
                print("Unknown queue name6", wish.name)
                continue
            queue.data.update_queue_wish(wish)
Example #19
def update_app_info(rmq, cfg):
    app_file = cfg.get_job_metric_path()
    if FileOperator.file_exits(app_file):
        jobs = datainput.read_app_csv(app_file)
        for job in jobs:
            queue = rmq.get_queue(job.name)
            if queue is None:
                print("Unknown queue name4", job.name)
                continue
            queue.data.add_job(job)
Example #20
def init_data():
    print("src file: ", Const.SRC_FILE)
    sentence_list = FileOperator.f_open(Const.SRC_FILE)
    sentence_list = StringOperator.split_sentence(sentence_list)

    flatten_word_list = StringOperator.array_string_to_flatten(sentence_list)
    unique_char_set = StringOperator.array_char_to_unique(flatten_word_list)
    print("unique char set len :", len(unique_char_set))
    FileOperator.f_write(Const.UNIQ_SRC_FILE, unique_char_set)
    print("save unique file: ", Const.UNIQ_SRC_FILE)
    prob_state = ProbabilityState(Const.NUM_OF_CLUSTER, Const.PROB_FILE,
                                  "init")
    prob_state.save_prob(Const.PROB_FILE)
    w2v = Word2Vec(Const.W2V_SRC_FILE, Const.W2V_WEIGHT_FILE,
                   Const.WORD_FEAT_LEN, "init")

    data_array = []
    for word in unique_char_set:
        data_array.append(w2v.str_to_vector(word))
    MyKmeans(Const.NUM_OF_CLUSTER, Const.KMEANS_SAVE_FILE, data_array, "init")
Example #21
def update_scheduler_info(rmq, cfg):
    scheduler_file = cfg.get_scheduler_metric_path()
    if FileOperator.file_exits(scheduler_file):
        queue_configs = datainput.read_scheduler_csv(scheduler_file)
        for qc in queue_configs:
            queue = rmq.get_queue(qc.name)
            if queue is None:
                print("Unknown queue name", qc.name)
                continue
            else:
                queue.data.update_queue_config(qc)
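update_cluster_info, update_scheduler_info, update_app_info and update_predict_info all follow the same shape: check that the metric CSV exists, parse it with datainput, look the queue up in rmq, and push the parsed records into the queue's data. A hedged sketch of driving them together (constructing rmq and cfg is project-specific and assumed to happen elsewhere):

def refresh_queue_tree(rmq, cfg):
    # Fold every metric file into the queue tree in one pass.
    update_cluster_info(rmq, cfg)
    update_scheduler_info(rmq, cfg)
    update_app_info(rmq, cfg)
    update_predict_info(rmq, cfg)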
Example #22
def thread_main():
    """
  for queue to trainning model and predict
  """

    # cluster_df = pd.read_csv(CLUSTER_INFILE)
    # total_mem = cluster_df["totalMB"].values[0]
    # total_cpu = cluster_df["totalVirtualCores"].values[1]

    scheduler_df = pd.read_csv(SCHEDULER_INFILE, error_bad_lines=False)

    # scheduler_df["memory"] = scheduler_df["memory"] / total_mem
    # scheduler_df["vCores"] = scheduler_df["vCores"] / total_cpu

    queue_names = set(scheduler_df["queueName"].values)

    scheduler_df = scheduler_df.set_index("queueName")

    FileOperator.path_exits("model_input")
    FileOperator.path_exits("model_out")
    FileOperator.path_exits("model")

    # empty the pre_file
    FileOperator.write_list_tocsv([], PRE_FILE)

    for queue_name in queue_names:
        print('--------------queue:{0}-----------'.format(queue_name))
        queue_information = scheduler_df.loc[queue_name, ["memory"]]
        # queue_information['memory'] = round(queue_information['memory'], 2)
        queue_information = queue_information.replace(0.0, 0.01)
        queue_information.insert(0, "times", range(queue_information.shape[0]))

        model_input_file = "./model_input/{0}.csv".format(queue_name)

        FileOperator.write_list_tocsv([], model_input_file)

        queue_information.to_csv(model_input_file, index=False, header=False)
        model_dir = "./model/{0}".format(queue_name)

        train(queue_name, model_input_file, PRE_FILE, model_dir,
              FLAGS.train_step, FLAGS.predict_step)
Example #23
def load_data():
    read = FileOperator.f_open(Const.UNIQ_SRC_FILE)
    unique_char_set = read[-1].split(",")
    w2v = Word2Vec(Const.W2V_SRC_FILE, Const.W2V_WEIGHT_FILE,
                   Const.WORD_FEAT_LEN, "load")

    data_array = []
    for word in unique_char_set:
        data_array.append(w2v.str_to_vector(word))
    kmeans = MyKmeans(Const.NUM_OF_CLUSTER, Const.KMEANS_SAVE_FILE, data_array,
                      "load")
    return unique_char_set, w2v, kmeans
Example #24
 def import_ios_resources(self):
     fileOperator = FileOperator()
     # parse all supported languages
     for f in os.listdir(self.appConfig.ios_resources_root_directory):
         language = self.__get_ios_file_language(f)
         if len(language) <= 0:
             continue
         # language name
         self.support_languages.append(language)
     # parse the keyword entries for each language
     for f in os.listdir(self.appConfig.ios_resources_root_directory):
         language = self.__get_ios_file_language(f)
         if len(language) <= 0:
             continue
         path = os.path.join(self.appConfig.ios_resources_root_directory, f,
                             "Localizable.strings")
         dist = fileOperator.read_ios_keywords(path)
         logging.debug("Read iOS keywords : " + str(dist))
         for k, v in dist.items():
             if k not in self.keywords:
                 self.keywords.append(k)
             if k not in self.translates:
                 self.translates[k] = {}
                 for support_language in self.support_languages:
                     if support_language != language:
                         self.translates[k][support_language] = ""
                     else:
                         self.translates[k][support_language] = v
     # newly added languages: set up entries for the languages selected when the app was initialized
     for sl in self.appConfig.support_languages:
         if sl not in self.support_languages:
             for k, v in self.translates.items():
                 self.translates[k][sl] = ""
     # output logs for debugging
     self.appConfig.add_support_languages(self.support_languages)
     logging.debug("Parsed From iOS Resources : " +
                   str(self.support_languages))
     logging.debug("Parsed Keywords : " + str(self.keywords))
     logging.debug(self.translates)
Example #25
 def gen_ios_resources(self):
     repository.load()
     ios_blacklist = self.appConfig.ios_language_black_list
     fileOperator = FileOperator()
     for language in repository.languages:
         # skip blacklisted languages
         if language in ios_blacklist:
             continue
         dist = {}
         for data in repository.datas:
             keyword = data["keyword"]
             translates = data["translates"]
             translation = translates[language]
             dist[keyword] = translation
         # write the resource file
         language_dir = os.path.join(
             self.appConfig.ios_resources_root_directory,
             language + ".lproj")
         if not os.path.exists(language_dir):
             os.mkdir(language_dir)
         fname = os.path.join(language_dir, "Localizable.strings")
         fileOperator.write_ios_resources(dist, fname)
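fileOperator.write_ios_resources is not shown here; a hypothetical sketch of what it might do, assuming it emits the conventional "key" = "value"; lines of a Localizable.strings file:

def write_ios_resources(dist, fname):
    # Write each keyword/translation pair in Localizable.strings syntax.
    with open(fname, "w", encoding="utf-8") as handle:
        for keyword, translation in dist.items():
            handle.write('"{0}" = "{1}";\n'.format(keyword, translation))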
Example #26
def get_logger(refresh=False):
    log_path = "./log"
    log_file = os.path.join(log_path, "logs")
    FileOperator.path_exits(log_path)
    if refresh:
        get_logger.logger = None
    if getattr(get_logger, "logger", None):
        return get_logger.logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(filename)s[line:%(lineno)d]: %(message)s")
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    get_logger.logger = logger
    return get_logger.logger
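get_logger caches the configured logger on a function attribute, so repeated calls reuse the same handlers; a short usage sketch:

logger = get_logger()
logger.info("collector started")

# refresh=True drops the cached instance and reconfigures the root logger;
# note the handlers added earlier stay attached, so lines may be duplicated.
logger = get_logger(refresh=True)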
Example #27
def learn_word():
    print("src file: ", Const.SRC_FILE)
    sentence_list = FileOperator.f_open(Const.SRC_FILE)
    sentence_list = StringOperator.split_sentence(sentence_list)
    prob_state, w2v, kmeans, _ = load_data()

    cnt = 0
    for sentence in sentence_list:
        sys.stdout.write("\r progress: %d / %d" % (cnt, len(sentence_list)))
        sys.stdout.flush()
        for i in range(len(sentence) - 2):
            vec = w2v.str_to_vector(sentence[i]).reshape(1, -1)
            cluster = kmeans.get_cluster(vec)
            next_vec = w2v.str_to_vector(sentence[i + 1]).reshape(1, -1)
            next_cluster = kmeans.get_cluster(next_vec)
            prob_state.count_up_trainsition(cluster, next_cluster)
        cnt += 1
    prob_state.save_prob(Const.PROB_FILE)
    print()
    print("end")
Example #28
def _main(flags):
    scheduler_df = pd.read_csv(SCHEDULER_INFILE, error_bad_lines=False)
    scheduler_df = scheduler_df.set_index("queueName")
    queue_names = pd.unique(scheduler_df.index.values)

    FileOperator.path_exits("model_input")
    FileOperator.path_exits("model_out")
    FileOperator.write_list_tocsv([], PRE_FILE)

    for queue_name in queue_names:
        print('--------------queue:{0}-----------'.format(queue_name))
        queue_information = scheduler_df.loc[queue_name, ['memory']]
        queue_information = queue_information.reset_index()
        queue_information = queue_information.loc[:, ['memory']]
        queue_information.insert(0, "times", queue_information.index.values)

        model_input_file = "./model_input/{0}.csv".format(queue_name)
        FileOperator.write_list_tocsv([], model_input_file)

        queue_information.to_csv(model_input_file, index=False, header=False)
        model_dir = "./model/{0}".format(queue_name)

        train(queue_name, model_input_file, PRE_FILE, model_dir,
              flags["train_step"], flags["predict_step"])
Example #29
 def search_in_ids_db(id):
     server_name, user_name, user_password, database_name = FileOperator.app_config_reader()
     original, changed, sql_command, line, header, table_name = [], [], [], [], [], ""
     if id.isdigit():
         table_name, id_name = ".Event", "id"
     elif (id[:6].isdigit() and id[6:8].isalpha() and len(id) == 8) or (id[:6].isalpha() and id[6:8].isdigit() and len(id) == 8):
         table_name, id_name = ".Donor", "unique_identifier"
     else:
         print("The input is not correct.")
         time.sleep(1.5)
         return
     sql_command.append("SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE " + \
                  "`TABLE_SCHEMA`='" + database_name + "' AND `TABLE_NAME`='" + table_name[1:] + "';")
     sql_command.append("SELECT * FROM " + database_name + table_name + " WHERE " + id_name + " = '" + id + "';")
     dbcon = mysql.connector.connect(user=user_name, password=user_password, host=server_name, database=database_name)
     cursor = dbcon.cursor()
     cursor.execute(sql_command[0])
     for cursor_message in cursor:
         header.append(cursor_message[0])
     cursor.execute(sql_command[1])
     for cursor_message in cursor:
         for one_message in cursor_message:
             line.append(str(one_message))
         if id_name == "id":
             original.append(EventObject(line[0],line[1],line[2],line[3],line[4],line[5],line[6],line[7],line[8],line[9]))
             changed.append(EventObject(line[0],line[1],line[2],line[3],line[4],line[5],line[6],line[7],line[8],line[9]))
             line = []
         else:
             original.append(DonorObject(line[0],line[1],line[2],line[3],str(line[4]).replace("None", "Never"),line[5],line[6],line[7],line[8],line[9],line[10],line[11],line[12]))
             changed.append(DonorObject(line[0],line[1],line[2],line[3],str(line[4]).replace("None", "Never"),line[5],line[6],line[7],line[8],line[9],line[10],line[11],line[12]))
             line = []
     if id_name == 'id' and original:
         ChangeClass.change_process_event(original, changed, [id, header])
     elif original:
         ChangeClass.change_process_donor(original, changed, [id, header])
     else:
         print("Not included in the database.")
         time.sleep(1.5)
Example #30
def call_sse():
    read = FileOperator.f_open(Const.UNIQ_SRC_FILE)
    w2v = Word2Vec(Const.W2V_SRC_FILE, Const.W2V_WEIGHT_FILE,
                   Const.WORD_FEAT_LEN, "load")

    unique_char_set = read[-1].split(",")
    print("number of unique word:", len(unique_char_set))
    data_array = []
    for word in unique_char_set:
        data_array.append(w2v.str_to_vector(word))

    sse_list = []
    num_of_cluster_list = range(100, 2000, 100)
    for num_of_cluster in num_of_cluster_list:
        print(num_of_cluster)
        kmeans = MyKmeans(num_of_cluster, Const.KMEANS_SAVE_FILE, data_array,
                          "init")
        print(kmeans.get_sse())
        sse_list.append(kmeans.get_sse())

    plt.plot(num_of_cluster_list, sse_list, marker='o')
    # plt.show()
    plt.savefig(Const.SSE_IMG)
Example #31
print('        enable pin: ' + str(enable_pin))
print('        sleep time: ' + str(sleep_time))
print('        ---------------------')
print('        x_mil: ' + str(x_mil))
print('        y_mil: ' + str(y_mil))
print('        z_mil: ' + str(z_mil))

inputok = input('-      Looks good? Keep going? (y/n)  ')
if inputok != 'y':
    sys.exit('( ! )     Okay. Stopping ...')
    Motor.cleanup(motor)

motor = Motor(xdir, xstep, ydir, ystep, zdir, zstep, enable_pin, sleep_time)
Motor.setup(motor)
#import code; code.interact(local=dict(globals(), **locals()))
operator = FileOperator(motor, x_mil, y_mil, z_mil)

print('')
print('( 3 )  Choosing GCODE File')
files = os.listdir('./gcodefiles/')
filnum = -1
for file in files:
    filnum += 1
    print('      ' + str(filnum) + ' : ' + file)

inputfile = input('-      Choose file by entering (0...' + str(filnum) + '): ')
filename = files[int(inputfile)]
filepath = './gcodefiles/' + filename

print('')
print('( 4 )  Ok. Ready for Printing: ' + filename)