Example #1
    def save_predict_file(self, node_id, df):
        """Save the prediction DataFrame as a timestamped CSV under the node's
        predict result directory (node_id format: <nn_id>_<ver>_<node_name>)."""
        _predict_path = utils.get_source_predict_path(node_id.split('_')[0], node_id.split('_')[1], 'predict')

        predict_result_dir = utils.make_and_exist_directory(_predict_path + "/" + "result" + "/")
        predict_result_filename = predict_result_dir + str(node_id) +"_" + strftime("%Y-%m-%d-%H:%M:%S",
                                                                            gmtime()) + ".csv"
        df.to_csv(predict_result_filename)
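
A minimal usage sketch. The instance name `node` and the id `nn00001_1_data_node` are illustrative, not taken from the original project; the node_id is assumed to follow the `<nn_id>_<ver>_<node_name>` convention that the `split('_')` calls above imply:

    import pandas as pd

    # Hypothetical caller; `node` is an instance of the class that defines
    # save_predict_file, and the id splits into nn_id='nn00001', ver='1'.
    df = pd.DataFrame({"predict_label": [0, 1, 1]})
    node.save_predict_file("nn00001_1_data_node", df)
    # -> writes <predict_path>/result/nn00001_1_data_node_<timestamp>.csv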
Example #2
    def predict(self, node_id, ver, parm, data=None, result=None):
        """ Wdnn predict 
            batchlist info에서 active flag가 Y인 Model을 가져와서 예측을 함 

        Args:
          params: 
            * node_id
            * conf_data

        Returns:
            none

        Raises:

        Example

        """
        try:
            logging.info("wdnn predict_start nnid : {0}".format(node_id))
            _node_id = node_id + "_" + ver + "_" + "netconf_node"

            _data_conf_id = node_id + "_" + ver + "_dataconf_node"
            self._init_node_parm(_node_id)
            #self.cls_pool_all = conf_data['cls_pool']  # Data feeder

            config = {
                "type": self.model_type,
                "labels": self.label_values,
                "nn_id": node_id,
                "nn_wf_ver_id": ver
            }
            train = TrainSummaryInfo(conf=config)
            #print(config)
            self.batch = self.get_active_batch(_node_id)
            #print(train)
            self.model_predict_path = self.model_path + '/' + self.batch
            self.multi_node_flag = False

            conf_data = {}
            conf_data['node_id'] = _node_id

            #conf_data['cls_pool'].get('nn00001_1_pre_feed_fr2wdnn_test')
            print("model_path : " + str(self.model_path))
            print("hidden_layers : " + str(self.hidden_layers))
            print("activation_function : " + str(self.activation_function))
            print("batch_size : " + str(self.batch_size))
            print("epoch : " + str(self.epoch))
            print("model_type : " + str(self.model_type))

            # data_store_path = WorkFlowDataFrame(conf_data['nn_id']+"_"+conf_data['wf_ver']+"_"+ "data_node").step_store
            data_conf_info = self.data_conf

            # make wide & deep model
            wdnn = NeuralCommonWdnn()
            wdnn_model = wdnn.wdnn_build(self.model_type, node_id,
                                         self.hidden_layers,
                                         str(self.activation_function),
                                         data_conf_info,
                                         str(self.model_predict_path))

            # feed
            # TODO: what if there are multiple files?
            filelist = sorted(parm.items())
            #train_data_set = self.cls_pool  # get filename
            #file_queue = str(train_data_set.input_paths[0])  # get file_name

            # Walk the files and load every row; a way to get the total record
            # count of a tfrecord file is still needed.

            _batch_size = self.batch_size
            _num_tfrecords_files = 0

            # multi Feeder modified
            multi_read_flag = self.multi_read_flag

            # TODO: H5
            # train per file in the folder (h5 case)
            # if the multi_file flag is 'no', the default format is h5

            results = dict()
            ori_list = list()
            pre_list = list()
            #self.batch_size = 5
            for filename in filelist:
                print("h5")
                #feeder = PreNodeFeedFr2Wdnn().set_for_predict(_data_conf_id)
                feeder = PreNodeFeedFr2Wdnn()
                #_data_conf_id
                #set_for_predict
                feeder.set_for_predict(_data_conf_id)
                data_node = DataNodeFrame()
                train_data_set = data_node.load_csv_by_pandas(
                    self.predict_path + "/" + filename[1].name)

                #feeder.set_input_paths([self.predict_path + "/" + filename[1].name])
                #train_data_set = feeder
                #_size = train_data_set
                # Each time one file is processed:
                # loop using the batch size and the file's total row count,
                # handling whatever is left over in the last batch.
                # Attaching an iterator to the file gives an fn that fetches one
                # batch at a time; feed that into __item, and let the input
                # function check the multi flag to split continuous and
                # categorical columns (does this need to run every batch?)
                # -> and fit while doing so.
                # # Iteration is to improve for Model Accuracy

                # Per Line in file
                # eval should be one line predict
                #self.batch_size = 2

                # The open question is how to fetch the train data here.

                result_df = pd.DataFrame()

                for i in range(0, len(train_data_set.index), self.batch_size):

                    data_set = train_data_set[i:i + self.batch_size]
                    #if i == 0:
                    #eval_data_Set = data_set
                    # input_fn2(self, mode, data_file, df, nnid, dataconf):
                    predict_value = wdnn_model.predict(
                        input_fn=lambda: feeder.input_fn2(
                            tf.contrib.learn.ModeKeys.TRAIN, filename,
                            data_set, data_conf_info))

                    data_set_count = len(data_set.index)
                    predict_val_list = [_pv for _pv in predict_value]
                    predict_val_count = len(predict_val_list)

                    if data_set_count != predict_val_count:
                        logging.error(
                            "wdnn eval error check : dataframe count({0}) predict count({1})"
                            .format(data_set_count, predict_val_count))
                        raise ValueError(
                            'eval data validation check error : dataframe and predict count is different(neuralnet_node_wdnn.eval)'
                        )

                    data_set['predict_label'] = predict_val_list
                    #_predict = list(predict_value)
                    predict_y = list(data_set['predict_label'])
                    #pd.concat(result_df, data_set)
                    result_df = result_df.append(data_set)
                    ori_list.extend(data_set[self.label].values.tolist())
                    pre_list.extend(list(data_set['predict_label']))

                    print(len(ori_list))
                    print(len(pre_list))
                    logging.info("wdnn eval ori list  : {0}".format(
                        len(ori_list)))
                # Select next file
                #train_data_set.next()

            predict_result_dir = utils.make_and_exist_directory(
                self.predict_path + "/" + "result" + "/")
            predict_result_filename = predict_result_dir + "result_" + strftime(
                "%Y-%m-%d-%H:%M:%S", gmtime()) + ".csv"
            result_df.to_csv(predict_result_filename)
            #os.remove(self.predict_path + "/" + filename[1].name)

            # TODO: move this block earlier
            # train.set_nn_batch_ver_id(self.batch)
            # if self.model_type == "regression":
            #     results['ori'] = ori_list
            #     results['pre'] = pre_list
            #     train.set_result_info(ori_list, pre_list)
            #
            # if self.model_type == "category":
            #     # tfrecord converts the label here because it is extracted as a
            #     # Tensor object and cannot be recovered later; for H5, let the
            #     # feeder do the conversion instead.
            #     le = LabelEncoder()
            #     le.fit(self.label_values)
            #
            #     for _i, _ori in enumerate(ori_list):
            #         #return_value = self.labels[np.argmax(model.predict(X_train))]
            #         train.set_result_info(str(_ori), str(le.inverse_transform(pre_list[_i])))
            #return self.batch

            logging.info("eval end")
            return train
        except Exception as e:
            logging.error("Wdnn predict error {0}".format(e))

            raise Exception(e)
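
The core of Example #2 is the inner loop that slices the DataFrame into `batch_size` chunks, predicts each chunk, and verifies the model returned one prediction per row. Below is a self-contained sketch of that slice-and-verify pattern with a stand-in predict function instead of the wdnn estimator; all names in it are illustrative:

    import pandas as pd

    def predict_in_batches(df, batch_size, predict_fn):
        """Predict df in batch_size chunks, checking that each chunk gets
        exactly one prediction per row (mirrors the count check above)."""
        result_df = pd.DataFrame()
        for i in range(0, len(df.index), batch_size):
            chunk = df[i:i + batch_size].copy()
            preds = list(predict_fn(chunk))
            if len(chunk.index) != len(preds):
                raise ValueError("prediction count differs from row count")
            chunk['predict_label'] = preds
            result_df = pd.concat([result_df, chunk])
        return result_df

    # Stand-in model: doubles the single feature column.
    frame = pd.DataFrame({"x": range(7)})
    out = predict_in_batches(frame, 3, lambda c: [v * 2 for v in c["x"]])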
Example #3
    def predict(self, node_id,ver, parm, data=None, result=None):

        """ Wdnn predict 
            batchlist info에서 active flag가 Y인 Model을 가져와서 예측을 함 

        Args:
          params: 
            * node_id
            * conf_data

        Returns:
            none

        Raises:INFO

        Example

        """
        try:
            logging.info("wdnn predict_start nnid : {0}".format(node_id))
            if ver == 'active':
                self.batch, wf_ver = self.get_active_batch2(node_id)
                #self.model_predict_path = ''.join([self.model_path + '/' + self.batch])
            else:
                wf_ver = ver
                _nn_ver_id= node_id + "_" + wf_ver + "_" + "netconf_node"
                self.batch = self.get_active_batch(_nn_ver_id)  # fetch the batch whose train/eval flag is 'Y'

            _node_id = node_id + "_" + wf_ver+ "_" + "netconf_node"
            _data_conf_id = node_id + "_" + wf_ver + "_dataconf_node"
            self._init_node_parm(_node_id)


            graph = NNCommonManager().get_nn_node_name(node_id)
            for net in graph:
                if net['fields']['graph_node'] == 'netconf_node':
                    netconf_node = net['fields']['graph_node_name']
            self.model_path = utils.get_model_path(node_id, wf_ver, netconf_node)

            config = {"type": self.model_type, "labels": self.label_values, "nn_id":node_id, "nn_wf_ver_id":ver}
            #train = TrainSummaryInfo(conf=config)
            self.model_predict_path = self.model_path + '/' + self.batch
            self.multi_node_flag = False
            self.predict_path = '/hoya_src_root/' + node_id + '/common/predict'

            conf_data = {}
            conf_data['node_id'] = _node_id

            logging.info("model_path : " + str(self.model_path))
            logging.info("hidden_layers : " + str(self.hidden_layers))
            logging.info("activation_function : " + str(self.activation_function))
            logging.info("batch_size : " + str(self.batch_size))
            logging.info("epoch : " + str(self.epoch))
            logging.info("model_type : " + str(self.model_type))

            data_conf_info = self.data_conf
            # make wide & deep model
            wdnn = NeuralCommonWdnn()
            wdnn_model = wdnn.wdnn_build(self.model_type, node_id, self.hidden_layers,
                                         str(self.activation_function), data_conf_info,
                                         str(self.model_predict_path),
                                         dememsion_auto_flag=self.auto_demension)
            # feed
            le = LabelEncoder()
            le.fit(self.label_values)

            file_cnt = len(parm.FILES.keys())
            dir = 'predict'
            filelist = list()
            if file_cnt > 0:
                for key, requestSingleFile in parm.FILES.items():
                    filelist.append(str(requestSingleFile._name))

            _batch_size = self.batch_size
            _num_tfrecords_files = 0

            # multi Feeder modified
            multi_read_flag = self.multi_read_flag

            results = dict()
            ori_list = list()
            pre_list = list()
            #self.batch_size = 5
            for filename in filelist:
                print("h5")
                #feeder = PreNodeFeedFr2Wdnn().set_for_predict(_data_conf_id)
                feeder = PreNodeFeedFr2Wdnn()

                feeder.set_for_predict(_data_conf_id)
                data_node = DataNodeFrame()
                train_data_set = data_node.load_csv_by_pandas(self.predict_path + "/" + filename)

                result_df = pd.DataFrame()

                for i in range(0, len(train_data_set.index), self.batch_size):

                    data_set = train_data_set[i:i + self.batch_size]

                    predict_value = wdnn_model.predict(
                        input_fn=lambda: feeder.input_fn2(tf.contrib.learn.ModeKeys.TRAIN, filename,
                                                                  data_set, data_conf_info))

                    data_set_count = len(data_set.index)
                    predict_val_list = [_pv for _pv in predict_value]
                    predict_val_count = len(predict_val_list)

                    if data_set_count != predict_val_count:
                        logging.error("wdnn eval error check : dataframe count({0}) predict count({1})".format(data_set_count, predict_val_count))
                        raise ValueError(
                            'eval data validation check error : dataframe and predict count is different(neuralnet_node_wdnn.eval)')

                    data_set['predict_label'] = list(le.inverse_transform(predict_val_list))

                    #_predict = list(predict_value)
                    predict_y = list(data_set['predict_label'])
                    #pd.concat(result_df, data_set)
                    result_df = result_df.append(data_set)
                    ori_list.extend(data_set[self.label].values.tolist())
                    pre_list.extend(list(data_set['predict_label']))

                    logging.info("wdnn eval ori list  : {0}".format(len(ori_list)))
                    logging.info("wdnn eval ori list  : {0}".format(len(pre_list)))
                #train_data_set.next()

            predict_result_dir = utils.make_and_exist_directory(self.predict_path + "/" + "result" + "/")
            predict_result_filename = predict_result_dir + "result_" + strftime("%Y-%m-%d-%H:%M:%S",
                                                                                gmtime()) + ".csv"
            result_df.to_csv(predict_result_filename)

            logging.info("eval end")
            return json.loads(result_df.to_json())
        except Exception as e:
            logging.error("Wdnn predict error {0}".format(e))

            raise Exception(e)
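
Unlike Example #2, this version decodes the numeric predictions back to their original label values with `LabelEncoder.inverse_transform` and returns the result frame as JSON rather than a `TrainSummaryInfo`. A minimal sketch of those two steps; the label values and frame below are made up for illustration:

    import json
    import pandas as pd
    from sklearn.preprocessing import LabelEncoder

    le = LabelEncoder()
    le.fit(["cat", "dog"])                 # stands in for self.label_values

    df = pd.DataFrame({"f1": [0.2, 0.9]})
    df["predict_label"] = le.inverse_transform([1, 0])  # -> ['dog', 'cat']
    payload = json.loads(df.to_json())     # same shape this predict() returns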
Example #4
    def predict(self, node_id, ver, parm, data=None, result=None):
        """ Wdnn predict 
            batchlist info에서 active flag가 Y인 Model을 가져와서 예측을 함 

        Args:
          params: 
            * node_id
            * conf_data

        Returns:
            none

        Raises:INFO

        Example

        """
        try:
            logging.info("wdnn predict_start nnid : {0}".format(node_id))
            if ver == 'active':
                self.batch, wf_ver = self.get_active_batch2(node_id)
            else:
                wf_ver = ver
                _nn_ver_id = node_id + "_" + wf_ver + "_" + "netconf_node"
                self.batch = self.get_active_batch(
                    _nn_ver_id)  # fetch the batch whose train/eval flag is 'Y'

            _node_id = node_id + "_" + wf_ver + "_" + "netconf_node"
            _data_conf_id = node_id + "_" + wf_ver + "_dataconf_node"
            self._init_node_parm(_node_id)

            graph = NNCommonManager().get_nn_node_name(node_id)
            for net in graph:
                if net['fields']['graph_node'] == 'netconf_node':
                    netconf_node = net['fields']['graph_node_name']
            self.model_path = utils.get_model_path(node_id, wf_ver,
                                                   netconf_node)

            config = {
                "type": self.model_type,
                "labels": self.label_values,
                "nn_id": node_id,
                "nn_wf_ver_id": ver
            }
            self.model_predict_path = self.model_path + '/' + self.batch
            self.multi_node_flag = False
            self.predict_path = '/hoya_src_root/' + node_id + '/common/predict'

            conf_data = {}
            conf_data['node_id'] = _node_id

            logging.info("model_path : " + str(self.model_path))
            logging.info("ml_class : " + str(self.ml_class))
            logging.info("config : " + str(self.config))

            data_conf_info = self.data_conf
            # make ML model
            clf = joblib.load(self.model_path + 'model.pkl')

            # feed
            le = LabelEncoder()
            le.fit(self.label_values)

            file_cnt = len(parm.FILES.keys())
            dir = 'predict'
            filelist = list()
            if file_cnt > 0:
                for key, requestSingleFile in parm.FILES.items():
                    filelist.append(str(requestSingleFile._name))

            _batch_size = self.batch_size
            _num_tfrecords_files = 0

            # multi Feeder modified
            multi_read_flag = self.multi_read_flag

            results = dict()
            ori_list = list()
            pre_list = list()
            #self.batch_size = 5
            for filename in filelist:
                print("h5")
                feeder = PreNodeFeedFr2ML()

                feeder.set_for_predict(_data_conf_id)
                data_node = DataNodeFrame()
                train_data_set = data_node.load_csv_by_pandas(
                    self.predict_path + "/" + filename)

                result_df = pd.DataFrame()

                for i in range(0, len(train_data_set.index), self.batch_size):

                    data_set = train_data_set[i:i + self.batch_size]

                    predict_value = clf.predict(
                        input_fn=lambda: feeder.input_fn2(
                            tf.contrib.learn.ModeKeys.TRAIN, filename,
                            data_set, data_conf_info))

                    data_set_count = len(data_set.index)
                    predict_val_list = [_pv for _pv in predict_value]
                    predict_val_count = len(predict_val_list)

                    if data_set_count != predict_val_count:
                        logging.error(
                            "wdnn eval error check : dataframe count({0}) predict count({1})"
                            .format(data_set_count, predict_val_count))
                        raise ValueError(
                            'eval data validation check error : dataframe and predict count is different(neuralnet_node_wdnn.eval)'
                        )

                    data_set['predict_label'] = list(
                        le.inverse_transform(predict_val_list))

                    #_predict = list(predict_value)
                    predict_y = list(data_set['predict_label'])
                    #pd.concat(result_df, data_set)
                    result_df = result_df.append(data_set)
                    ori_list.extend(data_set[self.label].values.tolist())
                    pre_list.extend(list(data_set['predict_label']))

                    logging.info("wdnn eval ori list  : {0}".format(
                        len(ori_list)))
                    logging.info("wdnn eval ori list  : {0}".format(
                        len(pre_list)))
                #train_data_set.next()

            predict_result_dir = utils.make_and_exist_directory(
                self.predict_path + "/" + "result" + "/")
            predict_result_filename = predict_result_dir + "result_" + strftime(
                "%Y-%m-%d-%H:%M:%S", gmtime()) + ".csv"
            result_df.to_csv(predict_result_filename)

            logging.info("eval end")
            return json.loads(result_df.to_json())
        except Exception as e:
            logging.error("Wdnn predict error {0}".format(e))

            raise Exception(e)
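
Example #4 swaps the wdnn estimator for a model deserialized with joblib, yet still calls `predict` with an `input_fn` keyword, which a plain scikit-learn estimator would not accept; presumably the pickled object wraps or adapts that interface. For contrast, the usual joblib round-trip with a plain scikit-learn model looks like the sketch below (the file path and model are illustrative, not from the original project):

    import joblib
    from sklearn.linear_model import LogisticRegression

    # Train and persist a toy model; stands in for the project's model.pkl.
    clf = LogisticRegression().fit([[0.0], [1.0]], [0, 1])
    joblib.dump(clf, "/tmp/model.pkl")

    clf = joblib.load("/tmp/model.pkl")
    preds = clf.predict([[0.3], [0.8]])  # plain predict(X); no input_fn keyword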