Example #1
import requests
from flask import request, render_template
from werkzeug.utils import secure_filename

# API_KEY and FileModel are defined elsewhere in the module


def upload_file_post():
    if request.method == "POST":

        if request.files:
            # get the uploaded file
            uploaded_file = request.files["customFile"]
            filename = secure_filename(uploaded_file.filename)
            print(uploaded_file)
            # save file in files folder
            uploaded_file.save('files/' + filename)

            try:

                url = 'https://www.virustotal.com/vtapi/v2/file/scan'

                params = {'apikey': API_KEY}

                # open in binary mode; the original misplaced 'rb' outside open()
                with open('files/' + filename, 'rb') as f:
                    files = {'file': (filename, f)}
                    response = requests.post(url, files=files, params=params)

                result = response.json()
                print(result)
                # save the filename and hash (resource) in the database
                if not FileModel.get(result['resource']):
                    FileModel.create(result['resource'], filename,
                                     result['resource'])
                # if everything went well, file_scan.html is rendered with a success message
                return render_template('file_scan.html', msg=1)
            except Exception:
                # if anything went wrong, file_scan.html is rendered with a failure message
                return render_template('file_scan.html', msg=0)
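For context, a minimal sketch of how such a view is typically wired into a Flask app; the route path, app object, and API_KEY handling here are assumptions rather than part of the original snippet:

import os
from flask import Flask

app = Flask(__name__)
API_KEY = os.environ.get('VT_API_KEY', '')  # hypothetical: key read from the environment

# hypothetical route binding for the view above
app.add_url_rule('/file/upload', 'upload_file_post', upload_file_post,
                 methods=['GET', 'POST'])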
Example #2
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty
from PyQt5.QtGui import QColor
from PyQt5.QtQml import QQmlListProperty

# FileModel, ExeFormat, StructureNode, AddressRange and AreaStyle come from the surrounding project


class FileDocument(QObject):
    structureChanged = pyqtSignal()
    sectionsChanged = pyqtSignal()

    def __init__(self, parent=None):
        QObject.__init__(self, parent)
        self._data = FileModel()
        self._fileName = ''
        self._structure = None
        self._meta = None
        self._sections = []

    @pyqtProperty('QVariant')
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

    @pyqtProperty(QQmlListProperty, notify=sectionsChanged)
    def sections(self):
        return QQmlListProperty(AddressRange, self, self._sections)

    @pyqtSlot('QString')
    def openFile(self, path):
        self._fileName = path
        self._data.openFile(path)
        self.load_structure()

    @pyqtProperty('QString', constant=True)
    def fileName(self):
        return self._fileName

    @pyqtProperty(StructureNode, notify=structureChanged)
    def structure(self):
        return self._structure

    def load_structure(self):
        fmt = ExeFormat()
        self._meta = fmt.parse_file(self._fileName,
                                    to_meta=True,
                                    compact_meta=False)
        self.create_structure(self._meta)

    def create_structure(self, meta):
        self._structure = StructureNode(None,
                                        value=meta,
                                        sections=self._sections)
        self.structureChanged.emit()
        self.sectionsChanged.emit()

    @pyqtSlot(int, int)
    def addSection(self, begin, end):
        section = AddressRange(self, begin=begin, end=end)
        section.style = AreaStyle(color=QColor(0, 255, 0, 25),
                                  borderColor=QColor(0, 255, 0),
                                  borderWidth=2)
        self._sections.append(section)
        self.sectionsChanged.emit()
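A hedged sketch of how a document class like this might be exposed to QML; the module URI, version, and type name below are assumptions:

from PyQt5.QtQml import qmlRegisterType

# hypothetical registration so QML can instantiate FileDocument directly
qmlRegisterType(FileDocument, 'FileInspector', 1, 0, 'FileDocument')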
Example #3
import json

# FileModel is defined elsewhere in the module


def file_data():
    # fetch all rows
    rows = FileModel.get_all()
    # make json data with id, name and resource(hash) and html buttons to download and view report
    json_data = []
    for result in rows:
        data = {}
        data['id'] = result['id']
        data['name'] = result['name']
        data['resource'] = result['resource']
        data['action'] = '<a href="/file/report/' + result[
            'resource'] + '" class="btn btn-info">View Report</a> <a target="_blank" href="/file/download/' + result[
                'resource'] + '" class="btn btn-info">Download Report</a>'
        json_data.append(data)
    return json.dumps(json_data)
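A hedged variant of the same endpoint built with flask.jsonify, which also sets the application/json content type; the function name and any route binding are assumptions:

from flask import jsonify

def file_data_json():
    rows = FileModel.get_all()
    return jsonify([{'id': r['id'], 'name': r['name'],
                     'resource': r['resource']} for r in rows])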
Example #4
import sys
import datetime

import numpy as np
import clips  # PyCLIPS

# TEMP_FILE_PREFIX, RULE_FILE_PREFIX and FileModel come from the surrounding project


class ClipsService():
    def __init__(self):
        ##CONST CODE AREA
        self.CLIPS_FILE_LOAD_FAILED_CODE = 404
        self.CLIPS_FILE_LOAD_SUCCESS_CODE = 200
        self.codetype = sys.getfilesystemencoding()
        ##GLOBAL_AREA
        self.file_pointers = 1  # sim.data file read position
        self.m_nCount = 0
        self.data_file_name = TEMP_FILE_PREFIX + 'test_flight_facts.dat'
        self.clips_dribble_filename = TEMP_FILE_PREFIX + 'record.dat'
        self.clips_env = clips.Environment()
        self.engine = clips
        self.filemodel = FileModel()
        self.fact_filename = ''
        ## fact dataframe
        self.data = np.array([])
        self.data_header = np.array([])
        # self.dataframe = pd.DataFrame
        ## mapper
        self.mapper_list = []  # Template and slot mapping pair list
        self.record_name = ''  # Reasoning result record file name

    def reset(self):
        """
        reset
        :return:
        """
        self.file_pointers = 1  # sim.data file read position
        self.m_nCount = 0
        self.clips_env = clips.Environment()
        self.engine = clips
        self.filemodel.reset()
        self.filemodel.load_data(self.fact_filename)
        self.data = np.array([])
        self.data_header = np.array([])

    def update_data(self):
        """@public: Get fact data."""
        self.filemodel.update_data()
        self.data_header = self.filemodel.get_dataframe_header()
        self.data = self.filemodel.get_data()

    def get_EOF_mark(self):
        """
        Return the simulation end flag (EOF mark).
        :return:
        """
        return self.filemodel.is_EOF()

    def load_rule_files(self, filenames):
        """@public: Load rule files"""
        self.__on_init_clips()
        try:
            for filename in filenames:
                self.engine.Load(RULE_FILE_PREFIX + filename)
            self.engine.DebugConfig.DribbleOn(self.clips_dribble_filename)
            self.engine.Reset()
            return self.CLIPS_FILE_LOAD_SUCCESS_CODE
        except self.engine.ClipsError as e:
            print(self.engine.ErrorStream.Read())
            return self.CLIPS_FILE_LOAD_FAILED_CODE

    def get_slots(self):
        """@public: Get Slot names orgnized by template names."""
        slot_dict = {}
        for template_name in self.engine.TemplateList():
            template = self.engine.FindTemplate(template_name)
            if len(template.Slots.Names()) > 0:
                slot_dict[template] = template.Slots.Names()
        return slot_dict

    def set_mapping_list(self, mapper):
        for item in mapper:
            # print(item.slot)
            self.mapper_list.append(item)
            # template = item.template
            # slot = item.slot
            # variable = item.variable

    def __on_init_clips(self):
        """@private: Initialize the CLIPS environment."""
        self.engine.Reset()
        self.engine.Clear()

    def load_fact_file(self, filename):
        """@public: Load fact file into dataframe"""
        if len(filename) <= 0:
            return
        self.fact_filename = filename
        self.filemodel.load_data(filename)

    def generate_fact_str(self):
        """@public: Generate assert fact formatted string"""
        tmp_template_name = ""
        assert_str = ""

        for mapper in self.mapper_list:
            template_name = mapper.template
            __idx = np.where(
                self.data_header == mapper.variable.decode('utf-8'))[0][0]

            slot_str = " (" + mapper.slot + " " + str(self.data[__idx]) + ')'
            if tmp_template_name != template_name:
                # close the previous template's fact before starting a new one
                if len(assert_str) > 0:
                    assert_str = assert_str + ')\n'
                assert_str = assert_str + "(" + template_name + slot_str
                tmp_template_name = template_name
            else:
                assert_str = assert_str + slot_str
        return assert_str + ')'
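    # Example output for two mappers that target a "flight" template with
    # slots alt and speed (hypothetical names):
    #   (flight (alt 1200.0) (speed 30.5))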

    def get_fact_content(self):
        """@public: Get the row of the dataframe (specific row of the facts file)"""
        # self.get_data()
        strs = ""
        for item in self.data:
            strs = strs + str(item) + ";"

        return strs  #.decode('gb2312').encode('utf-8')

    def generate_facts(self, factstr):
        """@public: Generate fact file"""
        with open(self.data_file_name, 'w+') as f:
            f.writelines(factstr)

    def set_record_name(self):
        """@public: Generate record file"""
        nowTime = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        self.record_name = TEMP_FILE_PREFIX + 'record/' + nowTime + '.record'
        open(self.record_name, 'w').close()  # create an empty record file

    # def __get_dataframe_size(self):
    #     """@deprecated: Get the whole fact file's row size"""
    #     return len(self.dataframe)

    def get_result(self):
        """@public: """
        self.engine.Reset()
        self.engine.LoadFacts(self.data_file_name)
        self.engine.Run()
        std_str = self.engine.StdoutStream.Read()
        dlg_str = self.engine.DialogStream.Read()
        # disp_str = CLIPS_HPROSE.DisplayStream.Read()
        if dlg_str is None:
            dlg_str = 'NaN'
        if std_str is None:
            std_str = 'NaN'

        with open(self.record_name, 'a') as fileobj:
            fileobj.writelines(std_str + '\n' + dlg_str + '\n')
        print(std_str + ' ' + dlg_str)
        return std_str, dlg_str
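A hedged driver sketch for this service; the rule/fact file names, the mapper list, and the per-tick loop are assumptions inferred from the method names above:

svc = ClipsService()
if svc.load_rule_files(['rules.clp']) == svc.CLIPS_FILE_LOAD_SUCCESS_CODE:
    svc.load_fact_file('sim.data')  # hypothetical fact file
    svc.set_mapping_list(mappers)   # template/slot/variable mappers built elsewhere
    svc.set_record_name()
    while not svc.get_EOF_mark():
        svc.update_data()                            # pull the next tick of data
        svc.generate_facts(svc.generate_fact_str())  # rewrite the fact file
        std_str, dlg_str = svc.get_result()          # run CLIPS and record the output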
Example #5
print "\t\t\t\t-----------------------------------------------"
print "\t\t\t\t PID Tuning with Artificial Intelegence method"
print "\t\t\t\t             Tower Copter Control"
print "\t\t\t\t               M.Imam Muttaqin"
print "\t\t\t\t-----------------------------------------------"
print "\t\t\t\t Select Mode > 1.Automatic (use default-conf)"
print "\t\t\t\t             > 2.Genetic Algorithm (Hard Tune)"
print "\t\t\t\t             > 3.Neural Network (Soft Tune)"
print "\t\t\t\t             > 4.TensorFlow (Soft Tune)"
print "\t\t\t\t             > 5.NN Predict"
print "\t\t\t\t             > 99.Exit"

i_mode = input("\t\t\t\t             : ")

name_model = raw_input("Enter name of models: ")
file_model = FileModel(name_model)

if i_mode == 1:
    ga(True)
    nn()
elif i_mode == 2:
    ga()
elif i_mode == 3:
    nn()
elif i_mode == 4:
    keras()
elif i_mode == 5:
    nn_predict()
else:
    sys.exit()
Example #6
import numpy as np
import pandas as pd
import joblib
from keras.models import load_model

# FileModel comes from the surrounding project


class LstmService():
    def __init__(self):
        self.model = None  # Prediction model
        self.model_path = '../data/NN/model.h5'
        self.weight_path = '../data/NN/weight.h5'
        self.SCALAR_PICKLE_PATH = '../data/preprocess/scaler_py2.pkl'
        self.feature_list = []
        self.iter_count = 0  # sim-trigger reference count
        self.pred_interval = 20  # prediction interval, must be < self.pred_length
        self.pred_step = 5  # file-read stride; the base data refreshes every 50 ms

        self.sequence_init_weight = 2  # initial reserve buffer-size weight, must be >= 2
        self.sequence_length = 60  # predict from 10 s of data
        self.pred_length = 30  # predict the next 5 s of data
        self.gt_grpc_bufsize = (
            20 * 12) + 2  # ground-truth series buffer returned to gRPC (20 s * 12 samples/s + 2 = 242)
        self.pred_grpc_bufsize = (
            20 + 10
        ) * 12 + self.pred_length  # prediction series buffer returned to gRPC (30 s * 12 samples/s + pred_length = 390)
        self.invert_scale = 2000.0  # scale factor used to invert the normalization

        self.iter_csv = None
        self.gt_buf = np.array([])  # ground-truth data buffer
        self.pred_buf = np.array([])  # prediction data buffer
        self.gt_grpc_buf = np.array([])  # ground-truth series returned to gRPC
        self.pred_grpc_buf = np.array([])  # prediction series returned to gRPC
        self.error_grpc_buf = np.array([])  # error series returned to gRPC

        self.FEATURE_SIZE = 3
        self.y_cols = []
        self.scaler = None
        self.fileModel = FileModel()

    def set_config(self, model_path, weight_path, scalar_pickle_path,
                   invert_scale, feature_size, features):
        """
        @Impl 设置网络参数
        :param model_path:  模型路径
        :param weight_path: 权值路径
        :param scalar_pickle_path:  归一化或标准化pickle路径
        :param invert_scale:    预测参数尺度缩放比例
        :param feature_size:    特征数量
        :param features: 特征名称列表(unicode),将预测列放在最后一列 [a,b,pred], 次序与训练时候的次序相同
        :return:
        """
        self.model_path = model_path  # '../data/NN/model.h5'
        self.weight_path = weight_path  #'../data/NN/weight.h5'
        self.__load_model(self.model_path)
        self.__load_weight(self.weight_path)
        self.SCALAR_PICKLE_PATH = scalar_pickle_path
        self.invert_scale = invert_scale
        self.FEATURE_SIZE = feature_size
        self.feature_list = features
        self.reset()

    def reset(self):
        """
        reset
        :return:
        """
        self.gt_buf = np.array([])
        self.pred_buf = np.array([])
        self.gt_grpc_buf = np.array([])
        self.pred_grpc_buf = np.array([])
        self.iter_count = 0
        if self.pred_interval > self.pred_length:
            self.pred_interval = self.pred_length - 1
        self.scaler = joblib.load(self.SCALAR_PICKLE_PATH)
        self.y_cols = [
            item for item in range(self.pred_length * self.FEATURE_SIZE)
            if (item + 1) % self.FEATURE_SIZE == 0
        ]  # select the predicted columns

    def __load_weight(self, path=''):
        """
        @private: Load LSTM trained weight
        :param path: weight file path
        :return:
        """
        self.model.load_weights(path)

    def __filter_attr(self, dataframe):
        """
        Filter the configured feature columns out of the dataset
        :param dataframe:
        :return:
        """

        df = pd.DataFrame(dataframe, columns=self.feature_list)
        return df

    def __load_model(self, path=''):
        """
        @private: Load LSTM model
        :param path: model file path
        :return:
        """
        self.model = load_model(path)

    def __prediction(self, X_gt):
        """
        @private: Run the model prediction
        :param X_gt: ground-truth input
        :return:  predicted output
        """
        y_hat = self.model.predict(x=X_gt)

        y_hat = self.__make_seqbatch2seq(y_hat)
        return np.array(y_hat)
        # y_hat = np.array(y_hat)
        # if y_hat.size > 0:
        #     return np.array(y_hat[0])
        # else:
        #     return np.array(y_hat)

    def __series_to_supervised(self, data, n_in=60, n_out=0):
        """
        @private: Convert series to supervised learning
        :return:
        """
        """Convert series to supervised learning"""
        data = pd.DataFrame(data)
        cols = list()
        # input sequence (t-n, ... t-1)
        for i in range(n_in, 0, -1):
            cols.append(data.shift(i))

        # forecast sequence (t, t+1, ... t+n)
        for i in range(0, n_out):
            cols.append(data.shift(-i))

        agg = pd.concat(cols, axis=1)

        agg.dropna(inplace=True)
        return agg
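    # For example, with n_in=2 and a single column [1, 2, 3, 4], the
    # aggregated frame (after dropna) holds the rows [1, 2] and [2, 3]:
    # each row pairs the t-2 and t-1 observations preceding a step.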

    def __make_seqbatch2seq(self, in_seqbatch):
        """
        @private: Collapse a batch of multi-step predictions into one sequence
        by keeping the first element of each window: [1,2,3],[2,3,4] => [1,2]
        :param in_seqbatch:
        :return:
        """
        # seq_batch = in_seqbatch.copy()
        seq = []
        for item in in_seqbatch:
            seq.append(item[0])
        return np.array(seq)

    def __get_groundtruth(self, pred_step=1):
        """
        @private: read pred_step rows from the file on every tick to obtain the ground truth
        :param pred_step: how many ticks between predictions
        :return: one tick of real data; prediction starts after 60 ticks have accumulated
        """
        try:
            dataset = self.fileModel.get_data_frame()
            dataset = self.__filter_attr(dataset)
        except StopIteration:
            return None

        values = dataset.values
        # scaled_vals = self.scaler.fit_transform(values)

        return np.array(values)

    def on_sim_trigger(self):
        """
        Triggered once per simulation tick
        :return:
        """

        y_hat = np.array([])
        gt_rows = self.__get_groundtruth(pred_step=self.pred_step)
        if gt_rows is None:  # end of the data file
            return self.gt_grpc_buf, self.pred_grpc_buf, self.error_grpc_buf
        gt = gt_rows[0]  # fetch one tick of ground truth
        self.gt_buf = np.vstack(
            (self.gt_buf, gt)) if self.gt_buf.size > 0 else gt
        y_gt_step = gt[self.FEATURE_SIZE - 1]  # type = double

        if (len(self.gt_buf) >= self.sequence_length *
                self.sequence_init_weight) and (self.iter_count %
                                                self.pred_interval == 0):
            reframed = self.__series_to_supervised(self.gt_buf,
                                                   self.sequence_length)
            values = reframed.values

            X_gt, y_gt = values, values[:, self.y_cols]  # x_gt gt:ground truth
            X_gt = self.scaler.transform(X_gt)
            X_gt = X_gt.reshape((X_gt.shape[0], 1, X_gt.shape[1]))

            # do prediction ::@ test proc time 0.01s
            y_hat = self.__prediction(X_gt)  # the next 5 s of data
            y_hat[y_hat < 0.0] = 0.01
            y_hat = y_hat * self.invert_scale

            self.pred_buf = y_hat

            # advance the ground-truth buffer by one tick
            self.gt_buf = self.gt_buf[1:]

        self.iter_count = self.iter_count + 1  # increment the tick counter
        self.get_grpc_buf(y_gt_step, y_hat)

        return self.gt_grpc_buf, self.pred_grpc_buf, self.error_grpc_buf

    def get_grpc_buf(self, y_gt, y_hat):
        """
        Build the buffers returned to gRPC for curve display
        :param y_gt: ground-truth value
        :param y_hat: predicted values
        :return:
        """
        # :block:   gt part
        if self.gt_grpc_buf.size < self.gt_grpc_bufsize:
            self.gt_grpc_buf = np.hstack([self.gt_grpc_buf, y_gt])
        else:
            self.gt_grpc_buf = np.delete(self.gt_grpc_buf, 0)
            self.gt_grpc_buf = np.hstack([self.gt_grpc_buf, y_gt])

        # :block:   pred part
        # : pad with 0.0 while y_hat has no data yet
        if (self.pred_grpc_buf.size <= self.sequence_length *
                self.sequence_init_weight) and y_hat.size <= 0:
            self.pred_grpc_buf = np.hstack([self.pred_grpc_buf, 0.0])

        elif (self.pred_grpc_buf.size <= self.sequence_length *
              self.sequence_init_weight) and y_hat.size > 0:
            self.pred_grpc_buf = np.hstack([self.pred_grpc_buf, y_hat])
        elif (self.pred_grpc_buf.size < self.pred_grpc_bufsize):
            if y_hat.size > 0:
                self.pred_grpc_buf = np.hstack(
                    [self.pred_grpc_buf, y_hat[-self.pred_interval:]])
        else:
            if y_hat.size > 0:
                self.pred_grpc_buf = np.delete(
                    self.pred_grpc_buf, np.arange(0, self.pred_interval))
                self.pred_grpc_buf = np.hstack(
                    [self.pred_grpc_buf, y_hat[-self.pred_interval:]])

        # :block:   error part
        self.error_grpc_buf = (self.gt_grpc_buf -
                               self.pred_grpc_buf[:self.gt_grpc_buf.size])
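A hedged driver sketch for this service; the paths match the defaults above, while the feature names and tick count are placeholder assumptions:

svc = LstmService()
svc.set_config(model_path='../data/NN/model.h5',
               weight_path='../data/NN/weight.h5',
               scalar_pickle_path='../data/preprocess/scaler_py2.pkl',
               invert_scale=2000.0,
               feature_size=3,
               features=[u'f1', u'f2', u'target'])  # hypothetical feature names
for _ in range(600):  # one call per 50 ms simulation tick
    gt_buf, pred_buf, err_buf = svc.on_sim_trigger()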