Example #1
    def generate_trace(self, min_length=5):
        # We start the sentence with the start token
        x = np.zeros((1, self.seq_length, self.vocab_size))
        mask = np.zeros((1, self.seq_length))
        new_sentence = []
        i = 0
        # Repeat until we get an end token
        selIndex = self.word_to_index[IN_SELECTION_TOKEN]
        notSelIndex = self.word_to_index[NOT_IN_SELECTION_TOKEN]
        while not ((len(new_sentence) > 0) and
                   ((new_sentence[-1] == selIndex) or
                    (new_sentence[-1] == notSelIndex))):
            probs = self.propabilities(x, mask)[0]
            #            samples = np.random.multinomial(1, probs)
            #            index = np.argmax(samples)
            index = np.random.choice(range(len(probs)), p=probs)
            new_sentence.append(index)
            x[0, i, index] = 1
            mask[0, i] = 1
            i += 1

            # Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
            # Also: we don't want sentences containing UNKNOWN_TOKENs
            if (len(new_sentence) >= self.seq_length
                    or index == self.word_to_index[UNKNOWN_TOKEN]):
                writeLog("Generated exceedingly long example trace. Skipping.")
                return None
        if len(new_sentence) < min_length:
            return None
        res = [self.index_to_word[x] for x in new_sentence]
        writeLog("Generated example trace of length %d: %s" %
                 (len(res), str(res)))
        return res
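
The generation step above draws the next token index directly from the network's output distribution. A minimal standalone sketch of that sampling step, assuming only a NumPy probability vector (all names below are illustrative, not from the source):

import numpy as np

rng = np.random.default_rng(0)

# Hypothetical next-token distribution over a 5-word vocabulary.
probs = np.array([0.1, 0.4, 0.2, 0.2, 0.1])

# Same idea as the np.random.choice call above: sample one index with
# probability proportional to probs.
index = rng.choice(len(probs), p=probs)

# The commented-out multinomial variant is equivalent in two steps:
# draw a one-hot sample, then take its argmax.
one_hot = rng.multinomial(1, probs)
index_alt = int(np.argmax(one_hot))
print(index, index_alt)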
Example #2
def main(opts):
    if not opts.openworld:
        datalist = ReadWebList()
        datalen = len(datalist)
    else:
        datalist = ReadOpenWebList(
            5000, 1)  # 5000 sites for open world dataset, each with 1 instance
        datalen = len(datalist)
        print("len datalist for openworld = ", len(datalist))
    for i in range(0, datalen, cm.MAX_SITES_PER_TOR_PROCESS):
        if i + cm.MAX_SITES_PER_TOR_PROCESS < datalen:
            writeLog("data start from %s to %s" %
                     (datalist[i][0],
                      datalist[i + cm.MAX_SITES_PER_TOR_PROCESS - 1][0]))
            print("data start from %s to %s\n" %
                  (datalist[i][0],
                   datalist[i + cm.MAX_SITES_PER_TOR_PROCESS - 1][0]))
            launch_tor_with_custom_stem(
                datalist[i:i + cm.MAX_SITES_PER_TOR_PROCESS], opts.browser)
        else:
            writeLog("data start from %s to %s" %
                     (datalist[i][0], datalist[-1][0]))
            print("data start from %s to %s\n" %
                  (datalist[i][0], datalist[-1][0]))
            launch_tor_with_custom_stem(datalist[i:], opts.browser)
    ParsePcapFile()
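
The loop above walks datalist in fixed-size slices so that each Tor process handles at most cm.MAX_SITES_PER_TOR_PROCESS sites. The same chunking idiom as a small self-contained sketch (chunked and the sample list are illustrative, not from the source):

def chunked(items, size):
    """Yield consecutive slices of at most `size` elements."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

sites = ["a.com", "b.com", "c.com", "d.com", "e.com"]
for batch in chunked(sites, 2):
    print("data start from %s to %s" % (batch[0], batch[-1]))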
Example #3
    def trainModel(self, callback):
        data_size = len(self.positions_train)
        writeLog("Training...")
        p = 0
        num_iterations = 0
        num_iterations_after_report = 0
        num_report_iterations = 1
        avg_cost = 0
        #        writeLog("It: " + str(data_size * self.num_epochs // self.batch_size))
        try:
            it = 0
            while (num_report_iterations <= self.num_callbacks):
                x, y, mask = self.gen_data(self.TS_train, p,
                                           self.positions_train,
                                           self.batch_size)
                it += 1
                p += self.batch_size
                num_iterations += self.batch_size
                num_iterations_after_report += self.batch_size
                #                if(p+self.batch_size+self.seq_length >= data_size):
                #                    writeLog('Carriage Return')
                #                    p = 0;
                avg_cost += self.train(x, y, mask)
                if (callback and num_iterations_after_report >=
                        self.num_iterations_between_reports):
                    callback(num_iterations, it, avg_cost / it,
                             num_report_iterations)
                    avg_cost = 0
                    num_iterations_after_report -= self.num_iterations_between_reports
                    num_report_iterations += 1

#            callback(num_iterations, it, avg_cost / it, num_report_iterations)
        except KeyboardInterrupt:
            pass
Example #4
 def __init__(self, case_name, dataset_name, algorithm, num_layers,
              optimizer, learning_rate, batch_size, num_callbacks,
              hidden_dim_size, num_iterations_between_reports,
              grad_clipping, predict_only_outcome, final_trace_only,
              trace_length_modifier, max_num_words, truncate_unknowns, rng):
     writeLog("Using data set: " + dataset_name)
     self.algorithm = algorithm
     self.num_layers = num_layers
     self.optimizer = optimizer
     self.learning_rate = learning_rate
     self.batch_size = batch_size
     self.num_callbacks = num_callbacks
     self.traces = trace_registry[dataset_name](trace_length_modifier)
     self.dataset_size = len(self.traces)
     self.dataset_name = dataset_name
     self.case_name = case_name
     self.hidden_dim_size = hidden_dim_size
     self.num_iterations_between_reports = num_iterations_between_reports
     self.grad_clipping = grad_clipping
     self.rng = rng
     self.predict_only_outcome = predict_only_outcome
     self.final_trace_only = final_trace_only
     self.trace_length_modifier = trace_length_modifier
     self.max_num_words = max_num_words
     self.truncate_unknowns = truncate_unknowns
     lasagne.random.set_rng(rng)
     try:
         self.createModel()
     except Exception:
         writeLog("Exception: " + str(sys.exc_info()[0]))
Example #5
def TBBSetup(driverpath, controller, idx):
    driver = 0
    try:
        driver = TorBrowserDriver(driverpath, tor_cfg=cm.USE_STEM)
    except Exception as e:
        writeLog("[crawl.py error]TBBSetup error: " + str(e))
        print("[crawl.py error]TBBSetup error")
        print(str(e))
        driver = 0
    return driver
Example #6
 def reportTemperature(self, morning=True):
     rtn = self.session.post(self.reader.get('temperature', 'getstateUrl'))
     rtn = json.loads(rtn.content)
     if len(rtn['module']) > 0 and rtn['module'][0]['sfdt'] == "1":
         #self.pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 添加体温失败", 
         #        "今日添加体温失败,添加时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
         #        "上报用户名: " + self.usr + "\n"
         #        + "错误原因:" + '已经生成今日体温')
         tokenstr = rtn['module'][0]["id"]
     else:
         token_url = self.reader.get('temperature', 'token-url')
         tokenstr = bytes.decode(self.session.post(token_url).content)
         addUrl = self.reader.get('temperature', 'addUrl')
         result = self.session.post(addUrl, data = {"info": json.dumps({'token': tokenstr}, ensure_ascii=False)})
         tokenstr = json.loads(result.content)['module']
         if not json.loads(result.content)['isSuccess']:
             pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 添加体温失败", 
                     "今日添加体温失败,添加时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
                     "上报用户名: " + self.usr + "\n" + "错误原因:" + json.loads(result.content)['msg'], self.pushUrl, self.reader.get('Push', 'token'), self.reader.get('Push', 'template'))  
     #StateById = self.reader.get('temperature', 'getstateByid')
     #rtn = json.loads(self.session.post(StateById, data = {"info": json.dumps({'id': tokenstr}, ensure_ascii=False)}).content)['module']
     updateUrl = self.reader.get('temperature', 'update-url')
     data = {
         "id": tokenstr,
         "sdid1": "1",
         "tw1" : "37.2",
         "fr1" : "0",
         "bs" : "1"
     }
     rtn1 = json.loads(self.session.post(updateUrl, data={"info": json.dumps({"data": data}, ensure_ascii=False)}).content)['isSuccess']
     data = {
         "id": tokenstr,
         "sdid2": "3",
         "tw2" : "37.2",
         "fr2" : "0",
         "bs" : "2"
     }
     rtn2 = json.loads(self.session.post(updateUrl, data={"info": json.dumps({"data": data}, ensure_ascii=False)}).content)['isSuccess']
     if rtn1 and rtn2:
         writeLog('success', time.strftime("%m-%d", time.localtime()), "tempLog.txt")
         pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 体温上报成功", 
                 "今日体温上报成功,上报时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
                 "上报用户名: " + self.usr, self.pushUrl, self.reader.get('Push', 'token'), self.reader.get('Push', 'template'))
     else:
         writeLog('fail', time.strftime("%m-%d", time.localtime()), "tempLog.txt")
         pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 体温上报失败", 
                 "今日体温上报失败,上报时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
                 "上报用户名: " + self.usr, self.pushUrl, self.reader.get('Push', 'token'), self.reader.get('Push', 'template'))
Example #7
def cleanupStream(controller, crawlcnt, domain):
    print("check & remove existing streams...")
    circ_ids = [stream.circ_id for stream in controller.get_streams()]
    d = getGuard(controller, circ_ids)
    for stream in controller.get_streams():
        try:
            # print("stream id: ",stream.id,stream.circ_id,stream.target_address)
            writeStreamInfo("%s,%s,%s,%s,%s,%s,%s,%s" %
                            (domain, crawlcnt, stream.id, stream.circ_id,
                             d[stream.circ_id], stream.target_address,
                             stream.target, str(stream.target_port)))
            controller.close_stream(stream.id)
        except Exception as e:
            writeLog("### error in closing stream %s: %s" %
                     (str(stream.id), str(e)))
Example #8
def TorSetup(tor_binary):
    tor_process, controller = 0, 0
    print("in tor setup binary = ", tor_binary)
    try:
        tor_process = launch_tbb_tor_with_stem(tbb_path=cm.driverpath,
                                               torrc=cm.TorConfig,
                                               tor_binary=tor_binary)
        controller = Controller.from_port(
            port=int(cm.TorConfig['ControlPort']))
        controller.authenticate()
        print("getting tor circuit...")
        print("write entry guard/ circuit to log...")
    except Exception as e:
        print("[crawl.py error]TorSetup: " + str(e) + "\n")
        writeLog("[crawl.py error]TorSetup: " + str(e) + "\n")
        tor_process, controller = 0, 0
    return tor_process, controller
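
TorSetup pairs tbselenium's launch_tbb_tor_with_stem with a stem Controller on the configured control port. A minimal sketch of just the stem side, assuming a Tor instance is already listening on control port 9051 (the port number is illustrative):

from stem.control import Controller

# Connect to a running Tor's control port and authenticate
# (cookie or password auth, depending on the torrc).
with Controller.from_port(port=9051) as controller:
    controller.authenticate()
    # e.g. enumerate circuits, as the crawler's logging does
    for circ in controller.get_circuits():
        print(circ.id, circ.status)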
Example #9
def ParsePcapFile(filepath, NodeList, outputpath):
    fw = open(outputpath, 'w')
    fw.write(','.join(cm.packetinfo) + '\n')  # write header
    writeLog("parse filepath = %s" % (filepath))
    fp = open(filepath, 'rb')
    try:
        pcap = dpkt.pcap.Reader(fp)
        cnt = 0
        for ts, buf in pcap:
            eth = dpkt.ethernet.Ethernet(buf)
            ip = eth.ip
            ipsrc = inet_to_str(ip.src)
            ipdst = inet_to_str(ip.dst)
            if ipsrc in NodeList or ipdst in NodeList:
                output = retrieveInfo(cnt, ts, ip)
                fw.write(','.join([str(x) for x in output]) + "\n")
                # fw.write('%s,%s,%s,%s,%s\n'%(str(ts),str(ipaddr),str(dstaddr),str(flag),str(datalen)))
            cnt += 1
    except Exception as e:
        writeLog("[pcaputils.py]ParsePcapFile error: %s" % (str(e)))
    finally:
        fp.close()
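
One fragility worth noting: eth.ip raises AttributeError for non-IP frames (ARP, IPv6, ...), and the broad except then aborts the whole parse. The capture command in Example #13 filters on tcp, so every frame there is IP, but for unfiltered pcaps a defensive variant might look like this (iter_ip_packets is an illustrative helper, not from the source):

import dpkt

def iter_ip_packets(path):
    """Yield (timestamp, dpkt.ip.IP) pairs, skipping non-IP frames."""
    with open(path, 'rb') as fp:
        for ts, buf in dpkt.pcap.Reader(fp):
            eth = dpkt.ethernet.Ethernet(buf)
            if not isinstance(eth.data, dpkt.ip.IP):
                continue  # e.g. ARP, where eth.ip would raise
            yield ts, eth.data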
Example #10
 def report(self):
     rtn = self.session.post(self.addUrl)
     # print(rtn.url)
     try:
         status = rtn.json()
     except Exception:
         status = {"msg": "网页可能发生了错误"}
         writeLog('fail', time.strftime("%m-%d", time.localtime()))
         pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 上报失败", 
                 "今日上报失败,上报时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
                 "上报用户名: " + self.usr + "\n"
                 + "错误原因:" + status['msg'], self.pushUrl, self.reader.get('Push', 'token'), self.reader.get('Push', 'template'))
         return False
     print(status)
     rtn = self.session.post(
         self.editUrl, data={'info': json.dumps({'id': status['module']})})
     data = rtn.json()['module']['data'][0]
     if 'brzgtw' in data:
         data['brzgtw'] = '36.3'
     rtn = self.session.post(
         self.saveUrl, data={'info': json.dumps({'model': data})})
     try:
         print(rtn.text)
         status = rtn.json()
         if status['isSuccess']:
             writeLog('success', time.strftime("%m-%d", time.localtime()))
             pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 上报成功", 
                 "今日上报成功,上报时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
                 "上报用户名: " + self.usr, self.pushUrl, self.reader.get('Push', 'token'), self.reader.get('Push', 'template'))
             return True
         else:
             writeLog('fail', time.strftime("%m-%d", time.localtime()))
             pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 上报失败", 
                 "今日上报失败,上报时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
                 "上报用户名: " + self.usr + "\n"
                 + "错误原因:辅导员已经审查", self.pushUrl, self.reader.get('Push', 'token'), self.reader.get('Push', 'template'))
             return False
     except Exception:
         writeLog('fail', time.strftime("%m-%d", time.localtime()))
         pushMsg(time.strftime("%m-%d %H:%M:%S", time.localtime()) + " 上报失败", 
                 "今日上报失败,上报时间为" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n" +
                 "上报用户名: " + self.usr + "\n"
                 + "错误原因:" + status['msg'], self.pushUrl, self.reader.get('Push', 'token'), self.reader.get('Push', 'template'))
         return False
Example #11
def train(modelConfig, dataConfig, logConfig):
    """
    Training loop.
    :param modelConfig: model configuration
    :param dataConfig: data configuration
    :param logConfig: logging configuration
    :return:
    """
    # Model configuration
    model = modelConfig['model']
    criterion = modelConfig['criterion']
    optimzer = modelConfig['optimzer']
    epochs = modelConfig['epochs']
    device = modelConfig['device']

    # Data loaders
    trainLoader = dataConfig['trainLoader']
    validLoader = dataConfig['validLoader']
    trainLength = dataConfig['trainLength']
    validLength = dataConfig['validLength']

    # Logging and model checkpoint paths
    modelPath = logConfig['modelPath']
    historyPath = logConfig['historyPath']
    logPath = logConfig['logPath']
    lastModelPath = logConfig['lastModelPath']


    trainLosses = []
    trainAcces = []
    validLosses = []
    validAcces = []
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('train is starting in ' + now)
    bestAcc = 0.

    for epoch in range(epochs):
        print("Epoch{}/{}".format(epoch, epochs))
        print("-" * 10)

        trainLoss, trainAcc = oneEpoch_train(model, trainLoader, optimzer, criterion, device)
        validLoss, validAcc = oneEpoch_valid(model, validLoader, criterion, device)

        trainLoss = trainLoss / len(trainLoader)
        trainAcc = trainAcc / trainLength
        validLoss = validLoss / len(validLoader)
        validAcc = validAcc / validLength

        # trainLosses.append(trainLoss)
        # trainAcces.append(trainAcc)
        #
        # validLosses.append(validLoss)
        # validAcces.append(validAcc)
        # Save the model whenever validation accuracy improves
        if validAcc > bestAcc:
            bestAcc = validAcc
            # saveModel(model,modelPath)

        # Training log
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        trainLog = now + " Train loss is :{:.4f},Train accuracy is:{:.4f}%\n".format(trainLoss, 100 * trainAcc)
        validLog = now + " Valid loss is :{:.4f},Valid accuracy is:{:.4f}%\n".format(validLoss, 100 * validAcc)
        bestlog = now + ' bestAcc is {:.4f}%'.format(100 * bestAcc)
        log = trainLog + validLog

        print(log+bestlog)

        # Training history, overwritten every epoch
        # history = {
        #     'trainLosses':trainLosses,
        #     'trainAcces':trainAcces,
        #     'validLosses':validLosses,
        #     'validAcces':validAcces
        # }

        writeLog(logPath, log)
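
Note that writeLog here takes (logPath, message), unlike the single-argument writeLog in the crawler examples; each project ships its own helper. A minimal stub matching this example's call shape, as an assumption since the real helper is not shown:

import time

def writeLog(log_path, message):
    """Append a timestamped message to the given log file (illustrative stub)."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    with open(log_path, 'a', encoding='utf-8') as fp:
        fp.write(stamp + " " + message + "\n")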
Example #12
 def report(num_examples_seen, it, avg_cost, num_report_iterations):
     t2 = time.time()
     tutrain = (t2 - self.previous_time)
     self.cumul_train_time = self.cumul_train_time + tutrain
     self.time_used.append(tutrain)
     self.generate_trace(5)
     self.sr_examplesSeen.append(num_examples_seen)
     self.cms = {}
     self.cms_str = ""
     writeLog("Testing 100% training samples")
     sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
     self.sr_trains.append(sr_train)
     writeLog("Testing 100% test samples")
     sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
     writeLog("Testing 75% test samples")
     sr_tests_75p = calculateSuccessRate(self.traces_test, 0.75, 2)
     writeLog("Testing 50% test samples")
     sr_tests_50p = calculateSuccessRate(self.traces_test, 0.5, 3)
     writeLog("Testing 25% test samples")
     sr_tests_25p = calculateSuccessRate(self.traces_test, 0.25, 4)
     self.sr_tests.append(sr_test)
     self.sr_tests_75p.append(sr_tests_75p)
     self.sr_tests_50p.append(sr_tests_50p)
     self.sr_tests_25p.append(sr_tests_25p)
     self.avg_costs.append(avg_cost)
     data_size = len(self.TS_train)
     epoch = it * self.batch_size / data_size
     t3 = time.time()
     tutest = (t3 - t2)
     self.cumul_test_time = self.cumul_test_time + tutest
     self.previous_time = t3
     self.time_used_for_test.append(tutest)
     self.all_cms.append(self.cms)
     writeLog(
         "Iteration: %i (%i) Total time used: ~%f seconds (train: %f, test: %f)"
         % (num_report_iterations, num_examples_seen,
            (time.time() - self.start_time) * 1., self.cumul_train_time,
            self.cumul_test_time))
     writeLog("Epoch {} average loss = {}".format(epoch, avg_cost))
     writeLog(
         "Success rates: test: %f test 75%%: %f test 50%%: %f test 25%%: %f train: %f"
         %
         (sr_test, sr_tests_75p, sr_tests_50p, sr_tests_25p, sr_train))
     writeResultRow([
         datetime.now().replace(microsecond=0).isoformat(), "ok", "",
         self.case_name, self.dataset_name, self.dataset_size,
         self.algorithm, self.num_layers, self.hidden_dim_size,
         self.optimizer, self.learning_rate, self.seq_length,
         self.batch_size, self.grad_clipping,
         self.num_iterations_between_reports, num_report_iterations,
         num_examples_seen, epoch, tutrain, self.cumul_train_time,
         tutest, self.cumul_test_time, sr_train, sr_test, sr_tests_75p,
         sr_tests_50p, sr_tests_25p, avg_cost, self.auc, self.cms[1][0],
         self.cms[1][1], self.cms[1][2], self.cms[1][3],
         str(self.cms_str), self.predict_only_outcome,
         self.final_trace_only, self.trace_length_modifier,
         self.num_iterations_between_reports *
         self.num_callbacks == 100000 * 50, self.max_num_words,
         self.truncate_unknowns
     ])
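
The writeResultRow call above unpacks self.cms[1] as four counts. Judging from calculateSuccessRate in Example #17, these are [TP, TN, FP, FN] over boolean expected/actual pairs; a minimal sketch of that bookkeeping:

def confusion_counts(expected, actual):
    """Return [TP, TN, FP, FN] for parallel lists of booleans."""
    cm = [0, 0, 0, 0]
    for e, a in zip(expected, actual):
        if e and a:
            cm[0] += 1  # true positive
        elif not e and not a:
            cm[1] += 1  # true negative
        elif not e and a:
            cm[2] += 1  # false positive
        else:
            cm[3] += 1  # false negative
    return cm

print(confusion_counts([True, False, True], [True, True, False]))  # [1, 0, 1, 1]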
Example #13
def launch_tor_with_custom_stem(datalist, browser):
    print("length of data: ", len(datalist))
    tor_binary = join(cm.TorProxypath, cm.DEFAULT_TOR_BINARY_PATH)
    tor_process, controller = 0, 0
    xvfb_display = None  # set only after Tor setup succeeds; checked in the finally block
    try:
        TRYTOR_CNT = cm.TRYCNT
        while TRYTOR_CNT > 0 and tor_process == 0 and controller == 0:
            print("try to setup tor:", str(TRYTOR_CNT))
            tor_process, controller = TorSetup(tor_binary)
            TRYTOR_CNT -= 1
        if tor_process == 0:
            raise TorSetupError
        print("finish tor proxy setup...")
        xvfb_display = start_xvfb()  # virtual display
        for ele in datalist:
            t = getTime()
            savepath, out_img = SetOutputPath(ele, t)
            p = 0
            try:
                driver, TRYCNT = 0, cm.TRYCNT
                while driver == 0 and TRYCNT != 0:
                    print("try to setup tbb:", str(TRYCNT))
                    args = (cm.driverpath, controller,
                            ele[2]) if browser == 'TBB' else ()
                    options = {
                        'TBB': TBBSetup,
                        'FF': FFSetup,
                        'CR': ChromeSetup
                    }
                    driver = options[browser](*args)
                    TRYCNT -= 1
                if driver == 0:
                    raise TBBSetupError

                cmd = "tcpdump -i %s tcp and not port ssh -w %s" % (
                    cm.netInterface, savepath)
                print('cmd = ', cmd)
                cmd = cmd.split(' ')
                p = subprocess.Popen(cmd)
                try:
                    timeout(cm.VISITPAGE_TIMEOUT)
                    driver.get('https://' + ele[0])
                    cancel_timeout()
                    time.sleep(cm.DURATION_VISIT_PAGE)
                    p.terminate()
                    if (ele[2] == 0 or ele[2] == 2):
                        driver.get_screenshot_as_file(out_img)
                    writeLog(str(t) + "," + ele[0] + "," + str(ele[2]))
                    print("Finish tcpdump sleep...")
                except TimeExceededError:
                    writeLog("Error crawling," + ele[0] + "," + str(ele[2]) +
                             "\n" + str("Page visit Timeout"))
                finally:
                    cancel_timeout()
            except TBBSetupError:
                print("[crawl.py error]: unable to setup TBB")
                writeLog("[crawl.py error]: unable to setup TBB")
            except Exception as e:
                with open(cm.ErrorFilePath, 'a+') as fw:
                    fw.write(ele[0] + "," + str(e) + "\n")
                writeLog("Error crawling," + ele[0] + "," + str(ele[2]) +
                         "\n" + str(e))
            finally:
                if p != 0 and p.returncode != 0:
                    try:
                        p.terminate()
                    except Exception as e:
                        writeLog("[crawl.py] tcpdump terminate error: " +
                                 str(e))
                if controller != 0:
                    cleanupStream(controller, str(ele[2]), ele[0])
                if driver != 0:
                    try:
                        timeout(30)
                        driver.quit()
                        cancel_timeout()
                    except Exception as e:
                        cancel_timeout()
                        writeLog("[crawl.py] driver quit error: " + str(e))
                if ele[2] != 3:
                    time.sleep(cm.PAUSE_BETWEEN_INSTANCES)
                else:
                    time.sleep(cm.PAUSE_BETWEEN_SITES)
                RemoveTmpFile()
                RemoveProcess()
    except TorSetupError:
        print("[crawl.py] unable to set up tor proxy")
        writeLog("[crawl.py] unable to set up tor proxy")
    except Exception as e:
        print("[crawl.py]launch_tor_with_custom_stem Error")
        print("Error:", str(e))
        writeLog("[crawl.py]launch_tor_with_custom_stem Error : " + str(e))
    finally:
        if tor_process != 0:
            tor_process.kill()
        if xvfb_display is not None:
            stop_xvfb(xvfb_display)
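
Both the Tor and the browser setup above retry in a counted loop against a 0 sentinel. The same pattern as a small generic helper (retry and its arguments are illustrative, not from the source):

def retry(setup_fn, attempts):
    """Call setup_fn until it returns a truthy value or attempts run out."""
    result = None
    for tries_left in range(attempts, 0, -1):
        print("tries left:", tries_left)
        result = setup_fn()
        if result:
            break
    return result

# e.g.: driver = retry(lambda: TBBSetup(cm.driverpath, controller, idx), cm.TRYCNT)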
Example #14
    def run(self):
        """Run the main loop."""
        while (True):
            self._captureManager.enterFrame()

            frame = self._captureManager.frame

            if frame is not None:
                if self.autoBackgroundImg is None:
                    self.autoBackgroundImg = numpy.float32(frame)
                rawFrame = frame.copy()
                # First work out the region of interest by
                #    subtracting the fixed background image
                #    to create a mask.
                #print frame
                #print self._background_depth_img
                absDiff = cv2.absdiff(frame, self._background_depth_img)
                benMask, maskArea = filters.getBenMask(absDiff, 8)

                cv2.accumulateWeighted(frame, self.autoBackgroundImg, 0.05)
                # Convert the background image into the same format
                # as the main frame.
                #bg = self.autoBackgroundImg
                bg = cv2.convertScaleAbs(self.autoBackgroundImg, alpha=1.0)
                # Subtract the background from the frame image
                cv2.absdiff(frame, bg, frame)
                # Scale the difference image to make it more sensitive
                # to changes.
                cv2.convertScaleAbs(frame, frame, alpha=100)
                # Apply the mask so we only see the test subject.
                frame = cv2.multiply(frame, benMask, dst=frame, dtype=-1)

                if (maskArea <= self.cfg.getConfigInt('area_threshold')):
                    bri = (0, 0, 0)
                else:
                    # Calculate the brightness of the test subject.
                    bri = filters.getMean(frame, benMask)

                # Add the brightness to the time series ready for analysis.
                self._ts.addSamp(bri[0])
                self._ts.addImg(rawFrame)

                # Write timeseries to a file every 'output_framecount' frames.
                if (self._outputFrameCount >=
                        self.cfg.getConfigInt('output_framecount')):
                    # Write timeseries to file
                    self._ts.writeToFile("%s/%s" % \
                        ( self.cfg.getConfigStr('output_directory'),
                          self.cfg.getConfigStr('ts_fname')
                      ))
                    self._outputFrameCount = 0
                else:
                    self._outputFrameCount = self._outputFrameCount + 1

                # Only do the analysis every 15 frames (0.5 sec), or whatever
                # is specified in configuration file analysis_framecount
                # parameter.
                if (self._frameCount <
                        self.cfg.getConfigInt('analysis_framecount')):
                    self._frameCount = self._frameCount + 1
                else:
                    # Look for peaks in the brightness (=movement).
                    self._nPeaks, self._ts_time, self._rate = self._ts.findPeaks(
                    )
                    #print "%d peaks in %3.2f sec = %3.1f bpm" % \
                    #    (nPeaks,ts_time,rate)

                    oldStatus = self._status
                    if (maskArea > self.cfg.getConfigInt('area_threshold')):
                        # Check for alarm levels
                        if (self._rate > self.cfg.getConfigInt("rate_warn")):
                            self._status = self.ALARM_STATUS_OK
                        elif (self._rate >
                              self.cfg.getConfigInt("rate_alarm")):
                            self._status = self.ALARM_STATUS_WARN
                        else:
                            self._status = self.ALARM_STATUS_FULL
                    else:
                        self._status = self.ALARM_STATUS_NOT_FOUND


                    if (oldStatus == self.ALARM_STATUS_OK and
                        self._status == self.ALARM_STATUS_WARN) or \
                        (oldStatus == self.ALARM_STATUS_WARN and
                         self._status == self.ALARM_STATUS_FULL):
                        # Write timeseries to file
                        self._ts.writeToFile("%s/%s" % \
                            ( self.cfg.getConfigStr('output_directory'),
                              self.cfg.getConfigStr('alarm_ts_fname')
                          ),bgImg=self._background_depth_img)

                    # Collect the analysis results together and send them
                    # to the web server.
                    resultsDict = {}
                    resultsDict['fps'] = "%3.0f" % self.fps
                    resultsDict['bri'] = "%4.0f" % self._ts.mean
                    resultsDict['area'] = "%6.0f" % maskArea
                    resultsDict['nPeaks'] = "%d" % self._nPeaks
                    resultsDict['ts_time'] = self._ts_time
                    resultsDict['rate'] = "%d" % self._rate
                    resultsDict['time_t'] = time.ctime()
                    resultsDict['status'] = self._status
                    self._ws.setAnalysisResults(resultsDict)

                    # Write the results to file as a json string
                    utils.writeJSON(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     self.cfg.getConfigStr("data_fname")))
                    utils.writeLog(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     "benFinder_alarms.log"))
                    # Plot the graph of brightness, and save the images
                    # to disk.
                    self._ts.plotRawData(
                        file=True,
                        fname="%s/%s" % \
                        (self._tmpdir,self.cfg.getConfigStr("chart_fname")))

                    cv2.imwrite(
                        "%s/%s" % (self._tmpdir,
                                   self.cfg.getConfigStr("raw_image_fname")),
                        rawFrame)
                    cv2.imwrite(
                        "%s/%s" %
                        (self._tmpdir,
                         self.cfg.getConfigStr("masked_image_fname")), frame)
                    self._frameCount = 0
            else:
                print "Null frame received - assuming end of file and exiting"
                break
            self._captureManager.exitFrame()
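
The frame-processing core above maintains a running-average background with cv2.accumulateWeighted and amplifies each frame's deviation from it. A condensed sketch of just that technique, assuming 8-bit frames from any OpenCV source (names and constants are illustrative):

import cv2
import numpy as np

background = None  # running average, kept as float32

def foreground(frame, alpha=0.05, gain=100):
    """Return an amplified |frame - running background| image."""
    global background
    if background is None:
        background = np.float32(frame)
    cv2.accumulateWeighted(frame, background, alpha)   # update the average
    bg = cv2.convertScaleAbs(background)               # back to uint8
    diff = cv2.absdiff(frame, bg)                      # deviation from background
    return cv2.convertScaleAbs(diff, alpha=gain)       # amplify small changes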
Example #15
    def initializeTraces(self):
        word_to_index = []
        index_to_word = []

        TRAIN_SIZE = int(self.dataset_size * TRAIN_SAMPLE_PERCENTAGE)
        TEST_SIZE = int(self.dataset_size * (1 - TRAIN_SAMPLE_PERCENTAGE))
        indexes = self.rng.permutation(self.dataset_size)
        #        indexes = range(self.dataset_size)
        self.traces_train = self.traces[indexes[:TRAIN_SIZE]]
        self.traces_test = self.traces[indexes[TRAIN_SIZE:]]

        # Tokenize the sentences into words
        writeLog("Tokenizing %s sentences." % len(self.traces))
        #    tokenized_sentences = [nltk.word_tokenize(trace.sentence) for trace in traces]
        tokenized_sentences_train = [
            nltk.WhitespaceTokenizer().tokenize(trace.sentence)
            for trace in self.traces_train
        ]
        tokenized_sentences = [
            nltk.WhitespaceTokenizer().tokenize(trace.sentence)
            for trace in self.traces
        ]

        # Count the word frequencies
        word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences_train))
        writeLog("Found %d unique words tokens." % len(word_freq.items()))

        # Get the most common words and build index_to_word and word_to_index vectors
        vocab = sorted(word_freq.items(),
                       key=lambda x: (x[1], x[0]),
                       reverse=True)
        writeLog("Using vocabulary size %d." % len(vocab))
        writeLog(
            "The least frequent word in our vocabulary is '%s' and appeared %d times."
            % (vocab[-1][0], vocab[-1][1]))

        words = []
        for x in vocab:
            w = x[0]
            if (w != IN_SELECTION_TOKEN and w != NOT_IN_SELECTION_TOKEN):
                words.append(w)
        words = np.asarray(words)

        if (self.max_num_words is not None
                and self.max_num_words < len(words)):
            words = words[:self.max_num_words]
            writeLog(
                "Vocabulary was truncated to %d most frequent words in training set."
                % len(words))
        index_to_word = np.concatenate(
            [[UNKNOWN_TOKEN, IN_SELECTION_TOKEN, NOT_IN_SELECTION_TOKEN],
             words])
        word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
        self.seq_length = 0
        # Replace all words not in our vocabulary with the unknown token
        for i, sent in enumerate(tokenized_sentences):
            ts = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
            if (self.truncate_unknowns):
                origts = ts
                ts = []
                wasUnknown = False
                for w in origts:
                    isUnknown = w == UNKNOWN_TOKEN
                    if ((not isUnknown) or (not wasUnknown)):
                        ts.append(w)
                    wasUnknown = isUnknown
            tokenized_sentences[i] = ts
            l = len(tokenized_sentences[i])
            if (l > self.seq_length):
                self.seq_length = l
        writeLog("Maximum sequence length is %d tokens." % (self.seq_length))
        self.word_to_index = word_to_index
        self.index_to_word = index_to_word
        self.vocab_size = len(self.word_to_index)

        tokenized_sentences = np.asarray(tokenized_sentences)
        self.TS_train = tokenized_sentences[indexes[:TRAIN_SIZE]]
        self.positions_train = []
        if (self.final_trace_only):
            for i, ts in enumerate(self.TS_train):
                l = len(ts)
                if l > 1:
                    self.positions_train.append([i, l - 1])
        else:
            for i, ts in enumerate(self.TS_train):
                l = len(ts)
                if l > 1:
                    for pos in range(l - 1):
                        self.positions_train.append([i, pos])

        self.TS_test = tokenized_sentences[indexes[TRAIN_SIZE:]]
        self.positions_test = []
        for i, ts in enumerate(self.TS_test):
            l = len(ts)
            if l > 1:
                for pos in range(l - 1):
                    self.positions_test.append([i, pos])
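
The truncate_unknowns branch above collapses each run of consecutive UNKNOWN_TOKENs into a single token. The same effect, swapped for itertools.groupby as a compact sketch (UNKNOWN_TOKEN stands in for the module constant):

from itertools import groupby

UNKNOWN_TOKEN = "UNKNOWN_TOKEN"

def collapse_unknowns(tokens):
    """Keep every token, but squeeze runs of UNKNOWN_TOKEN to length one."""
    out = []
    for is_unknown, run in groupby(tokens, key=lambda w: w == UNKNOWN_TOKEN):
        out.extend([UNKNOWN_TOKEN] if is_unknown else list(run))
    return out

print(collapse_unknowns(["a", "UNKNOWN_TOKEN", "UNKNOWN_TOKEN", "b"]))
# ['a', 'UNKNOWN_TOKEN', 'b']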
Example #16
File: depict.py Project: vonalan/DC
            loss = nn.partial_fit(xs)

            # log = '%s  batch: %10d  cost: %.8e nmi-1: %.8f nmi-2: %.8f nmi-3: %.8f' % (
            #     dt.datetime.now(), i, loss, 0, 0, 0)
            # print(log)

        if not epoch % 1:
            loss, dys = nn.predict(X)

            nmi1 = 0.5
            nmi2 = 0.5
            nmi3 = 0.5

            log = '%s  epoch: %10d  cost: %.8e nmi-1: %.8f nmi-2: %.8f nmi-3: %.8f' % (
                dt.datetime.now(), epoch, loss, nmi1, nmi2, nmi3)
            utils.writeLog('../log', name, log)
            print(log)

            # dhs = np.histogram(dys, bins=outdim)[0]

            # print(khs)
            # print(dhs)

        if not epoch % 100:
            nn.saveModel('../model', epoch)

        epoch += 1

        flag = False
        if flag: break
    print("optimization is finished! ")
Example #17
    def createModel(self):
        self.initializeTraces()

        writeLog("Preparing " + str(self.num_layers) +
                 " layers for algorithm: " + self.algorithm)

        # First, we build the network, starting with an input layer
        # Recurrent layers expect input of shape
        # (batch size, SEQ_LENGTH, num_features)
        mask_var = T.matrix('mask')

        l_in = lasagne.layers.InputLayer(shape=(None, None, self.vocab_size))
        l_mask = lasagne.layers.InputLayer((None, None), mask_var)
        l_layers = [l_in]

        # We now build the LSTM layer which takes l_in as the input layer
        # We clip the gradients at GRAD_CLIP to prevent the problem of exploding gradients.
        if (self.algorithm == "gru"):
            layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.GRULayer(
                parentLayer,
                self.hidden_dim_size,
                grad_clipping=self.grad_clipping,
                mask_input=l_mask if isFirstLayer else None,
                only_return_final=isLastLayer)
        else:
            # All gates have initializers for the input-to-gate and hidden state-to-gate
            # weight matrices, the cell-to-gate weight vector, the bias vector, and the nonlinearity.
            # The convention is that gates use the standard sigmoid nonlinearity,
            # which is the default for the Gate class.
            #            gate_parameters = lasagne.layers.recurrent.Gate(
            #                W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
            #                b=lasagne.init.Constant(0.))
            #            cell_parameters = lasagne.layers.recurrent.Gate(
            #                W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
            #                # Setting W_cell to None denotes that no cell connection will be used.
            #                W_cell=None, b=lasagne.init.Constant(0.),
            #                # By convention, the cell nonlinearity is tanh in an LSTM.
            #                nonlinearity=lasagne.nonlinearities.tanh)

            layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.LSTMLayer(
                parentLayer,
                self.hidden_dim_size,
                grad_clipping=self.grad_clipping,
                mask_input=l_mask if isFirstLayer else None,
                nonlinearity=lasagne.nonlinearities.tanh,
                # Here, we supply the gate parameters for each gate
                #                    ingate=gate_parameters, forgetgate=gate_parameters,
                #                    cell=cell_parameters, outgate=gate_parameters,
                # We'll learn the initialization and use gradient clipping
                only_return_final=isLastLayer)

        for layerId in range(self.num_layers):
            l_layers.append(
                layerCreatorFunc(l_layers[layerId], layerId == 0,
                                 layerId == self.num_layers - 1))

        # The output of l_forward_2 of shape (batch_size, N_HIDDEN) is then passed through the softmax nonlinearity to
        # create probability distribution of the prediction
        # The output of this stage is (batch_size, vocab_size)
        l_out = lasagne.layers.DenseLayer(
            l_layers[len(l_layers) - 1],
            num_units=self.vocab_size,
            W=lasagne.init.Normal(),
            nonlinearity=lasagne.nonlinearities.softmax)
        l_layers.append(l_out)

        # Theano tensor for the targets
        target_values = T.ivector('target_output')
        #!        target_var = T.matrix('target_output')

        # lasagne.layers.get_output produces a variable for the output of the net
        network_output = lasagne.layers.get_output(l_out)

        # https://github.com/Lasagne/Lasagne/blob/master/examples/recurrent.py
        # The network output will have shape (n_batch, 1); let's flatten to get a
        # 1-dimensional vector of predicted values
        #        predicted_values = network_output.flatten()

        #        flat_target_values = target_values.flatten()

        # Our cost will be mean-squared error
        #        cost = T.mean((predicted_values - flat_target_values)**2)
        #        cost = T.mean((network_output - target_values)**2)
        # The loss function is calculated as the mean of the (categorical) cross-entropy between the prediction and target.
        #!        cost = T.nnet.categorical_crossentropy(network_output,target_var).mean()
        cost = T.nnet.categorical_crossentropy(network_output,
                                               target_values).mean()

        # Retrieve all parameters from the network
        all_params = lasagne.layers.get_all_params(l_out, trainable=True)

        # Compute AdaGrad updates for training
        writeLog("Computing updates...")
        writeLog("Using optimizer: " + self.optimizer)
        if (self.optimizer == "sgd"):
            updates = lasagne.updates.sgd(cost, all_params, self.learning_rate)
        elif (self.optimizer == "adagrad"):
            updates = lasagne.updates.adagrad(cost, all_params,
                                              self.learning_rate)
        elif (self.optimizer == "adadelta"):
            updates = lasagne.updates.adagrad(cost, all_params,
                                              self.learning_rate, 0.95)
        elif (self.optimizer == "momentum"):
            updates = lasagne.updates.momentum(cost, all_params,
                                               self.learning_rate, 0.9)
        elif (self.optimizer == "nesterov_momentum"):
            updates = lasagne.updates.nesterov_momentum(
                cost, all_params, self.learning_rate, 0.9)
        elif (self.optimizer == "rmsprop"):
            updates = lasagne.updates.rmsprop(cost, all_params,
                                              self.learning_rate, 0.9)
        else:
            updates = lasagne.updates.adam(cost,
                                           all_params,
                                           self.learning_rate,
                                           beta1=0.9,
                                           beta2=0.999)

        # Theano functions for training and computing cost
        writeLog("Compiling train function...")
        self.train = theano.function(
            [l_in.input_var, target_values, l_mask.input_var],
            cost,
            updates=updates,
            allow_input_downcast=True)
        #!        self.train = theano.function([l_in.input_var, target_var, l_mask.input_var], cost, updates=updates, allow_input_downcast=True)
        writeLog("Compiling train cost computing function...")
        self.compute_cost = theano.function(
            [l_in.input_var, target_values, l_mask.input_var],
            cost,
            allow_input_downcast=True)
        #!        self.compute_cost = theano.function([l_in.input_var, target_var, l_mask.input_var], cost, allow_input_downcast=True)

        # In order to generate text from the network, we need the probability distribution of the next character given
        # the state of the network and the input (a seed).
        # In order to produce the probability distribution of the prediction, we compile a function called probs.
        writeLog("Compiling propabilities computing function...")
        self.propabilities = theano.function(
            [l_in.input_var, l_mask.input_var],
            network_output,
            allow_input_downcast=True)

        self.start_time = time.time()
        self.previous_time = self.start_time
        self.cumul_train_time = 0
        self.cumul_test_time = 0
        self.auc = 0
        self.sr_trains = []
        self.sr_tests = []
        self.sr_tests_75p = []
        self.sr_tests_50p = []
        self.sr_tests_25p = []
        self.sr_examplesSeen = []
        self.time_used = []
        self.avg_costs = []
        self.time_used_for_test = []
        self.all_cms = []

        def predict_outcome(tracesToCalculateFor, selIndex, notSelIndex,
                            tracePercentage):
            batches, masks = self.gen_prediction_data(tracesToCalculateFor,
                                                      tracePercentage)
            correct = 0
            predictions = []
            probs_out = []
            for i in range(len(batches)):
                x = batches[i]
                mask = masks[i]
                probs = self.propabilities(x, mask)
                for prob in probs:
                    selProb = prob[selIndex]
                    notSelProb = prob[notSelIndex]
                    probs_out.append(selProb / (selProb + notSelProb))
                    predictions.append(selProb >= notSelProb)
            return predictions, probs_out

        def calculateSuccessRate(tracesToCalculateFor, tracePercentage,
                                 testId):
            selIndex = self.word_to_index[IN_SELECTION_TOKEN]
            notSelIndex = self.word_to_index[NOT_IN_SELECTION_TOKEN]
            predictions, probs = predict_outcome(tracesToCalculateFor,
                                                 selIndex, notSelIndex,
                                                 tracePercentage)
            numSuccess = 0
            cm = [0, 0, 0, 0]
            exps = []
            for i in range(len(tracesToCalculateFor)):
                expected = tracesToCalculateFor[i].isSelected
                actual = predictions[i]
                exps.append(1 if expected else 0)
                numSuccess += 1 if expected == actual else 0
                cm[0] += 1 if expected and actual else 0
                cm[1] += 1 if not expected and not actual else 0
                cm[2] += 1 if not expected and actual else 0
                cm[3] += 1 if expected and not actual else 0
            self.cms[testId] = cm
            self.cms_str += ":%i_%i_%i_%i" % (cm[0], cm[1], cm[2], cm[3])
            if (testId == 1):
                self.auc = metrics.roc_auc_score(exps, probs)
            return numSuccess / len(tracesToCalculateFor)

        def report(num_examples_seen, it, avg_cost, num_report_iterations):
            t2 = time.time()
            tutrain = (t2 - self.previous_time)
            self.cumul_train_time = self.cumul_train_time + tutrain
            self.time_used.append(tutrain)
            self.generate_trace(5)
            self.sr_examplesSeen.append(num_examples_seen)
            self.cms = {}
            self.cms_str = ""
            writeLog("Testing 100% training samples")
            sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
            self.sr_trains.append(sr_train)
            writeLog("Testing 100% test samples")
            sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
            writeLog("Testing 75% test samples")
            sr_tests_75p = calculateSuccessRate(self.traces_test, 0.75, 2)
            writeLog("Testing 50% test samples")
            sr_tests_50p = calculateSuccessRate(self.traces_test, 0.5, 3)
            writeLog("Testing 25% test samples")
            sr_tests_25p = calculateSuccessRate(self.traces_test, 0.25, 4)
            self.sr_tests.append(sr_test)
            self.sr_tests_75p.append(sr_tests_75p)
            self.sr_tests_50p.append(sr_tests_50p)
            self.sr_tests_25p.append(sr_tests_25p)
            self.avg_costs.append(avg_cost)
            data_size = len(self.TS_train)
            epoch = it * self.batch_size / data_size
            t3 = time.time()
            tutest = (t3 - t2)
            self.cumul_test_time = self.cumul_test_time + tutest
            self.previous_time = t3
            self.time_used_for_test.append(tutest)
            self.all_cms.append(self.cms)
            writeLog(
                "Iteration: %i (%i) Total time used: ~%f seconds (train: %f, test: %f)"
                % (num_report_iterations, num_examples_seen,
                   (time.time() - self.start_time) * 1., self.cumul_train_time,
                   self.cumul_test_time))
            writeLog("Epoch {} average loss = {}".format(epoch, avg_cost))
            writeLog(
                "Success rates: test: %f test 75%%: %f test 50%%: %f test 25%%: %f train: %f"
                %
                (sr_test, sr_tests_75p, sr_tests_50p, sr_tests_25p, sr_train))
            writeResultRow([
                datetime.now().replace(microsecond=0).isoformat(), "ok", "",
                self.case_name, self.dataset_name, self.dataset_size,
                self.algorithm, self.num_layers, self.hidden_dim_size,
                self.optimizer, self.learning_rate, self.seq_length,
                self.batch_size, self.grad_clipping,
                self.num_iterations_between_reports, num_report_iterations,
                num_examples_seen, epoch, tutrain, self.cumul_train_time,
                tutest, self.cumul_test_time, sr_train, sr_test, sr_tests_75p,
                sr_tests_50p, sr_tests_25p, avg_cost, self.auc, self.cms[1][0],
                self.cms[1][1], self.cms[1][2], self.cms[1][3],
                str(self.cms_str), self.predict_only_outcome,
                self.final_trace_only, self.trace_length_modifier,
                self.num_iterations_between_reports *
                self.num_callbacks == 100000 * 50, self.max_num_words,
                self.truncate_unknowns
            ])
#            self.draw_chart()

#        writeLog("Calculating initial probabilities.")
#        self.sr_examplesSeen.append(0)

        self.cms = {}
        self.cms_str = ""
        #        sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
        #        self.sr_trains.append(sr_train)
        #        sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
        #        self.sr_tests.append(sr_test)
        #        self.time_used.append(time.time() - self.start_time)
        #        self.avg_costs.append(0)
        #        writeLog("Initial success rates: test: %f  train: %f" % (sr_test, sr_train))

        num_examples_seen = self.trainModel(report)

        self.cms = {}
        self.cms_str = ""
        self.sr_examplesSeen.append(num_examples_seen)
        sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
        self.sr_trains.append(sr_train)
        sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
        self.sr_tests.append(sr_test)
        self.avg_costs.append(0)
        writeLog("Final success rates: test: %f  train: %f" %
                 (sr_test, sr_train))
        self.time_used.append(self.cumul_train_time)
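
The compiled Theano functions take a one-hot input tensor of shape (batch, seq_length, vocab_size) plus a matching mask, exactly as generate_trace (Example #1) builds by hand. gen_data itself is not shown; a minimal sketch of packing index sequences into such a batch might look like this (one_hot_batch is illustrative):

import numpy as np

def one_hot_batch(seqs, seq_length, vocab_size):
    """Pack index sequences into (batch, seq_length, vocab_size) one-hots
    with a (batch, seq_length) mask marking the real (non-padding) steps."""
    x = np.zeros((len(seqs), seq_length, vocab_size), dtype=np.float32)
    mask = np.zeros((len(seqs), seq_length), dtype=np.float32)
    for b, seq in enumerate(seqs):
        for t, idx in enumerate(seq[:seq_length]):
            x[b, t, idx] = 1.0
            mask[b, t] = 1.0
    return x, mask

x, mask = one_hot_batch([[2, 0, 1], [3]], seq_length=4, vocab_size=5)
print(x.shape, mask.sum(axis=1))  # (2, 4, 5) [3. 1.]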
Example #18
def train(modelConfig, dataConfig, logConfig):
    """
    训练
    :param modelConfig: 模型配置
    :param dataConfig: 数据配置
    :param logConfig:  日志配置
    :return:
    """
    # Model configuration
    model = modelConfig['model']
    criterion = modelConfig['criterion']
    optimzer = modelConfig['optimzer']
    epochs = modelConfig['epochs']
    device = modelConfig['device']

    # Data loaders
    trainLoader = dataConfig['trainLoader']
    validLoader = dataConfig['validLoader']
    trainLength = dataConfig['trainLength']
    validLength = dataConfig['validLength']

    # Logging and model checkpoint paths
    modelPath = logConfig['modelPath']
    historyPath = logConfig['historyPath']
    logPath = logConfig['logPath']
    lastModelPath = logConfig['lastModelPath']

    trainLosses = []
    trainAcces = []
    validLosses = []
    validAcces = []
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('train is starting in ' + now)

    best_L1_Acc = 0.
    best_L2_Acc = 0.
    best_L3_Acc = 0.
    best_concat_Acc = 0.
    best_com_Acc = 0.
    best_epoch = 0

    for epoch in range(epochs):
        print("Epoch{}/{}".format(epoch, epochs))
        print("-" * 10)

        loss_1, loss_2, loss_3, loss_concat, loss, acc_1, acc_2, acc_3, acc_com \
            = oneEpoch_train(model, trainLoader, optimzer, criterion, device)

        val_loss_1, val_acc_1, val_loss_2, val_acc_2, val_loss_3, val_acc_3, val_loss_concat, val_acc_concat, val_loss_com, val_acc_com\
            = oneEpoch_valid(model, validLoader, criterion, device)

        loss_1 = loss_1 / len(trainLoader)
        loss_2 = loss_2 / len(trainLoader)
        loss_3 = loss_3 / len(trainLoader)
        loss_concat = loss_concat / len(trainLoader)
        loss = loss / len(trainLoader)

        acc_1 = acc_1 / trainLength
        acc_2 = acc_2 / trainLength
        acc_3 = acc_3 / trainLength
        acc_concat = acc_com / trainLength

        val_loss_1 = val_loss_1 / len(validLoader)
        val_loss_2 = val_loss_2 / len(validLoader)
        val_loss_3 = val_loss_3 / len(validLoader)
        val_loss_concat = val_loss_concat / len(validLoader)
        val_loss_com = val_loss_com / len(validLoader)

        val_acc_1 = val_acc_1 / validLength
        val_acc_2 = val_acc_2 / validLength
        val_acc_3 = val_acc_3 / validLength
        val_acc_concat = val_acc_concat / validLength
        val_acc_com = val_acc_com / validLength

        # Save the model whenever validation accuracy improves
        if val_acc_1 > best_L1_Acc:
            best_L1_Acc = val_acc_1

        if val_acc_2 > best_L2_Acc:
            best_L2_Acc = val_acc_2

        if val_acc_3 > best_L3_Acc:
            best_L3_Acc = val_acc_3

        if val_acc_concat > best_concat_Acc:
            best_concat_Acc = val_acc_concat

        if val_acc_com > best_com_Acc:
            best_epoch = epoch
            best_com_Acc = val_acc_com
            # saveModel(model,modelPath)

        # Training log
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        train_L1_Log = now + " Train L1 loss is :{:.4f},Train accuracy is:{:.4f}%\n".format(
            loss_1, 100 * acc_1)
        train_L2_Log = now + " Train L2 loss is :{:.4f},Train accuracy is:{:.4f}%\n".format(
            loss_2, 100 * acc_2)
        train_L3_Log = now + " Train L3 loss is :{:.4f},Train accuracy is:{:.4f}%\n".format(
            loss_3, 100 * acc_3)
        train_concat_Log = now + " Train concat loss is :{:.4f},Train accuracy is:{:.4f}%\n".format(
            loss_concat, 100 * acc_concat)
        train_total_Log = now + " Train total loss is :{:.4f}\n\n".format(loss)

        val_L1_log = now + " Valid L1 loss is :{:.4f},Valid accuracy is:{:.4f}%\n".format(
            val_loss_1, 100 * val_acc_1)
        val_L2_log = now + " Valid L2 loss is :{:.4f},Valid accuracy is:{:.4f}%\n".format(
            val_loss_2, 100 * val_acc_2)
        val_L3_log = now + " Valid L3 loss is :{:.4f},Valid accuracy is:{:.4f}%\n".format(
            val_loss_3, 100 * val_acc_3)
        val_concat_log = now + " Valid concat loss is :{:.4f},Valid accuracy is:{:.4f}%\n".format(
            val_loss_concat, 100 * val_acc_concat)
        val_com_log = now + " Valid com loss is :{:.4f},Valid accuracy is:{:.4f}%\n\n".format(
            val_loss_com, 100 * val_acc_com)

        best_L1_log = now + ' best L1 Acc is {:.4f}%\n'.format(
            100 * best_L1_Acc)
        best_L2_log = now + ' best L2 Acc is {:.4f}%\n'.format(
            100 * best_L2_Acc)
        best_L3_log = now + ' best L3 Acc is {:.4f}%\n'.format(
            100 * best_L3_Acc)
        best_concat_log = now + ' best concat Acc is {:.4f}%\n'.format(
            100 * best_concat_Acc)
        best_com_log = now + ' best com Acc is {:.4f}%\n'.format(
            100 * best_com_Acc)
        best_epoch_log = now + ' best Acc epoch is :' + str(
            best_epoch) + "\n\n"

        train_log = train_L1_Log + train_L2_Log + train_L3_Log + train_concat_Log + train_total_Log
        val_log = val_L1_log + val_L2_log + val_L3_log + val_concat_log + val_com_log
        best_log = best_L1_log + best_L2_log + best_L3_log + best_concat_log + best_com_log + best_epoch_log

        print(train_log + val_log + best_log)

        # Training history, overwritten every epoch
        # history = {
        #     'trainLosses':trainLosses,
        #     'trainAcces':trainAcces,
        #     'validLosses':validLosses,
        #     'validAcces':validAcces
        # }

        writeLog(logPath, train_log + val_log + best_log)