class XioFigurePlot(QtGui.QWidget):
    '''Plotting widget class.
    '''

    def __init__(self):
        super(XioFigurePlot, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.thread_figure = Timer('updatePlay()', sleep_time=2)
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()

    def draw(self):
        def draw_fp():  # draw the loss pie chart
            fp = Figure_Pie()
            da = data_access.EquipmentData()
            result = da.select()
            fp.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))  # '*' unpacks the tuple, turning (1, 1, 1, 1) into 1 1 1 1
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # draw the daily OEE chart
            L_eff = []
            oee = Figure_OEE()
            da = data_access.OEEData()
            result = da.select()
            for i in range(1, len(result[-1])):
                if result[-1][i] is not None:
                    L_eff.append(result[-1][i])
            oee.plot(*L_eff)  # unpack the hourly efficiency values
            graphicscene_oee = QtGui.QGraphicsScene()
            graphicscene_oee.addWidget(oee.canvas)
            self.ui.graphicsView_OEE.setScene(graphicscene_oee)
            self.ui.graphicsView_OEE.show()

        def draw_loss():  # draw the loss histogram
            loss = Figure_Loss()
            da = data_access.EquipmentTimeData()
            result = da.select()
            loss.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))
            graphicscene_loss = QtGui.QGraphicsScene()
            graphicscene_loss.addWidget(loss.canvas)
            self.ui.graphicsView_Loss.setScene(graphicscene_loss)
            self.ui.graphicsView_Loss.show()

        def draw_mt():  # draw the consumables usage chart
            mt = Figure_MT()
            mt.plot()
            graphicscene_mt = QtGui.QGraphicsScene()
            graphicscene_mt.addWidget(mt.canvas)
            self.ui.graphicsView_MT.setScene(graphicscene_mt)
            self.ui.graphicsView_MT.show()

        draw_fp()
        draw_loss()
        draw_mt()
        draw_oee()
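
The Timer class used throughout the Qt examples above and below is never shown. From its call sites (Timer('updatePlay()', sleep_time=2) combined with self.connect(..., QtCore.SIGNAL('updatePlay()'), ...)) it behaves like a QThread that emits the named signal every sleep_time seconds. A minimal sketch consistent with those call sites, assuming PyQt4 old-style signals; this is an inference from the call sites, not the projects' actual source:

import time

from PyQt4 import QtCore


class Timer(QtCore.QThread):
    """Emit a named signal every `sleep_time` seconds (assumed behavior)."""

    def __init__(self, signal='updatePlay()', sleep_time=0.04):
        super(Timer, self).__init__()
        self.signal = signal
        self.sleep_time = sleep_time

    def run(self):
        while True:
            # Old-style PyQt4 signal dispatch, matching the connect() calls above.
            self.emit(QtCore.SIGNAL(self.signal))
            time.sleep(self.sleep_time)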
Example No. 2
class XioFigurePlot(QtGui.QWidget):
    '''Plotting widget class.
    '''
    def __init__(self):
        super(XioFigurePlot, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.thread_figure = Timer('updatePlay()', sleep_time=10)
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()

    def draw(self):
        def draw_fp():  # draw the loss pie chart
            fp = Figure_Pie()
            fp.plot(*(1, 1, 1, 1))  # '*' unpacks the tuple, turning (1, 1, 1, 1) into 1 1 1 1
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # draw the daily OEE chart
            pass

        def draw_loss():  # draw the loss histogram
            pass

        def draw_mt():  # draw the consumables usage chart
            pass

        draw_fp()
Example No. 3
def batch_size_linear_search():
    min_bs = 8  # renamed from min/max to avoid shadowing the builtins
    max_bs = 600
    step_size = 8

    optimizer = lambda x: torch.optim.SGD(x, lr=0.1)
    experiment_name = "batch_size_linear_search"
    t = Timer()

    batch_size_times = {}
    for i, batch_size in enumerate(range(min_bs, max_bs, step_size)):
        t.start()
        main(experiment_name, optimizer, epochs=i + 2, batch_size=batch_size)
        elapsed_time = t.stop()
        batch_size_times[batch_size] = elapsed_time

    with open("batch_size_times.pickle", "wb") as f:
        pickle.dump(batch_size_times, f)

    # Plot
    batch_sizes = []
    times = []
    for k in sorted(batch_size_times):
        batch_sizes.append(k)
        times.append(batch_size_times[k])

    plt.plot(np.array(batch_sizes), np.array(times))
    plt.xlabel("Batch Size")
    plt.ylabel("Epoch Time")
    plt.title("Batch Size vs Epoch Time")
    plt.show()
Example No. 4
    def __init__(self):
        super(XioFigurePlot, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.thread_figure = Timer('updatePlay()', sleep_time=2)
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()
Example No. 5
    def validate(self, loader, model, criterion, epoch, args):
        timer = Timer()
        losses = AverageMeter()
        top1 = AverageMeter()
        wtop1 = AverageMeter()
        alloutputs = []
        metrics = {}

        # switch to evaluate mode
        model.eval()

        def part(x):
            return itertools.islice(x, int(len(x) * args.val_size))

        for i, x in enumerate(part(loader)):
            inputs, target, meta = parse(x)
            output, loss, weights = forward(inputs,
                                            target,
                                            model,
                                            criterion,
                                            meta['id'],
                                            train=False)
            prec1 = triplet_accuracy(output, target)
            wprec1 = triplet_accuracy(output, target, weights)
            losses.update(loss.item(), inputs[0].size(0))
            top1.update(prec1, inputs[0].size(0))
            wtop1.update(wprec1, inputs[0].size(0))
            alloutputs.extend(
                zip([(x.item(), y.item()) for x, y in zip(*output)], target,
                    weights))
            timer.tic()

            if i % args.print_freq == 0:
                print('[{name}] Test [{epoch}]: [{0}/{1} ({2})]\t'
                      'Time {timer.val:.3f} ({timer.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'WAcc@1 {wtop1.val:.3f} ({wtop1.avg:.3f})\t'.format(
                          i,
                          int(len(loader) * args.val_size),
                          len(loader),
                          name=args.name,
                          timer=timer,
                          loss=losses,
                          top1=top1,
                          epoch=epoch,
                          wtop1=wtop1))

        metrics.update(triplet_allk(*zip(*alloutputs)))
        metrics.update({'top1val': top1.avg, 'wtop1val': wtop1.avg})
        print(
            ' * Acc@1 {top1val:.3f} \t WAcc@1 {wtop1val:.3f}'
            '\n   topk1: {topk1:.3f} \t topk2: {topk2:.3f} \t '
            'topk5: {topk5:.3f} \t topk10: {topk10:.3f} \t topk50: {topk50:.3f}'
            .format(**metrics))

        return metrics
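
Likewise, the AverageMeter used in this and the other training/validation loops (Examples 7 and 17) is not defined in these snippets. Its call sites (update(val, n), .val, .avg) match the familiar helper from the PyTorch ImageNet example; a sketch under that assumption:

class AverageMeter(object):
    """Track the most recent value and a running weighted average."""

    def __init__(self):
        self.val = 0.0   # most recent value
        self.sum = 0.0   # weighted sum of all values
        self.count = 0   # total weight seen so far
        self.avg = 0.0   # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count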
Example No. 6
def minhash_lsh_dedupe_cassandra(batch_minhashes_pickle_path, lsh_pickle_path,
                                 tqdm_func, global_tqdm):
    # [(file_id, [doc0_minhash, doc1_minhash, ...]), ....]
    batch_minhashes = timed_pickle_load(batch_minhashes_pickle_path,
                                        "batch minhashes")

    # For some reason this will freeze when loading on the first run.
    lsh = timed_pickle_load(lsh_pickle_path, "lsh")

    checkpoint_file = batch_minhashes_pickle_path.replace(".pkl", "_ckpt.pkl")
    if os.path.exists(checkpoint_file):
        ckpt_file_id, ckpt_document_id = pickle.load(
            open(checkpoint_file, "rb"))
    else:
        ckpt_file_id = -1
        ckpt_document_id = -1

    logger.info("Detecting duplicates")
    timer = Timer().start()
    duplicate_file_path = batch_minhashes_pickle_path.replace(
        ".pkl", "_duplicates.txt")
    with open(duplicate_file_path, "a") as fh:
        for file_id, documents in batch_minhashes:
            if file_id <= ckpt_file_id:
                global_tqdm.update(len(documents))
                continue
            for document_id, minhash in enumerate(documents):
                if document_id <= ckpt_document_id:
                    global_tqdm.update(ckpt_document_id + 1)
                    ckpt_document_id = -1
                    continue
                results = lsh.query(minhash)
                duplicate_found = bool(results)
                is_self = False
                for json_results in results:
                    found_file_id, found_document_id = json.loads(json_results)
                    # This check is needed in case you re-run things
                    if file_id == found_file_id and document_id == found_document_id:
                        duplicate_found = False
                        is_self = True
                        break

                if duplicate_found:
                    fh.write(f"{file_id} {document_id}\n")
                else:
                    if not is_self:
                        lsh.insert(json.dumps((file_id, document_id)), minhash)

                global_tqdm.update()
                with open(checkpoint_file, "wb") as ckpt_fh:
                    pickle.dump((file_id, document_id), ckpt_fh)

    logger.info(timer.stop_string())

    return True
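
The lsh.insert(key, minhash) and lsh.query(minhash) calls above match the MinHashLSH interface of the datasketch library, which is presumably what the pickled lsh object is. A minimal, self-contained sketch of that insert/query pattern, assuming datasketch (the threshold and num_perm values are illustrative):

from datasketch import MinHash, MinHashLSH


def make_minhash(tokens, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for t in tokens:
        m.update(t.encode("utf-8"))
    return m


lsh = MinHashLSH(threshold=0.5, num_perm=128)
doc_a = make_minhash("the quick brown fox jumps".split())
doc_b = make_minhash("the quick brown fox jumped".split())

lsh.insert("doc_a", doc_a)  # index the first document under a string key
print(lsh.query(doc_b))     # keys of likely near-duplicates, e.g. ['doc_a']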
Example No. 7
    def train(self, loader, model, criterion, optimizer, epoch, args):
        adjust_learning_rate(args.lr, args.lr_decay_rate, optimizer, epoch)
        timer = Timer()
        data_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        wtop1 = AverageMeter()
        metrics = {}

        # switch to train mode
        model.train()
        optimizer.zero_grad()

        def part(x):
            return itertools.islice(x, int(len(x) * args.train_size))

        for i, x in enumerate(part(loader)):
            inputs, target, meta = parse(x)
            data_time.update(timer.thetime() - timer.end)
            output, loss, weights = forward(inputs, target, model, criterion,
                                            meta['id'])
            prec1 = triplet_accuracy(output, target)
            wprec1 = triplet_accuracy(output, target, weights)
            losses.update(loss.item(), inputs[0].size(0))
            top1.update(prec1, inputs[0].size(0))
            wtop1.update(wprec1, inputs[0].size(0))

            loss.backward()
            if i % args.accum_grad == args.accum_grad - 1:
                print('updating parameters')
                optimizer.step()
                optimizer.zero_grad()

            timer.tic()
            if i % args.print_freq == 0:
                print('[{name}] Epoch: [{0}][{1}/{2}({3})]\t'
                      'Time {timer.val:.3f} ({timer.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'WAcc@1 {wtop1.val:.3f} ({wtop1.avg:.3f})\t'.format(
                          epoch,
                          i,
                          int(len(loader) * args.train_size),
                          len(loader),
                          name=args.name,
                          timer=timer,
                          data_time=data_time,
                          loss=losses,
                          top1=top1,
                          wtop1=wtop1))

        metrics.update({'top1': top1.avg, 'wtop1': wtop1.avg})
        return metrics
Example No. 8
    def validate_egovideo(self, loader, model, epoch, args):
        """ Run video-level validation on the Charades ego test set"""
        timer = Timer()
        outputs, gts, ids = [], [], []
        outputsw = []
        metrics = {}

        # switch to evaluate mode
        model.eval()
        for i, x in enumerate(loader):
            inp, target, meta = parse(x)
            target = target.long().cuda(non_blocking=True)  # 'async' was renamed to 'non_blocking' in PyTorch >= 0.4
            assert target[0, :].eq(target[1, :]).all(), "val_video not synced"
            input_var = torch.autograd.Variable(inp.cuda(), volatile=True)
            output, w_x, w_z = model(input_var)
            output = torch.nn.Softmax(dim=1)(output)

            sw_x = torch.nn.Softmax(dim=0)(w_x) * w_x.shape[0]
            sw_x = (sw_x - sw_x.mean()) / sw_x.std()
            scale = torch.clamp(1 + (sw_x - 1) * 0.05, 0, 100)
            print('scale min: {}\t max: {}\t std: {}'.format(
                scale.min().item(),
                scale.max().item(),
                scale.std().item()))  # .item() replaces the pre-0.4 .data[0] indexing
            scale = torch.clamp(scale, 0, 100)
            scale *= scale.shape[0] / scale.sum()
            outputw = output * scale.unsqueeze(1)

            # store predictions
            output_video = output.mean(dim=0)
            outputs.append(output_video.data.cpu().numpy())
            outputsw.append(outputw.mean(dim=0).data.cpu().numpy())
            gts.append(target[0, :])
            ids.append(meta['id'][0])
            timer.tic()

            if i % args.print_freq == 0:
                print('Test2: [{0}/{1}]\t'
                      'Time {timer.val:.3f} ({timer.avg:.3f})'.format(
                          i, len(loader), timer=timer))
        # mAP, _, ap = meanap.map(np.vstack(outputs), np.vstack(gts))
        mAP, _, ap = meanap.charades_nanmap(np.vstack(outputs), np.vstack(gts))
        mAPw, _, _ = meanap.charades_nanmap(np.vstack(outputsw),
                                            np.vstack(gts))
        metrics['mAPego'] = mAP
        metrics['mAPegow'] = mAPw
        print(ap)
        print(' * mAPego {mAPego:.3f} \t mAPegow {mAPegow:.3f}'.format(
            **metrics))
        submission_file(ids, outputs,
                        '{}/egoepoch_{:03d}.txt'.format(args.cache, epoch + 1))
        return metrics
Example No. 9
def train_model(device, model, train_set_loader, optimizer):
    timer = Timer().start()
    model.train()  # For special layers
    total = 0
    correct = 0
    total_loss = 0
    for images, targets in train_set_loader:
        total += images.shape[0]
        optimizer.zero_grad()
        images = images.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        output = model(images)
        loss = F.cross_entropy(output, targets, reduction='mean')
        total_loss += loss.item() * images.shape[0]  # loss is the batch mean; accumulate the per-image sum
        loss.backward()
        optimizer.step()
        # logger.info(f"Batch Loss: {loss}")

        _, predicted = torch.max(output.data, 1)
        correct += predicted.eq(targets.data).cpu().sum()

    average_train_loss = total_loss / total
    accuracy = 100. * correct.item() / total
    logger.info(
        f"Training Took {timer.stop():0.2f}s. Images in epoch: {total} ")

    return average_train_loss, accuracy
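
The stopwatch-style Timer in this example (and in Examples 3, 18, and 19) is also undefined here, and its exact API varies between the source projects (stop(), lap(), stop_string(), tic()). A minimal sketch covering the start()/stop()/lap() calls used in these snippets, purely an assumption about the helper:

import time


class Timer(object):
    """Stopwatch matching the Timer().start()/stop()/lap() call sites (assumed)."""

    def __init__(self):
        self._start = time.time()  # Example 19 calls lap() without an explicit start()

    def start(self):
        self._start = time.time()
        return self  # allows the chained Timer().start() seen above

    def lap(self):
        # Seconds since start (or since the previous lap), without stopping.
        now = time.time()
        elapsed, self._start = now - self._start, now
        return elapsed

    def stop(self):
        # Seconds since start.
        return time.time() - self._start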
Example No. 10
def evaluate(config,
             model,
             dataset_loader,
             eval_metric,
             split='dev',
             dump=True):
    timer = Timer()
    metrics = MultiLabelMetric(config.num_class,
                               thresholds=config.metrics_thresholds)
    eval_metric.clear()
    progress_bar = tqdm(dataset_loader)

    for idx, batch in enumerate(progress_bar):
        batch_labels = batch['label']
        predict_results = model.predict(batch)
        batch_label_scores = predict_results['scores']

        batch_labels = batch_labels.cpu().detach().numpy()
        batch_label_scores = batch_label_scores.cpu().detach().numpy()
        metrics.add_batch(batch_labels, batch_label_scores)
        eval_metric.add_batch(batch_labels, batch_label_scores)

        if not config.display_iter or idx % config.display_iter == 0:
            last_metrics = metrics.get_metrics()
            progress_bar.set_postfix(**last_metrics)

    log.info(f'Time for evaluating {split} set = {timer.time():.2f} (s)')
    print(eval_metric)
    metrics = eval_metric.get_metrics()
    if dump:
        dump_log(config, metrics, split)

    return metrics
Example No. 11
    def __init__(self):
        super(XioAll, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.frame_left = None
        self.frame_right = None
        self.is_work = True
        self.one_static_time = 0  # idle time for a single fault
        self.all_time = 0  # total working time for the day
        self.q = MyQueue()  # frame queue; storing states instead would be better
        self.vision = Vision()
        # if the date has changed, insert an all-zero row automatically
        da = data_access.EquipmentTimeData()  # operates on the loss statistics table
        result_loss = da.select_("select * from loss ORDER BY SJ DESC limit 1")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d')
        if str(result_loss[0][0]) != current_time:
            da.update('insert into loss(SJ,action1,action2,action3,action4,action5,action6)values'
                      '("%s",%d,%d,%d,%d,%d,%d)' % (current_time, 0, 0, 0, 0, 0, 0))
        else:
            pass

        da_oee = data_access.OEEData()  # tracks real-time OEE utilization
        result_oee = da_oee.select_('select * from oee_date ORDER BY SJC DESC limit 1')
        if str(result_oee[0][0]) != current_time:
            da_oee.update_('insert into oee_date(SJC,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18)values'
                           '("' + current_time + '",0,0,0,0,0,0,0,0,0,0,0)')
        else:
            pass
        self.thread_figure = Timer('updatePlay()', sleep_time=120)  # this thread refreshes the plotting area every 2 minutes
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()

        self.server = ThreadedTCPServer((self.HOST, self.PORT), ThreadedTCPRequestHandler)  # this thread keeps listening for client requests
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.start()

        self.thread_video_receive = threading.Thread(target=self.video_receive_local)  # this thread reads the video stream
        self.thread_video_receive.start()

        self.thread_time = Timer('updatePlay()')  # this thread redraws the label every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'), self.video_play)
        self.thread_time.start()

        self.thread_recog = Timer('updatePlay()', sleep_time=1)  # this thread analyzes a frame every second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'), self.video_recog)
        self.thread_recog.start()

        self.thread_data = Timer('updatePlay()', sleep_time=1800)  # this thread reads from the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'), self.data_read)
        self.thread_data.start()
Example No. 12
    def validate_video(self, loader, model, epoch, args):
        """ Run video-level validation on the Charades test set"""
        timer = Timer()
        outputs, gts, ids = [], [], []
        metrics = {}

        # switch to evaluate mode
        model.eval()

        for i, x in enumerate(loader):
            inputs, target, meta = parse(x)
            target = target.long().cuda(non_blocking=True)  # 'async' was renamed to 'non_blocking' in PyTorch >= 0.4
            assert target[0, :].eq(target[1, :]).all(), "val_video not synced"
            input_vars = [
                torch.autograd.Variable(inp.cuda(), volatile=True)
                for inp in inputs
            ]
            output = model(
                *input_vars)[-1]  # classification should be last output
            output = torch.nn.Softmax(dim=1)(output)

            # store predictions
            output_video = output.mean(dim=0)
            outputs.append(output_video.data.cpu().numpy())
            gts.append(target[0, :])
            ids.append(meta['id'][0])
            timer.tic()

            if i % args.print_freq == 0:
                print('Test2: [{0}/{1}]\t'
                      'Time {timer.val:.3f} ({timer.avg:.3f})'.format(
                          i, len(loader), timer=timer))
        # mAP, _, ap = meanap.map(np.vstack(outputs), np.vstack(gts))
        mAP, _, ap = meanap.charades_map(np.vstack(outputs), np.vstack(gts))
        metrics['mAP'] = mAP
        print(ap)
        print(' * mAP {:.3f}'.format(mAP))
        submission_file(ids, outputs,
                        '{}/epoch_{:03d}.txt'.format(args.cache, epoch + 1))
        return metrics
Example No. 13
    def __init__(self):
        super(XioPlayVideo, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)
        self.left_cam = cv2.VideoCapture('./videos/left_cam.mp4')  # left camera
        self.right_cam = cv2.VideoCapture('./videos/right_cam.mp4')
        self.frame_left = None
        self.frame_right = None

        self.tcpServer = QTcpServer()  # TCP server side
        if not self.tcpServer.listen(QHostAddress.LocalHost, 8888):
            print(self.tcpServer.errorString())
            self.close()
        self.connect(self.tcpServer, QtCore.SIGNAL('newConnection()'), self.read_message)

        self.thread_video_receive = threading.Thread(target=self.video_receive_local)  # this thread reads the video stream
        self.thread_video_receive.start()
        self.thread_time = Timer('updatePlay()')  # this thread redraws the label every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'), self.video_play)
        self.thread_time.start()
        self.thread_recog = Timer('updatePlay()', sleep_time=1)  # this thread analyzes a frame every second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'), self.video_recog)
        self.thread_recog.start()
        self.thread_data = Timer('updatePlay()', sleep_time=1800)  # this thread reads from the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'), self.data_read)
        self.thread_data.start()
        self.thread_tcp = None  # thread for TCP handling; unfinished
Example No. 14
    def __init__(self):
        super(XioPlayVideo, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)
        self.left_cam = cv2.VideoCapture('./videos/left_cam.mp4')  # left camera
        self.right_cam = cv2.VideoCapture('./videos/right_cam.mp4')
        self.frame_left = None
        self.frame_right = None

        self.thread_video_receive = threading.Thread(
            target=self.video_receive_local)  # this thread reads the video stream
        self.thread_video_receive.start()
        self.thread_time = Timer('updatePlay()')  # this thread redraws the label every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'),
                     self.video_play)
        self.thread_time.start()
        self.thread_recog = Timer('updatePlay()',
                                  sleep_time=1)  # this thread analyzes a frame every second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'),
                     self.video_recog)
        self.thread_recog.start()
        self.thread_data = Timer('updatePlay()',
                                 sleep_time=1800)  # this thread reads from the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'),
                     self.data_read)
        self.thread_data.start()
        self.thread_tcp = None  # thread for TCP handling; unfinished
Example No. 15
def fine_tune_train_and_val(args, recorder):
    # =
    global lowest_val_loss, best_prec1
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # close the warning
    torch.manual_seed(1)
    cudnn.benchmark = True
    timer = Timer()
    # == dataset config==
    num_class, data_length, image_tmpl = ft_data_config(args)
    train_transforms, test_transforms, eval_transforms = ft_augmentation_config(
        args)
    train_data_loader, val_data_loader, _, _, _, _ = ft_data_loader_init(
        args, data_length, image_tmpl, train_transforms, test_transforms,
        eval_transforms)
    # == model config==
    model = ft_model_config(args, num_class)
    recorder.record_message('a', '=' * 100)
    recorder.record_message('a', '-' * 40 + 'finetune' + '-' * 40)
    recorder.record_message('a', '=' * 100)
    # == optim config==
    train_criterion, val_criterion, optimizer = ft_optim_init(args, model)
    # == data augmentation(self-supervised) config==
    tc = TC(args)
    # == train and eval==
    print('*' * 70 + 'Step2: fine tune' + '*' * 50)
    for epoch in range(args.ft_start_epoch, args.ft_epochs):
        timer.tic()
        ft_adjust_learning_rate(optimizer, args.ft_lr, epoch, args.ft_lr_steps)
        train_prec1, train_loss = train(args, tc, train_data_loader, model,
                                        train_criterion, optimizer, epoch,
                                        recorder)
        # train_prec1, train_loss = random.random() * 100, random.random()
        recorder.record_ft_train(train_loss / 5.0, train_prec1 / 100.0)
        if (epoch + 1) % args.ft_eval_freq == 0:
            val_prec1, val_loss = validate(args, tc, val_data_loader, model,
                                           val_criterion, recorder)
            # val_prec1, val_loss = random.random() * 100, random.random()
            recorder.record_ft_val(val_loss / 5.0, val_prec1 / 100.0)
            is_best = val_prec1 > best_prec1
            best_prec1 = max(val_prec1, best_prec1)
            checkpoint = {
                'epoch': epoch + 1,
                'arch': "i3d",
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1
            }
            # save inside the eval branch: checkpoint/is_best are only defined here
            recorder.save_ft_model(checkpoint, is_best)
        timer.toc()
        left_time = timer.average_time * (args.ft_epochs - epoch)
        message = "Step2: fine tune best_prec1 is: {} left time is : {} now is : {}".format(
            best_prec1, timer.format(left_time), datetime.now())
        print(message)
        recorder.record_message('a', message)
    return recorder.filename
Example No. 16
def alignment(loader, model, epoch, args, task=best_one_sec_moment):
    timer = Timer()
    abssec = MedianMeter()
    abssec0 = MedianMeter()
    randsec = MedianMeter()
    model = ActorObserverFC7(model)

    # switch to evaluate mode
    model.eval()

    def fc7_generator():
        for i, x in enumerate(loader):
            inputs, target, meta = parse(x)
            target = target.long().cuda(non_blocking=True)  # 'async' was renamed to 'non_blocking' in PyTorch >= 0.4
            input_vars = [
                torch.autograd.Variable(inp.cuda(), volatile=True)
                for inp in inputs
            ]
            first_fc7, third_fc7, w_x, w_y = model(*input_vars)
            timer.tic()
            if i % args.print_freq == 0:
                print('Alignment: [{0}/{1}]\t'
                      'Time {timer.val:.3f} ({timer.avg:.3f})'.format(
                          i, len(loader), timer=timer))
            for vid, o1, o2 in zip(meta['id'], first_fc7, third_fc7):
                yield vid, (o1.data.cpu().numpy(), o2.data.cpu().numpy())

    for key, grp in groupby(fc7_generator(), key=lambda x: x[0]):
        print('processing id: {}'.format(key))
        _, mat = fc7list2mat(grp)
        _, _, _, j, gt = task(mat, winsize=3)
        _, _, _, j0, gt0 = task(mat, winsize=0)
        _, _, _, jr, gtr = task(np.random.randn(*mat.shape), winsize=3)
        abssec.update(abs(j - gt))
        abssec0.update(abs(j0 - gt0))
        randsec.update(abs(jr - gtr))
        print(
            '  abs3: {abs3.val:.3f} ({abs3.avg:.3f}) [{abs3.med:.3f}]'
            '  abs0: {abs0.val:.3f} ({abs0.avg:.3f}) [{abs0.med:.3f}]'
            '\n'
            '  absr: {absr.val:.3f} ({absr.avg:.3f}) [{absr.med:.3f}]'.format(
                abs3=abssec, abs0=abssec0, absr=randsec))

    return abssec.med
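
A subtlety in alignment(): itertools.groupby only groups consecutive items with equal keys, so iterating groupby(fc7_generator(), key=lambda x: x[0]) silently assumes the loader yields all clips of a video contiguously. A tiny illustration of that behavior:

from itertools import groupby

pairs = [('vid1', 0), ('vid1', 1), ('vid2', 2), ('vid1', 3)]
for key, grp in groupby(pairs, key=lambda x: x[0]):
    print(key, list(grp))
# vid1 [('vid1', 0), ('vid1', 1)]
# vid2 [('vid2', 2)]
# vid1 [('vid1', 3)]   <- 'vid1' reappears because groupby never sorts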
Example No. 17
    def train_epoch(self, data_loader):
        """Run through one epoch of model training with the provided data loader."""

        train_loss = AverageMeter()
        metrics = MultiLabelMetric(self.config.num_class)
        epoch_time = Timer()
        progress_bar = tqdm(data_loader)

        for idx, batch in enumerate(progress_bar):
            loss, batch_label_scores = self.train_step(batch)
            train_loss.update(loss)

            # training metrics
            batch_labels = batch['label'].cpu().detach().numpy()
            batch_label_scores = batch_label_scores.cpu().detach().numpy()
            metrics.add_batch(batch_labels, batch_label_scores)
            progress_bar.set_postfix(loss=train_loss.avg)
        log.info(metrics.get_metrics())
        log.info(f'Epoch done. Time for epoch = {epoch_time.time():.2f} (s)')
        log.info(f'Epoch loss: {train_loss.avg}')
Example No. 18
def test_model(device, model, test_set_loader, optimizer):
    timer = Timer().start()
    model.eval()  # For special layers
    total = 0
    correct = 0
    with torch.no_grad():
        for images, targets in test_set_loader:
            total += images.shape[0]

            images = images.to(device, non_blocking=True)
            targets = targets.to(device, non_blocking=True)
            outputs = model(images)

            _, predicted = torch.max(outputs.data, 1)
            correct += predicted.eq(targets.data).cpu().sum()

    accuracy = 100. * correct.item() / total
    logger.info(f"Testing Took {timer.stop():0.2f}s. Images in epoch: {total}")

    return accuracy
Example No. 19
def load_hcpcs_corpus(debug=False):
    corpus_file = 'debug-corpus.npy' if debug else 'corpus.npy'
    corpus_output = os.path.join(proj_dir, 'data', corpus_file)
    partb_file = 'partb-2012.csv.gz' if debug else 'partb-2012-2018.csv.gz'
    partb_output = os.path.join(proj_dir, 'data', partb_file)

    # load from disk if exists
    if os.path.isfile(corpus_output):
        print(f'Loading corpus from disk {corpus_output}')
        corpus = np.load(corpus_output, allow_pickle=True)
        return corpus

    # load Medicare Data
    timer = Timer()
    data = load_data(data_dir, partb_output, debug)
    print(f'Loaded data in {timer.lap()}')

    # clean missing values
    data.dropna(subset=['hcpcs', 'count'], inplace=True)

    # generate sequences of HCPCS codes
    # that occur in the same context
    grouped_hcpcs = data \
        .sort_values(by='count') \
        .groupby(by=['year', 'npi'])['hcpcs'] \
        .agg(list)
    grouped_hcpcs = pd.DataFrame(grouped_hcpcs)
    print(f'Generated hcpcs sequences in {timer.lap()}')

    # drop top 1 percent longest sequences
    quantile = 0.99
    grouped_hcpcs['seq_length'] = grouped_hcpcs['hcpcs'].agg(len)
    max_seq_length = grouped_hcpcs['seq_length'].quantile(quantile)
    grouped_hcpcs = grouped_hcpcs.loc[
        grouped_hcpcs['seq_length'] <= max_seq_length]
    print(f'Removed sequences longer than {max_seq_length}')

    # save corpus
    np.save(corpus_output, grouped_hcpcs['hcpcs'].values)

    return grouped_hcpcs['hcpcs'].values
Example No. 20
def train(loader, D, G, optim_D, optim_G, criterion):
    G_losses = [0]
    D_losses = [0]

    timer = Timer()

    for i in range(1, config.num_epoch + 1):
        iters = 0

        for data in loader:
            current_size = data.size(0)

            labels0 = torch.tensor([0] * current_size).to(
                config.device, torch.long)
            labels1 = torch.tensor([1] * current_size).to(
                config.device, torch.long)

            noise = torch.randn(
                (current_size, config.latent_size, 1, 1)).to(config.device)

            D_loss = D_train(data, D, G, optim_D, criterion, current_size,
                             labels0, labels1, noise)
            G_loss = G_train(D, G, optim_G, criterion, current_size, labels0,
                             labels1, noise)

            iters += 1
            D_losses.append(D_loss)
            G_losses.append(G_loss)

            if iters % config.log_iter == 0:
                timer.save_batch_time()
                log_batch_history(i, iters, len(loader), D_losses, G_losses,
                                  timer)

        save_model(i, G, optim_G, D, optim_D)

        timer.save_epoch_time()
        log_epoch_history(i, len(loader), D_losses, G_losses, timer)

        if i % config.make_img_samples == 0:
            for _ in range(5):
                make_img_samples(G)
Example No. 21
import re

import numpy as np
from sklearn.neighbors import NearestNeighbors
from gensim.models import Word2Vec


from utils.utils import replace_umlauts, Timer

from utils.utils import raw_freq

model_path = "/media/echobot/Volume/home/simon/uni/masterarbeit/de/model/01/my.model"
with Timer('Loading model from %s' % model_path):
    model = Word2Vec.load_word2vec_format(model_path, binary=True)
w2v = {w: vec for w, vec in zip(model.index2word, model.syn0)}

dataset_path = "/media/echobot/Volume/home/simon/uni/masterarbeit/data/business_signals_samples/fuehrungswechsel.txt"
dataset_path += ".corpus"

with open(dataset_path, 'r') as f:
    W = [w.decode('utf-8') for line in f for w in line.split()]
    X = [w2v[w] for w in W]  # one embedding per token (the original double loop repeated all of W len(W) times)
    V = set(W)  # vocabulary

X = np.array(X)

# with Timer("Calculating nearest neighbors... "):
#     nbrs = NearestNeighbors(n_neighbors=5, algorithm='ball_tree').fit(V)
#     distances, indices = nbrs.kneighbors(X)
Example No. 23
class XioAll(QtGui.QWidget):
    '''Main application class.
    '''
    HOST = 'localhost'
    PORT = 8081
    TOTAL = 0
    isStatic = True
    Shumei = None
    action = None
    pre_action = None
    action_video = None  # action recognizable within the video
    pre_action_video = None

    def __init__(self):
        super(XioAll, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.frame_left = None
        self.frame_right = None
        self.is_work = True
        self.stype = 0
        self.one_static_time = 0  # idle time for a single fault
        self.all_time = 0  # total working time for the day
        self.q = MyQueue()  # frame queue; storing states instead would be better
        self.vision = Vision()

        # controls the input video source
        self.CamPath = ""
        self.isWebCam = False
        self.isCamChanged = False

        # database access
        self.da = data_access.DataAccess()

        # if the date has changed, insert an all-zero row automatically
        result_loss = self.da.select_(
            "select * from loss ORDER BY SJ DESC limit 1")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d')
        if str(result_loss[0][0]) != current_time:
            self.da.operate_(
                'insert into loss(SJ,action1,action2,action3,action4,action5,action6)values'
                '("%s",%d,%d,%d,%d,%d,%d)' %
                (current_time, 10, 10, 10, 10, 0, 0))
        else:
            pass

        result_oee = self.da.select_(
            'select * from oee_date ORDER BY SJC DESC limit 1')
        if str(result_oee[0][0]) != current_time:
            self.da.operate_(
                'insert into oee_date(SJC,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18)values'
                '("' + current_time + '",0,0,0,0,0,0,0,0,0,0,0)')
        else:
            pass

        self.yolo_Model = Yolo_Model.Yolo_Model()
        # self.displayMessage("...YOLO model loaded successfully...")

        self.thread_figure = Timer('updatePlay()',
                                   sleep_time=120)  # this thread refreshes the plotting area every 2 minutes
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'),
                     self.draw)
        self.thread_figure.start()

        # button handlers
        self.connect(self.ui.fileSelectButton, QtCore.SIGNAL('clicked()'),
                     self.fileSelect)
        self.connect(self.ui.mailSenderButton, QtCore.SIGNAL('clicked()'),
                     self.mailSend)
        self.connect(self.ui.confirmDateButton, QtCore.SIGNAL('clicked()'),
                     self.displayMonthData)
        self.connect(self.ui.WebCamButton, QtCore.SIGNAL('clicked()'),
                     self.webCamInput)

        self.server = ThreadedTCPServer(
            (self.HOST, self.PORT),
            ThreadedTCPRequestHandler)  # this thread keeps listening for client requests
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.start()

        self.thread_video_receive = threading.Thread(
            target=self.video_receive_local)  # this thread reads the video stream
        self.thread_video_receive.start()

        self.thread_time = Timer('updatePlay()')  # this thread redraws the label every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'),
                     self.video_play)
        self.thread_time.start()

        self.thread_recog = Timer('updatePlay()',
                                  sleep_time=1)  # this thread analyzes a frame every second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'),
                     self.video_recog)
        self.thread_recog.start()

        self.thread_data = Timer('updatePlay()',
                                 sleep_time=1800)  # this thread reads from the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'),
                     self.data_read)
        self.thread_data.start()

        self.thread_shumei = threading.Thread(target=self.shumeiDeal)
        self.thread_shumei.start()

        self.thread_control = Timer('updatePlay()',
                                    sleep_time=10)  # this thread runs the control check every 10 seconds
        self.connect(self.thread_control, QtCore.SIGNAL('updatePlay()'),
                     self.control_judge)
        self.thread_control.start()

        # 12-25
        self.thread_recogtiaoshi = Timer('updatePlay()',
                                         sleep_time=0.3)  # this thread analyzes a frame every 0.3 s
        self.connect(self.thread_recogtiaoshi, QtCore.SIGNAL('updatePlay()'),
                     self.video_recogtiaoshi)
        self.thread_recogtiaoshi.start()

        self.thread_recogzhuangji = Timer('updatePlay()',
                                          sleep_time=0.3)  # this thread analyzes a frame every 0.3 s
        self.connect(self.thread_recogzhuangji, QtCore.SIGNAL('updatePlay()'),
                     self.video_recogzhuangji)
        self.thread_recogzhuangji.start()

        self.X_l = 0
        self.Y_l = 0
        self.type_l = ""
        self.flag = 0
        self.a = 0
        self.tiaoshi_back = False
        self.tiaoshi_forward = False
        self.X_r = 0
        self.Y_r = 0
        self.type_r = ""
        self.firstFrame = None
        self.chaiji_left = False
        self.chaiji_right = False
        self.cltime = 0
        self.crtime = 0
        self.totaltime = 0

        # values shown on the panel
        self.work_time = 0
        self.tf_time = 0
        self.tb_time = 0

        self.Ldown = [0] * 10
        self.Lup = [0] * 10  # queue-style status buffers
        self.Lhandsdown = [0] * 10
        self.Lhandsup = [0] * 10

        self.isJudgeMachineT = True

        # zhuangji (side-panel loading) detection
        self.mask_right = cv2.imread(
            "E:/projects-summary/xiaowork/maindo/images/zhuangjiimages/right.jpg"
        )
        self.mask_left = cv2.imread(
            "E:/projects-summary/xiaowork/maindo/images/zhuangjiimages/maskleft.jpg"
        )
        self.left_base = cv2.imread(
            "E:/projects-summary/xiaowork/maindo/images/zhuangjiimages/left_base.jpg",
            0)
        self.redLower = np.array([26, 43, 46])
        self.redUpper = np.array([34, 255, 255])
        self.Lright = [0] * 10
        self.Lleft = [0] * 10
        self.is_JudgeRL = True
        self.isRightStart = False
        self.isLeftStart = False
        self.zhuangjitime = 0

        # tiaoshi (machine-debugging) detection
        self.status_LUP = [0] * 8
        self.status_LDOWN = [0] * 8
        self.isActionStartUP = False
        self.isActionStartDOWN = False

        self.x1UP, self.y1UP, self.x2UP, self.y2UP = [0, 0, 0, 0]
        self.X1DOWN, self.Y1DOWN, self.X2DOWN, self.Y2DOWN = [0, 0, 0, 0]

        # timed on-screen text overlays
        self.putTextStart_time = None
        self.putTextEnd_time_left = None
        self.putTextEnd_time_right = None
        self.putTextEnd_time_up = None
        self.putTextEnd_time_down = None

    def fileSelect(self):
        absolute_path = QFileDialog.getOpenFileName(self, 'Select Video', '.',
                                                    "MP4 files (*.mp4)")

        if absolute_path != "":
            self.reFlushDetection()
            self.CamPath = absolute_path
            self.isWebCam = False
            self.isCamChanged = True
        else:
            self.displayMessage("...no file selected; video source path unchanged...")

    def webCamInput(self):
        webCamDict = {"address": "", "status": ""}
        webCamBox = WebCamBox("WebCam Manager", webCamDict)

        # handle the input dialog being closed by the user
        if webCamBox.exec_():
            return
        if webCamDict["status"] == "":
            return

        ret = False
        try:
            cap = cv2.VideoCapture(webCamDict["address"])
            ret, frame = cap.read()
        except Exception as e:
            raise e
        finally:
            if ret:
                self.CamPath = webCamDict["address"]
                self.isWebCam = True
                self.isCamChanged = True
                self.reFlushDetection()
                self.displayMessage("...webcam switched successfully...")
            else:
                if webCamDict["status"] != "WrongPassword":
                    self.displayMessage("...invalid IP address, please try again...")

    def reFlushDetection(self):
        self.X_l = 0
        self.Y_l = 0
        self.type_l = ""
        self.flag = 0
        self.a = 0
        self.tiaoshi_back = False
        self.tiaoshi_forward = False
        self.X_r = 0
        self.Y_r = 0
        self.type_r = ""
        self.firstFrame = None
        self.chaiji_left = False
        self.chaiji_right = False
        self.cltime = 0
        self.crtime = 0
        self.totaltime = 0

        # values shown on the panel
        self.work_time = 0
        self.tf_time = 0
        self.tb_time = 0

        self.Ldown = [0] * 10
        self.Lup = [0] * 10  # queue-style status buffers
        self.Lhandsdown = [0] * 10
        self.Lhandsup = [0] * 10

        self.isJudgeMachineT = True
        self.tiaoshitime = 0

        self.Lright = [0] * 10
        self.Lleft = [0] * 10
        self.is_JudgeRL = True
        self.isRightStart = False
        self.isLeftStart = False
        self.zhuangjitime = 0

        self.status_LUP = [0] * 10
        self.status_LDOWN = [0] * 15
        self.isActionStartUP = False
        self.isActionStartDOWN = False

        # timed on-screen text overlays
        self.putTextStart_time = None
        self.putTextEnd_time_left = None
        self.putTextEnd_time_right = None
        self.putTextEnd_time_up = None
        self.putTextEnd_time_down = None

        self.displayMessage("...detection parameters initialized successfully...")

    def mailSend(self):
        list_mail = []
        dilogUi = warningBox(u"Send Mail", u"Please enter an email address:", list_mail)
        if dilogUi.exec_():
            return
        if len(list_mail) == 0:
            return
        if len(list_mail[0]) != 0:
            print("Preparing to send!")

            list_oee = self.da.select_oee()
            list_loss = self.da.select_loss()
            dict_oee = {}
            hour = min(time.localtime()[3], 18)
            for i in range(8, hour + 1):
                dict_oee[str(i) + ":00"] = list_oee[i - 8]
            sender = '*****@*****.**'
            list_mail.append("*****@*****.**")

            message = "Side-panel welding line production data\n" \
                      "\n" \
                      "Today's OEE efficiency data:\n" \
                      "{}" \
                      "\n" \
                      "\n" \
                      "*Note: an efficiency of 0 means detection was not running.\n" \
                      "\n" \
                      "Today's equipment status distribution:" \
                      "\n" \
                      "Cleaning welding tips: {} \n" \
                      "Loading side panels: {} \n" \
                      "Machine idle: {} \n" \
                      "Machine working: {} \n".format(dict_oee, list_loss[0], list_loss[1], list_loss[2], list_loss[3])

            msg_wait = MIMEText(message, 'plain', 'utf-8')
            try:
                smtpObj = smtplib.SMTP()
                smtpObj.connect("smtp.qq.com", 25)
                mail_license = "wuhchbmndrjabgcc"
                print("Logging in")
                smtpObj.login(sender, mail_license)
                print("Login successful!")
                smtpObj.set_debuglevel(1)
                smtpObj.sendmail(sender, list_mail, msg_wait.as_string())
            except Exception as e:
                print(e)

    def displayMonthData(self):
        self.ui.DateTable.clear()

        # get the selected month
        select_date = self.ui.dateEdit.text()
        queryByMonth = "select * from oee_date where date_format(SJC,'%Y-%m')='{}'".format(
            select_date)

        # fetch the data
        result = self.da.select_(queryByMonth)
        row = len(result)
        if row == 0:
            self.ui.DateTable.setRowCount(1)
            self.ui.DateTable.setColumnCount(1)
            self.ui.DateTable.setEditTriggers(
                QtGui.QTableWidget.NoEditTriggers)
            self.ui.DateTable.horizontalHeader().setResizeMode(
                QtGui.QHeaderView.Stretch)
            newItem = QtGui.QTableWidgetItem(
                "                    No data for {}".format(
                    select_date))  # accepts str only, not int
            textFont = QtGui.QFont("song", 16, QtGui.QFont.Bold)
            newItem.setFont(textFont)

            self.ui.DateTable.setItem(0, 0, newItem)
        else:
            # table properties
            self.ui.DateTable.setRowCount(row)
            self.ui.DateTable.setColumnCount(12)
            self.ui.DateTable.setHorizontalHeaderLabels([
                'Date', '8:00', '9:00', '10:00', '11:00', '12:00', '13:00',
                '14:00', '15:00', '16:00', '17:00', '18:00'
            ])
            self.ui.DateTable.setEditTriggers(
                QtGui.QTableWidget.NoEditTriggers)
            self.ui.DateTable.horizontalHeader().setResizeMode(
                QtGui.QHeaderView.Stretch)

            # populate the table
            for i in range(row):
                list_data = list(result[i])
                for j in range(12):
                    if j == 0:
                        cnt = str(list_data[j])[5:10]
                    else:
                        cnt = str(int(list_data[j]))
                    newItem = QtGui.QTableWidgetItem(cnt)  # accepts str only, not int
                    textFont = QtGui.QFont("song", 12, QtGui.QFont.Bold)
                    newItem.setFont(textFont)
                    self.ui.DateTable.setItem(i, j, newItem)

    def control_judge(self):
        pass

    def video_recogtiaoshi(self):
        if self.isWebCam:
            return
        frame = self.frame_left
        frameDown = frame[250:500, 680:970]

        # upper region coordinates
        frameUP = frame[140:400, 540:800]

        # detect based on the status queues

        isPersonUP, self.x1UP, self.y1UP, self.x2UP, self.y2UP = self.yolo_Model.detect_person(
            frameUP)
        if isPersonUP:
            self.status_LUP.append(1)
        else:
            self.status_LUP.append(0)
        self.status_LUP.pop(0)

        isPersonDOWN, self.X1DOWN, self.Y1DOWN, self.X2DOWN, self.Y2DOWN = self.yolo_Model.detect_person(
            frameDown)
        if isPersonDOWN:
            self.status_LDOWN.append(1)
        else:
            self.status_LDOWN.append(0)
        self.status_LDOWN.pop(0)

        if sum(self.status_LUP) > 5 and self.isActionStartUP is False:
            self.displayMessage("工人上方开始清理焊嘴")
            self.isActionStartUP = True
            self.putTextStart_time = time.time()
            self.da.insert_action_("qinglihanzuiUP", 0)
        if sum(self.status_LUP) < 2 and self.isActionStartUP is True:
            self.displayMessage("工人上方结束清理焊嘴")
            self.isActionStartUP = False
            self.putTextEnd_time_up = time.time()
            self.da.insert_action_("qinglihanzuiUP", 1)
            self.da.update_loss_("action1", 1)
            self.da.update_loss_("action3", random.randint(0, 2))

        if sum(self.status_LDOWN) > 5 and self.isActionStartDOWN is False:
            self.displayMessage("工人下方开始清理焊嘴")
            self.isActionStartDOWN = True
            self.putTextStart_time = time.time()
            self.da.insert_action_("qinglihanzuiDOWN", 0)
        if sum(self.status_LDOWN) == 0 and self.isActionStartDOWN is True:
            self.displayMessage("工人下方结束清理焊嘴")
            self.isActionStartDOWN = False
            self.putTextEnd_time_down = time.time()
            self.da.insert_action_("qinglihanzuiDOWN", 1)
            self.da.update_loss_("action1", 1)
            self.da.update_loss_("action3", random.randint(0, 2))

    def video_recogzhuangji(self):
        if self.isWebCam:
            return
        img = self.frame_left
        img = cv2.resize(img, (1280, 720))
        img_right = cv2.bitwise_and(self.mask_right, img)
        hsv_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2HSV)
        mask_det = cv2.inRange(hsv_right, self.redLower, self.redUpper)
        img_left = cv2.bitwise_and(self.mask_left, img)
        hsv_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2HSV)
        mask_det1 = cv2.inRange(hsv_left, self.redLower, self.redUpper)

        if self.is_JudgeRL is True:
            if np.sum(mask_det) < 10000:
                self.Lright.append(1)
            else:
                self.Lright.append(0)
            self.Lright.pop(0)
            if sum(self.Lright) > 6 and self.isRightStart is False:
                self.displayMessage("工人开始右方装载侧板")
                self.isRightStart = True
                self.putTextStart_time = time.time()
                self.da.insert_action_("zhuangjiRIGHT", 0)

            if sum(self.Lright) < 2 and self.isRightStart is True:
                self.displayMessage("工人结束右方装载侧板")
                self.isRightStart = False
                self.putTextEnd_time_right = time.time()
                self.da.insert_action_("zhuangjiRIGHT", 1)
                self.da.update_loss_("action2", 1)
            if np.sum(mask_det1) < 50000:
                self.Lleft.append(1)
            else:
                self.Lleft.append(0)
            self.Lleft.pop(0)
            if sum(self.Lleft) > 6 and self.isLeftStart is False:
                self.displayMessage("工人开始左方装载侧板")
                self.isLeftStart = True
                self.putTextStart_time = time.time()
                self.da.insert_action_("zhuangjiLEFT", 0)
            if sum(self.Lleft) < 2 and self.isLeftStart is True:
                self.displayMessage("工人结束左方装载侧板")
                self.isLeftStart = False
                self.putTextEnd_time_left = time.time()
                self.da.insert_action_("zhuangjiLEFT", 1)
                self.da.update_loss_("action2", 1)

    def shumeiDeal(self):
        global Stype
        # Start/end status messages for each state code. States 3-6 reuse the
        # "eating" wording (and state 3's start message is empty) exactly as in
        # the original repeated branches.
        start_msgs = {1: "Worker eating!", 2: "5S maintenance", 3: "",
                      4: "Worker eating!", 5: "Worker eating!",
                      6: "Worker eating!"}
        end_msgs = {1: "Worker finished eating!", 2: "Worker finished 5S!",
                    3: "Worker finished eating!", 4: "Worker finished eating!",
                    5: "Worker eating!", 6: "Worker eating!"}
        while True:
            stamp = '[' + time.strftime(
                '%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + ']'
            if Stype in start_msgs and self.stype == 0:
                self.displayMessage(stamp + "******" + start_msgs[Stype])
                self.stype = Stype
            elif Stype == 0 and self.stype in end_msgs:
                self.displayMessage(stamp + "******" + end_msgs[self.stype])
                self.stype = 0

            time.sleep(0.06)

    def video_receive_local(
            self,
            cam1='E:/projects-summary/xiaowork/侧板焊接待检测视频/检测视频200519134451.mp4',
            cam2='E:\\剪辑\\zhuangji\\ch11_20171221084313 00_09_06-00_10_21~2.mp4',
            time_flag=True):
        '''Receive local video.
        :param cam1: left camera source
        :param cam2: right camera source
        :param time_flag: whether to sleep between frames; True for local video
        :return: None
        '''

        self.left_cam = cv2.VideoCapture(cam1)
        ret_1, frame_1 = self.left_cam.read()

        # Cannot loop playback
        # preCamPath = cam1
        # while True:
        #
        #     self.frame_left = frame_1
        #     if ret_1 is False:
        #         self.left_cam = cv2.VideoCapture(cam1)
        #     if self.CamPath != "" and self.CamPath != preCamPath:
        #         self.left_cam = cv2.VideoCapture(self.CamPath)
        #         preCamPath = self.CamPath
        #     ret_1, frame_1 = self.left_cam.read()
        #     if time_flag is True:
        #         time.sleep(0.04)

        # Optimized version
        while True:
            self.frame_left = frame_1
            if ret_1 is False:
                self.left_cam = cv2.VideoCapture(cam1)
            if self.CamPath != "" and self.isCamChanged:
                self.left_cam = cv2.VideoCapture(self.CamPath)
                self.isCamChanged = False
            ret_1, frame_1 = self.left_cam.read()
            if time_flag is True:
                time.sleep(0.04)

    def video_receive_rstp(self, cam1='rstp:', cam2='rstp:'):
        '''Receive network video streams.
        :param cam1: left camera source
        :param cam2: right camera source
        :return: None
        '''
        self.video_receive_local(cam1=cam1, cam2=cam2, time_flag=False)

    def video_play(self):
        '''Play the video.
        :return: None
        '''
        def label_show_left(frame, label=self.ui.label):  # render on the left label widget
            height, width, _ = frame.shape
            frame_change = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if self.type_l == 'work':
            #    cv2.rectangle(frame_change, (self.X_l, self.Y_l), (self.X_l + 100, self.Y_l + 100), (0, 255, 0), 4)
            frame_change = putChineseText.cv2ImgAddText(
                frame_change, "Automatic recognition of production operations (side-panel welding workshop)", 50, 30, (0, 0, 0), 50)
            if self.isActionStartUP is True:
                cv2.rectangle(frame_change, (540 + int(self.x1UP * 0.625),
                                             140 + int(self.y1UP * 0.625)),
                              (540 + int(self.x2UP * 0.625),
                               140 + int(self.y2UP * 0.625)), (255, 0, 0), 6)
                if 0 < time.time() - self.putTextStart_time < 5:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker starts cleaning the welding nozzle (upper)", 140, 60)

            if self.isActionStartDOWN is True:
                cv2.rectangle(frame_change, (int(self.X1DOWN * 0.721) + 680,
                                             int(self.Y1DOWN * 0.721) + 250),
                              (int(self.X2DOWN * 0.721) + 680,
                               int(self.Y2DOWN * 0.721) + 250), (255, 0, 0), 6)
                if 0 < time.time() - self.putTextStart_time < 5:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker starts cleaning the welding nozzle (lower)", 140, 60)

            if self.isLeftStart is True:
                if 0 < time.time() - self.putTextStart_time < 5:
                    cv2.rectangle(frame_change, (0, 150), (300, 720),
                                  (255, 255, 0), 6)
                    cv2.circle(frame_change, (150, 435), 6, (255, 0, 0), 20)

                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker starts loading the side panel (left)", 140, 60)

            if self.isRightStart is True:
                if 0 < time.time() - self.putTextStart_time < 5:
                    cv2.rectangle(frame_change, (880, 100), (1080, 380),
                                  (255, 255, 0), 6)
                    cv2.circle(frame_change, (980, 240), 6, (255, 0, 0), 20)
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker starts loading the side panel (right)", 140, 60)

            # End-of-action captions

            if self.isLeftStart is False:
                if self.putTextEnd_time_left is not None and \
                        0 < time.time() - self.putTextEnd_time_left < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker finishes loading the side panel (left)", 140, 60)

            if self.isRightStart is False:
                if self.putTextEnd_time_right is not None and \
                        0 < time.time() - self.putTextEnd_time_right < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker finishes loading the side panel (right)", 140, 60)

            if self.isActionStartDOWN is False:
                if self.putTextEnd_time_down is not None and \
                        0 < time.time() - self.putTextEnd_time_down < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker finishes cleaning the welding nozzle (lower)", 140, 60)

            if self.isActionStartUP is False:
                if self.putTextEnd_time_up is not None and \
                        0 < time.time() - self.putTextEnd_time_up < 3:
                    frame_change = putChineseText.cv2ImgAddText(
                        frame_change, "Worker finishes cleaning the welding nozzle (upper)", 140, 60)

            frame_resize = cv2.resize(frame_change, (360, 240),
                                      interpolation=cv2.INTER_AREA)

            image = QtGui.QImage(frame_resize.data, frame_resize.shape[1],
                                 frame_resize.shape[0],
                                 QtGui.QImage.Format_RGB888)  # convert to QImage
            label.setPixmap(QtGui.QPixmap.fromImage(image))

        if self.frame_left is not None:
            label_show_left(self.frame_left)

    def draw(self):
        '''
        Render the charts.
        :return:
        '''
        def draw_fp():  # draw the loss pie chart
            fp = Figure_Pie()
            loss_data = self.da.select_loss()
            sum1 = sum(loss_data)
            loss_data /= sum1
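            # (elementwise division: assumes select_loss() returns a numpy array)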
            fp.plot(*tuple(loss_data))
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # draw the hourly OEE chart
            self.da.update_oee()
            oee = Figure_OEE()
            l_eff = self.da.select_oee()
            oee.plot(*tuple(l_eff))  # unpack hourly values as arguments
            graphicscene_oee = QtGui.QGraphicsScene()
            graphicscene_oee.addWidget(oee.canvas)
            self.ui.graphicsView_OEE.setScene(graphicscene_oee)
            self.ui.graphicsView_OEE.show()

        def draw_loss():  # draw the loss histogram
            loss = Figure_Loss()
            loss_data = self.da.select_loss()
            loss.plot(*tuple(loss_data))
            graphicscene_loss = QtGui.QGraphicsScene()
            graphicscene_loss.addWidget(loss.canvas)
            self.ui.graphicsView_Loss.setScene(graphicscene_loss)
            self.ui.graphicsView_Loss.show()

        # def draw_mt():  # draw the consumables usage chart
        #     mt = Figure_MT()
        #     mt.plot(*(4, 5, 3))
        #     graphicscene_mt = QtGui.QGraphicsScene()
        #     graphicscene_mt.addWidget(mt.canvas)
        #     self.ui.graphicsView_MT.setScene(graphicscene_mt)
        #     self.ui.graphicsView_MT.show()

        draw_fp()
        draw_loss()
        # draw_mt()
        draw_oee()

    def video_recog(self):
        '''
        Video recognition.
        :return:
        '''
        if self.isWebCam:
            return
        self.totaltime += 1
        frame_left = self.frame_left  # original color frame, left camera
        frame_left_gray = cv2.cvtColor(frame_left,
                                       cv2.COLOR_BGR2GRAY)  # grayscale of the original frame

        def video_recog_left():
            img = frame_left
            spark, x, y = self.vision.find_spark(img)
            self.q.enqueue(spark)
            # print(spark)
            if spark and x != 1070:
                self.type_l = 'work'
                self.X_l = x
                self.Y_l = y
            else:
                self.type_l = ''

            if spark or True in self.q.queue:  # continuous sparks within the interval (machine-motion check to be added later) mean the machine is working
                self.one_static_time = 0  # motion resumed: reset the single idle-period counter
                self.work_time += 1
                self.is_work = True

                if self.work_time % 20 == 0:
                    if x != 1070:
                        self.displayMessage("Machine is working")
                if self.work_time % 60 == 0:
                    self.da.update_loss_("action4", 1)
            else:
                # ******* screenshot
                self.is_work = False
                self.one_static_time += 1  # one idle period

                if self.one_static_time % 20 == 0:
                    self.da.update_loss_("action3", 1)
                # ********

                self.action = ThreadedTCPRequestHandler.action  # keyboard action
                if self.action is not None:  # write to the panel why the machine is currently idle
                    if self.pre_action is None:
                        pass

                if self.action_video is not None:
                    if self.pre_action_video is None:
                        pass

        video_recog_left()
        self.pre_action = self.action
        self.pre_action_video = self.action_video

    def data_read(self):
        pass

    def displayMessage(self, message):

        self.ui.textBrowser.append(
            '[' +
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) +
            '] ' + message)
l2_reg = cli_args.get('l2_reg')
if l2_reg is not None:
    l2_reg = float(l2_reg)
l1_reg = cli_args.get('l1_reg')
if l1_reg is not None:
    l1_reg = float(l1_reg)
print(f'Running job with arguments\n{cli_args}')

# define configs
train_perf_filename = 'train-results.csv'
test_perf_filename = 'test-results.csv'

n_estimators = 5 if debug else 100
print(f'n_estimators: {n_estimators}')
print(f'max_depth: {max_depth}')

# init timer
timer = Timer()

# iterate over runs
for run in range(runs):
    print(f'Starting run {run}')

    # load data
    data = load_sampled_data(sample_size)
    print(f'Loaded data with shape {data.shape}')

    # drop columns, onehot encode, or lookup embeddings
    x, y = get_embedded_data(data, embedding_type, embedding_path,
                             drop_columns)
    del data
    print(f'Encoded data shape: {x.shape}')
Ejemplo n.º 25
# model config
window_size = int(cli_args.get('window_size', 5))
min_seq_length = int(cli_args.get('min_seq_length', 2))
embedding_size = int(cli_args.get('embedding_size', 300))
iters = int(cli_args.get('iters', 5))
desc = f'e{embedding_size}-w{window_size}-i{iters}-t{ts}'

# I/O
data_dir = os.environ['CMS_RAW']
curr_dir = os.path.join(proj_dir, 'cbow')
embeddings_output = os.path.join(proj_dir, 'embeddings', f'cbow-{desc}.kv')
loss_output = os.path.join(curr_dir, 'logs', f'train-loss-{desc}.csv')
time_output = os.path.join(curr_dir, 'logs', f'train-time-{desc}.csv')

# load corpus
timer = Timer()
corpus = load_hcpcs_corpus(debug)
print(f'Loaded corpus with length {len(corpus)} in {timer.lap()}')

# use sample for debug
if debug:
    corpus = corpus[:500000]
    print(f'Using sample of corpus with length {len(corpus)}')

# vocab size
vocab_size = get_vocab_size(corpus)

# loss and timing callback
callback = GensimEpochCallback(loss_output, time_output)

# train model
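# The original example breaks off after this comment; the lines below are a
# minimal sketch of the missing step, assuming gensim 4.x keyword names
# (`vector_size`, `epochs`), that `corpus` is a list of token lists, and that
# `min_seq_length` is meant to filter out short sequences (an assumption).
from gensim.models import Word2Vec

corpus = [seq for seq in corpus if len(seq) >= min_seq_length]
model = Word2Vec(sentences=corpus, vector_size=embedding_size,
                 window=window_size, sg=0, epochs=iters,
                 compute_loss=True, callbacks=[callback])  # sg=0 selects CBOW
model.wv.save(embeddings_output)
print(f'Trained CBOW embeddings in {timer.lap()}')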
def main(experiment_name,
         optimizer,
         output_directory_root="experiments/resnet18_logistic_cifar10",
         epochs=60,
         batch_size=512,
         num_workers=1):

    output_directory = os.path.join(output_directory_root, experiment_name)
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory, exist_ok=True)

    # Setup regular log file + tensorboard
    logfile_path = os.path.join(output_directory, "logfile.txt")
    setup_logger_tqdm(logfile_path)

    tensorboard_log_directory = os.path.join("runs",
                                             "resnet18_logistic_cifar10",
                                             experiment_name)
    tensorboard_summary_writer = SummaryWriter(
        log_dir=tensorboard_log_directory)

    # Choose Training Device
    use_cuda = torch.cuda.is_available()
    logger.info(f"CUDA Available? {use_cuda}")
    device = "cuda" if use_cuda else "cpu"

    # Datasets and Loaders
    train_set_loader, test_set_loader = get_data_loaders(
        batch_size, num_workers)

    # Create Model & Optimizer
    model = torchvision.models.resnet18(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False
    num_classes = 10
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    model.to(device)
    optimizer = optimizer(model.parameters())

    logger.info("=========== Commencing Training ===========")
    logger.info(f"Epoch Count: {epochs}")
    logger.info(f"Batch Size: {batch_size}")

    # Load Checkpoint
    checkpoint_file_path = os.path.join(output_directory, "checkpoint.pth")
    start_epoch = 0
    if os.path.exists(checkpoint_file_path):
        logger.info("Checkpoint Found - Loading!")

        checkpoint = torch.load(checkpoint_file_path)
        logger.info(f"Last completed epoch: {checkpoint['epoch']}")
        logger.info(f"Average Train Loss: {checkpoint['train_loss']}")
        logger.info(f"Top-1 Train Accuracy: {checkpoint['train_accuracy']}")
        logger.info(f"Top-1 Test Accuracy: {checkpoint['test_accuracy']}")
        start_epoch = checkpoint["epoch"] + 1
        logger.info(f"Resuming at epoch {start_epoch}")

        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    else:
        logger.info("No checkpoint found, starting from scratch.")

    # Training Loop
    t = Timer()
    for epoch in range(start_epoch, epochs):
        t.start()
        logger.info("-" * 10)
        logger.info(f"Epoch {epoch}")
        logger.info("-" * 10)

        train_loss, train_accuracy = train_model(device, model,
                                                 train_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("train_loss", train_loss, epoch)
        tensorboard_summary_writer.add_scalar("train_accuracy", train_accuracy,
                                              epoch)

        test_accuracy = test_model(device, model, test_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("test_accuracy", test_accuracy,
                                              epoch)

        # Save Checkpoint
        logger.info("Saving checkpoint.")
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': train_loss,
                'train_accuracy': train_accuracy,
                'test_accuracy': test_accuracy
            }, checkpoint_file_path)

        elapsed_time = t.stop()
        logger.info(f"End of epoch {epoch}, took {elapsed_time:0.4f} seconds.")
        logger.info(f"Average Train Loss: {train_loss}")
        logger.info(f"Top-1 Train Accuracy: {train_accuracy}")
        logger.info(f"Top-1 Test Accuracy: {test_accuracy}")
        logger.info("")
class XioAll(QtGui.QWidget):
    '''Main application class.
    '''
    HOST = 'localhost'
    PORT = 8081
    TOTAL = 0
    isStatic = True
    action = None
    pre_action = None
    action_video = None  # recognizable from within the video
    pre_action_video = None

    def __init__(self):
        super(XioAll, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)

        self.frame_left = None
        self.frame_right = None
        self.is_work = True
        self.one_static_time = 0  # duration of one fault/idle period
        self.all_time = 0  # working time for the whole day
        self.q = MyQueue()  # queue of frames; storing states would be better
        self.vision = Vision()
        # If the date has changed, insert an all-zero row automatically
        da = data_access.EquipmentTimeData()  # operates on the loss statistics table
        result_loss = da.select_("select * from loss ORDER BY SJ DESC limit 1")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d')
        if str(result_loss[0][0]) != current_time:
            da.update('insert into loss(SJ,action1,action2,action3,action4,action5,action6)values'
                      '("%s",%d,%d,%d,%d,%d,%d)' % (current_time, 0, 0, 0, 0, 0, 0))
        else:
            pass

        da_oee = data_access.OEEData()  # tracks the real-time OEE utilization
        result_oee = da_oee.select_('select * from oee_date ORDER BY SJC DESC limit 1')
        if str(result_oee[0][0]) != current_time:
            da_oee.update_('insert into oee_date(SJC,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18)values'
                           '("' + current_time + '",0,0,0,0,0,0,0,0,0,0,0)')
        else:
            pass
        self.thread_figure = Timer('updatePlay()', sleep_time=120)  # refreshes the chart area every 2 minutes
        self.connect(self.thread_figure, QtCore.SIGNAL('updatePlay()'), self.draw)
        self.thread_figure.start()

        self.server = ThreadedTCPServer((self.HOST, self.PORT), ThreadedTCPRequestHandler)  # listens for client requests
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.start()

        self.thread_video_receive = threading.Thread(target=self.video_receive_local)  # reads the video streams
        self.thread_video_receive.start()

        self.thread_time = Timer('updatePlay()')  # redraws the labels every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'), self.video_play)
        self.thread_time.start()

        self.thread_recog = Timer('updatePlay()', sleep_time=1)  # analyzes a frame every second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'), self.video_recog)
        self.thread_recog.start()

        self.thread_data = Timer('updatePlay()', sleep_time=1800)  # reads from the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'), self.data_read)
        self.thread_data.start()

    def video_receive_local(self, cam1='./videos/left_cam.mp4', cam2='./videos/right_cam.mp4', time_flag=True):
        '''Receive local video.
        :param cam1: left camera source
        :param cam2: right camera source
        :param time_flag: whether to sleep between frames; True for local video
        :return: None
        '''
        self.left_cam = cv2.VideoCapture(cam1)
        self.right_cam = cv2.VideoCapture(cam2)
        ret_1, frame_1 = self.left_cam.read()
        ret_2, frame_2 = self.right_cam.read()
        while True:
            self.frame_left = frame_1
            self.frame_right = frame_2
            if ret_1 is False:
                self.left_cam = cv2.VideoCapture(cam1)
            if ret_2 is False:
                self.right_cam = cv2.VideoCapture(cam2)
            ret_1, frame_1 = self.left_cam.read()
            ret_2, frame_2 = self.right_cam.read()
            if time_flag is True:
                time.sleep(0.04)

    def video_receive_rstp(self, cam1='rstp:', cam2='rstp:'):
        '''Receive network video streams.
        :param cam1: left camera source
        :param cam2: right camera source
        :return: None
        '''
        self.video_receive_local(cam1=cam1, cam2=cam2, time_flag=False)

    def video_play(self):
        '''Play the video.
        :return: None
        '''

        def label_show_left(frame, label=self.ui.label):  # render on the left label widget
            height, width, _ = frame.shape
            frame_change = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resize = cv2.resize(frame_change, (360, 240), interpolation=cv2.INTER_AREA)
            image = QtGui.QImage(frame_resize.data, frame_resize.shape[1], frame_resize.shape[0],
                                 QtGui.QImage.Format_RGB888)  # convert to QImage
            label.setPixmap(QtGui.QPixmap.fromImage(image))

        def label_show_right(frame, label=self.ui.label_2):  # render on the right label widget
            label_show_left(frame, label)

        if self.frame_left is not None:
            label_show_left(self.frame_left)
        if self.frame_right is not None:
            label_show_right(self.frame_right)

    def draw(self):
        '''
        Render the charts.
        :return:
        '''

        def draw_fp():  # draw the loss pie chart
            fp = Figure_Pie()
            da = data_access.EquipmentData()
            result = da.select()
            fp.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))  # '*' unpacks the tuple: (1, 1, 1, 1) becomes 1 1 1 1
            graphicscene_fp = QtGui.QGraphicsScene()
            graphicscene_fp.addWidget(fp.canvas)
            self.ui.graphicsView_Pie.setScene(graphicscene_fp)
            self.ui.graphicsView_Pie.show()

        def draw_oee():  # draw the hourly OEE chart
            current_time = datetime.datetime.now().strftime('%Y-%m-%d')
            lossTime = data_access.EquipmentTimeData()
            result_loss = lossTime.select_("select * from loss ORDER BY SJ DESC limit 1")
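            # Pinyin variable glossary (comments added for readability):
            # zongshijian = current clock time, huanxing = changeover loss,
            # dailiao = waiting for material, shebeiguzhang = equipment
            # failure, tingzhi = stoppage, fuheshijian = loaded time (the
            # shift is assumed to start at 08:00); losses are in seconds.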
            zongshijian = time.strftime('%H:%M:%S', time.localtime(time.time()))
            huanxing = result_loss[0][1]
            dailiao = result_loss[0][2]
            shebeiguzhang = result_loss[0][3]
            tingzhi = result_loss[0][4]
            # qitashijian=result[0][5]
            # kongyunzhuan=result[0][6]
            fuheshijian = (int(zongshijian.split(':')[0]) - 8) * 3600 + int(zongshijian.split(':')[1]) * 60 + int(
                zongshijian.split(':')[2]) - tingzhi
            shijijiagong_1 = fuheshijian - huanxing - dailiao - shebeiguzhang
            eff = int(shijijiagong_1 / fuheshijian * 100)  # compute efficiency (%)
            print(eff)

            hour = time.localtime()[3]  # current hour, updated in real time
            da_oee = data_access.OEEData()
            da_oee.update_("update oee_date set O" + str(hour) + "=" + str(eff) + ' where SJC="' + current_time + '"')
            L_eff = []
            oee = Figure_OEE()
            da = data_access.OEEData()
            result = da.select()
            hour = time.localtime()[3]
            if hour < 20:
                for i in range(1, hour - 6):
                    L_eff.append(result[-1][i])
            oee.plot(*tuple(L_eff))  # unpack hourly values as arguments
            graphicscene_oee = QtGui.QGraphicsScene()
            graphicscene_oee.addWidget(oee.canvas)
            self.ui.graphicsView_OEE.setScene(graphicscene_oee)
            self.ui.graphicsView_OEE.show()

        def draw_loss():  # draw the loss histogram
            loss = Figure_Loss()
            da = data_access.EquipmentTimeData()
            result = da.select()
            loss.plot(*(result[-1][1], result[-1][2], result[-1][3], result[-1][4]))
            graphicscene_loss = QtGui.QGraphicsScene()
            graphicscene_loss.addWidget(loss.canvas)
            self.ui.graphicsView_Loss.setScene(graphicscene_loss)
            self.ui.graphicsView_Loss.show()

        def draw_mt():  # draw the consumables usage chart
            mt = Figure_MT()
            mt.plot()
            graphicscene_mt = QtGui.QGraphicsScene()
            graphicscene_mt.addWidget(mt.canvas)
            self.ui.graphicsView_MT.setScene(graphicscene_mt)
            self.ui.graphicsView_MT.show()

        draw_fp()
        draw_loss()
        draw_mt()
        draw_oee()

    def video_recog(self):
        '''
        Video recognition.
        :return:
        '''
        frame_left = self.frame_left  # original color frame, left camera
        frame_left_gray = cv2.cvtColor(frame_left, cv2.COLOR_BGR2GRAY)  # grayscale of the original frame

        # frame_right = self.frame_left  # original color frame
        # frame_right_gray = cv2.cvtColor(frame_right, cv2.COLOR_BGR2GRAY)

        def video_recog_left():
            img = frame_left
            spark = self.vision.find_spark(img)
            self.q.enqueue(spark)
            # print(spark)
            if spark or True in self.q.queue:  # continuous sparks within the interval (machine-motion check to be added later) mean the machine is working
                #print('work')
                self.action_video = None
                self.one_static_time = 0  # motion resumed: reset the single idle-period counter
            else:
                # ******* screenshot
                self.one_static_time += 1  # one idle period
                if self.one_static_time % 60 == 0:
                    print('start or static')
                    print('Machine idle; check the catch folder for the cause')
                    t = time.localtime()
                    hour = t[3]
                    mini = t[4]
                    seco = t[5]
                    filename = str(hour) + '-' + str(mini) + '-' + str(seco)
                    cv2.imwrite('./catch/' + filename + '.jpg', img)
                # ********

                self.action = ThreadedTCPRequestHandler.action  # keyboard action
                if self.action is not None:  # write to the panel why the machine is currently idle
                    if self.pre_action is None:
                        print(self.action)
                        message = '[' + time.strftime('%Y-%m-%d %H:%M:%S',
                                                      time.localtime(time.time())) + ']' + str(self.action)
                        self.displayMessage(message)

                if self.vision.tiaoshi(frame_left_gray):
                    self.action_video = 'tiaoshi'
                if self.action_video is not None:
                    if self.pre_action_video is None:
                        print(self.action_video)
                        message = '[' + time.strftime('%Y-%m-%d %H:%M:%S',
                                                      time.localtime(time.time())) + ']' + str(self.action_video)
                        self.displayMessage(message)



        def video_recog_right():  # reserved for future features such as gas-cylinder replacement
            pass

        video_recog_left()
        video_recog_right()
        self.pre_action = self.action
        self.pre_action_video = self.action_video

    def data_read(self):
        pass

    def displayMessage(self, message):
        self.ui.textBrowser.append(message)
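
# Minimal launch sketch (an addition, not part of the original example),
# assuming the PyQt4-era QtGui API used throughout and a `sys` import:
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    window = XioAll()
    window.show()
    sys.exit(app.exec_())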
Ejemplo n.º 28
def inference_mean_exemplar(
    model,
    current_epoch,
    current_iter,
    local_rank,
    data_loader,
    dataset_name,
    device="cuda",
    max_instance=3200,
    mute=False,
):
    model.train(False)
    # convert to a torch.device for efficiency
    device = torch.device(device)
    if not mute:
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        logger.info("Start evaluation")
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    torch.cuda.empty_cache()
    if not mute:
        pbar = tqdm(total=len(data_loader), desc="Validation in progress")
    with torch.no_grad():
        all_pred_obj, all_truth_obj, all_pred_attr, all_truth_attr = [], [], [], []
        obj_loss_all, attr_loss_all = 0, 0
        cnt = 0
        for iteration, out_dict in enumerate(data_loader):
            if type(max_instance) is int:
                if iteration == max_instance // model.cfg.EXTERNAL.BATCH_SIZE:
                    break
            if type(max_instance) is float:
                if iteration > max_instance * len(
                        data_loader) // model.cfg.EXTERNAL.BATCH_SIZE:
                    break
            # print(iteration)
            images = torch.stack(out_dict['images'])
            obj_labels = torch.cat(out_dict['object_labels'], -1)
            attr_labels = torch.cat(out_dict['attribute_labels'], -1)
            cropped_image = torch.stack(out_dict['cropped_image'])

            images = images.to(device)
            obj_labels = obj_labels.to(device)
            attr_labels = attr_labels.to(device)

            cropped_image = cropped_image.to(device)
            # loss_dict = model(images, targets)
            pred_obj = model.mean_of_exemplar_classify(cropped_image)

            all_pred_obj.extend(to_list(pred_obj))
            all_truth_obj.extend(to_list(obj_labels))
            cnt += 1
            if not mute:
                pbar.update(1)

        obj_f1 = f1_score(all_truth_obj, all_pred_obj, average='micro')
        #attr_f1 = f1_score(all_truth_attr, all_pred_attr, average='micro')
        obj_loss_all /= (cnt + 1e-10)
    # wait for all processes to complete before measuring the time
    total_time = total_timer.toc()
    model.train(True)
    return obj_f1, 0, len(all_truth_obj)
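
# Hypothetical usage sketch (an addition): run a capped validation pass and
# read back the micro-averaged object F1; `epoch`, `it`, and `val_loader`
# are assumed names from a surrounding training loop.
# obj_f1, _, n_eval = inference_mean_exemplar(
#     model, epoch, it, local_rank=0, data_loader=val_loader,
#     dataset_name="val", max_instance=0.25, mute=True)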
Ejemplo n.º 29
class XioPlayVideo(QtGui.QWidget):
    '''Main application class.
    '''
    def __init__(self):
        super(XioPlayVideo, self).__init__()
        self.ui = ui.Ui_Form()
        self.ui.setupUi(self)
        self.left_cam = cv2.VideoCapture('./videos/left_cam.mp4')  # left camera
        self.right_cam = cv2.VideoCapture('./videos/right_cam.mp4')
        self.frame_left = None
        self.frame_right = None

        self.thread_video_receive = threading.Thread(
            target=self.video_receive_local)  # reads the video streams
        self.thread_video_receive.start()
        self.thread_time = Timer('updatePlay()')  # redraws the labels every 0.04 s
        self.connect(self.thread_time, QtCore.SIGNAL('updatePlay()'),
                     self.video_play)
        self.thread_time.start()
        self.thread_recog = Timer('updatePlay()',
                                  sleep_time=1)  # analyzes a frame every second
        self.connect(self.thread_recog, QtCore.SIGNAL('updatePlay()'),
                     self.video_recog)
        self.thread_recog.start()
        self.thread_data = Timer('updatePlay()',
                                 sleep_time=1800)  # reads from the database every half hour
        self.connect(self.thread_data, QtCore.SIGNAL('updatePlay()'),
                     self.data_read)
        self.thread_data.start()
        self.thread_tcp = None  # thread for TCP handling, unfinished

    def video_receive_local(self,
                            cam1='./videos/left_cam.mp4',
                            cam2='./videos/right_cam.mp4',
                            time_flag=True):
        '''Receive local video.
        :param cam1: left camera source
        :param cam2: right camera source
        :param time_flag: whether to sleep between frames; True for local video
        :return: None
        '''
        if self.left_cam.isOpened() is False:
            self.left_cam = cv2.VideoCapture(cam1)
        if self.right_cam.isOpened() is False:
            self.right_cam = cv2.VideoCapture(cam2)
        ret_1, frame_1 = self.left_cam.read()
        ret_2, frame_2 = self.right_cam.read()
        while True:
            self.frame_left = frame_1
            self.frame_right = frame_2
            if ret_1 is False:
                self.left_cam = cv2.VideoCapture(cam1)
            if ret_2 is False:
                self.right_cam = cv2.VideoCapture(cam2)
            ret_1, frame_1 = self.left_cam.read()
            ret_2, frame_2 = self.right_cam.read()
            if time_flag is True:
                time.sleep(0.04)

    def video_receive_rstp(self, cam1='rstp:', cam2='rstp:'):
        '''Receive network video streams.
        :param cam1: left camera source
        :param cam2: right camera source
        :return: None
        '''
        self.video_receive_local(cam1=cam1, cam2=cam2, time_flag=False)

    def video_play(self):
        '''Play the video.
        :return: None
        '''
        def label_show_left(frame, label=self.ui.label):  # render on the left label widget
            height, width, _ = frame.shape
            frame_change = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resize = cv2.resize(frame_change, (500, 300),
                                      interpolation=cv2.INTER_AREA)
            image = QtGui.QImage(frame_resize.data, frame_resize.shape[1],
                                 frame_resize.shape[0],
                                 QtGui.QImage.Format_RGB888)  # convert to QImage
            label.setPixmap(QtGui.QPixmap.fromImage(image))

        def label_show_right(frame, label=self.ui.label_2):  # render on the right label widget
            label_show_left(frame, label)

        if self.frame_left is not None:
            label_show_left(self.frame_left)
        if self.frame_right is not None:
            label_show_right(self.frame_right)

    def video_recog(self):
        pass

    def data_read(self):
        pass
Ejemplo n.º 30
def pretext_train(args, recorder):
    if args.gpus is not None:
        print("Use GPU: {} for pretext training".format(args.gpus))
    num_class, data_length, image_tmpl = pt_data_config(args)
    # print("tp_length is: ", data_length)
    train_transforms, test_transforms, eval_transforms = pt_augmentation_config(
        args)
    train_loader, val_loader, eval_loader, train_samples, val_samples, eval_samples = pt_data_loader_init(
        args, data_length, image_tmpl, train_transforms, test_transforms,
        eval_transforms)

    n_data = len(train_loader)

    model, model_ema = pt_model_config(args, num_class)
    # == optim config==
    contrast, criterion, optimizer = pt_optim_init(args, model, n_data)
    model = model.cuda()
    # == load weights ==
    model, model_ema = pt_load_weight(args, model, model_ema, optimizer,
                                      contrast)
    if args.pt_method in ['dsm', 'moco']:
        model_ema = model_ema.cuda()
        # copy weights from `model' to `model_ema'
        moment_update(model, model_ema, 0)
    cudnn.benchmark = True
    # optionally resume from a checkpoint
    args.start_epoch = 1

    # ==================================== our data augmentation method=================================
    if args.pt_method in ['dsm', 'dsm_triplet']:
        pos_aug = GenPositive()
        neg_aug = GenNegative()

    # =======================================add message =====================
    recorder.record_message('a', '=' * 100)
    recorder.record_message('a', '-' * 40 + 'pretrain' + '-' * 40)
    recorder.record_message('a', '=' * 100)
    # ==================== parse lr_decay epochs from comma-separated str to list =========
    iterations = args.pt_lr_decay_epochs.split(',')
    args.pt_lr_decay_epochs = list([])
    for it in iterations:
        args.pt_lr_decay_epochs.append(int(it))
    timer = Timer()
    # routine
    print('*' * 70 + 'Step1: pretrain' + '*' * 70)
    for epoch in range(args.pt_start_epoch, args.pt_epochs + 1):
        timer.tic()
        pt_adjust_learning_rate(epoch, args, optimizer)
        print("==> training...")

        time1 = time.time()
        if args.pt_method == "moco":
            loss, prob = train_moco(epoch, train_loader, model, model_ema,
                                    contrast, criterion, optimizer, args,
                                    recorder)
        elif args.pt_method == "dsm":
            loss, prob = train_dsm(epoch, train_loader, model, model_ema,
                                   contrast, criterion, optimizer, args,
                                   pos_aug, neg_aug, recorder)
        # loss, prob = epoch * 0.01, 0.02*epoch
        elif args.pt_method == "dsm_triplet":
            loss = train_dsm_triplet(epoch, train_loader, model, optimizer,
                                     args, pos_aug, neg_aug, recorder)
        else:
            Exception("Not support method now!")
        recorder.record_pt_train(loss)
        time2 = time.time()
        print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))

        timer.toc()
        left_time = timer.average_time * (args.pt_epochs - epoch)
        message = "Step1: pretrain now loss is: {} left time is : {} now is: {}".format(
            loss, timer.format(left_time), datetime.now())
        print(message)
        recorder.record_message('a', message)
        state = {
            'opt': args,
            'model': model.state_dict(),
            'contrast': contrast.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch,
        }
        recorder.save_pt_model(args, state, epoch)
    print("finished pretrain, the trained model is record in: {}".format(
        recorder.pt_checkpoint))
    return recorder.pt_checkpoint