Example 1
File: azure.py Project: gomes-/alx
    def print_list(self):
        """Query rows with PartitionKey 'net' and print them as a colored ASCII table."""
        try:
            rows = self.tbl_row_query(self.tbl_name, "PartitionKey eq 'net'")
            #print(dir(row))

            from colorclass import Color, Windows

            from terminaltables import AsciiTable

            # Enable ANSI color output in the Windows console (no-op elsewhere).
            Windows.enable(auto_colors=True, reset_atexit=True)

            table_data = [[Color('{autocyan}Hostname'),
                           Color('{autocyan}Last Reply'),
                           Color('{autocyan}IP'),
                           Color('{autocyan}OS'),
                           Color('{autocyan}OS Release'),
                           Color('{autocyan}OS Version'),
                           Color('{autocyan}System'),
                           Color('{autocyan}Processor'),
                           ]
                          ]

            # Wrap long fields (OS version, processor) so columns stay narrow.
            max_wrap = 10

            for row in rows:
                #d = self.entity2dict(row)
                d = row.__dict__
                time = alxlib.time_help.format_from_timestamp(d['Timestamp'])
                li = [d['hostname'], time, d['ip'], d['os'], d['osrelease'],
                      self.wrap_text(d['osversion'], max_wrap), d["system"],
                      self.wrap_text(d["processor"], max_wrap), ]
                table_data.append(li)

            table = AsciiTable(table_data)

            table.table_data = table_data

            print(table.table)
        except Exception as e:
            logging.warning("Error print_list: %s", e)
            print(e)
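
For reference, the AsciiTable pattern used above is: build a list of rows (header row first), pass it to AsciiTable, and print the rendered .table string. A minimal standalone sketch, with made-up host data purely for illustration:

from colorclass import Color, Windows
from terminaltables import AsciiTable

# No-op outside Windows; enables ANSI color output in the Windows console.
Windows.enable(auto_colors=True, reset_atexit=True)

# Header row first, then one list per data row (hypothetical values).
table_data = [
    [Color('{autocyan}Hostname'), Color('{autocyan}IP'), Color('{autocyan}OS')],
    ['web-01', '10.0.0.5', 'Linux'],
    ['db-01', '10.0.0.6', 'Windows'],
]

table = AsciiTable(table_data)
print(table.table)  # .table renders the framed ASCII string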
Example 2
def val(model, optimizer, dataloader, epoch, opt, val_logger, visualizer=None):
    """Run one validation pass and report detection metrics for this epoch."""
    labels = []
    sample_matrics = []
    for i, (images, targets) in enumerate(dataloader):
        labels += targets[:, 1].tolist()
        # Convert target boxes from (cx, cy, w, h) to corner format and scale to pixels.
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= opt.image_size

        outputs = model.forward(images)
        outputs = non_max_suppression(outputs, opt.conf_thres, opt.nms_thres)
        sample_matrics += get_batch_statistics(outputs,
                                               targets,
                                               iou_threshold=0.5)

        if visualizer is not None:
            visualizer.plot_current_visuals(images, outputs)

    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_matrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    # logging
    metric_table_data = [['Metrics', 'Value'], ['precision',
                                                precision.mean()],
                         ['recall', recall.mean()], ['f1', f1.mean()],
                         ['mAP', AP.mean()]]

    metric_table = AsciiTable(metric_table_data,
                              title='[Epoch {:d}/{:d}]'.format(
                                  epoch, opt.num_epochs))
    print('{}\n\n\n'.format(metric_table.table))

    class_names = load_classe_names(opt.classname_path)
    for i, c in enumerate(ap_class):
        metric_table_data += [['AP-{}'.format(class_names[c]), AP[i]]]
    metric_table.table_data = metric_table_data
    val_logger.write('{}\n\n\n'.format(metric_table.table))
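
Both this example and the next assume a helper such as xywh2xyxy that converts boxes from center format (cx, cy, w, h) to corner format (x1, y1, x2, y2) before scaling by opt.image_size. A minimal NumPy sketch of that conversion (not the project's exact implementation):

import numpy as np

def xywh2xyxy_np(boxes):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2)."""
    out = np.copy(boxes)
    out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2  # x1
    out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2  # y1
    out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2  # x2
    out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2  # y2
    return out

boxes = np.array([[0.5, 0.5, 0.2, 0.4]])  # normalized (cx, cy, w, h)
print(xywh2xyxy_np(boxes) * 416)          # scaled to pixels, as with opt.image_size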
Example 3
def val(model, optimizer, dataloader, epoch, opt, val_logger, best_mAP=0):
    """Run one validation pass; checkpoint the model whenever mAP improves."""
    labels = []
    sample_matrics = []
    if opt.gpu:
        model = model.to(opt.device)
    model.eval()

    for i, (images, targets) in enumerate(dataloader):
        labels += targets[:, 1].tolist()
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= opt.image_size

        batches_done = len(dataloader) * epoch + i
        if opt.gpu:
            images = Variable(images.to(opt.device))

        detections = model.forward(images)
        detections = non_max_suppression(detections, opt.conf_thresh,
                                         opt.nms_thresh)
        sample_matrics += get_batch_statistics(detections,
                                               targets.cpu(),
                                               iou_threshold=0.5)

    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_matrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    # logging
    metric_table_data = [['Metrics', 'Value'], ['precision',
                                                precision.mean()],
                         ['recall', recall.mean()], ['f1', f1.mean()],
                         ['mAP', AP.mean()]]

    metric_table = AsciiTable(metric_table_data,
                              title='[Epoch {:d}/{:d}]'.format(
                                  epoch, opt.num_epochs))

    class_names = load_classe_names(opt.classname_path)
    for i, c in enumerate(ap_class):
        metric_table_data += [['AP-{}'.format(class_names[c]), AP[i]]]
    metric_table.table_data = metric_table_data
    val_logger.print_and_write('{}\n'.format(metric_table.table))

    if best_mAP < AP.mean():
        # New best score: record it before building the checkpoint so the saved
        # 'best_mAP' reflects the current epoch rather than the previous best.
        best_mAP = AP.mean()
        save_file_path = os.path.join(opt.checkpoint_path, 'best.pth')
        states = {
            'epoch': epoch + 1,
            'model': opt.model,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_mAP': best_mAP,
        }
        torch.save(states, save_file_path)

    print("current best mAP:" + str(best_mAP))

    return best_mAP
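
The best-checkpoint bookkeeping above follows the usual torch.save pattern of bundling epoch, weights, optimizer state, and the best score into one dict. A minimal sketch of saving and later restoring such a checkpoint, using a stand-in model (the keys mirror the example; the values here are hypothetical):

import torch
import torch.nn as nn

model = nn.Linear(10, 2)  # stand-in for the detection model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Bundle everything needed to resume training from the best epoch.
states = {
    'epoch': 5,
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'best_mAP': 0.42,  # hypothetical score
}
torch.save(states, 'best.pth')

# Later: rebuild the same model/optimizer, then load the saved states.
checkpoint = torch.load('best.pth')
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('resumed at epoch', checkpoint['epoch'], 'best mAP', checkpoint['best_mAP'])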