Example #1
def predict(one_case_path, modality):
    data_source = ds.Dataset(one_case_path, modality)
    params = param.dict_supper_parameters
    list_label_volume_data = []
    for roi_name in params:
        print('Predicting %s' % roi_name)
        result_label_volume_data = predictor.predict(roi_name, data_source)
        list_label_volume_data.append([roi_name, result_label_volume_data])
    list_dicom_dataset = data_source.get_dicom_dataset_list()
    roi_list = dcmio.get_roi_list(list_dicom_dataset, list_label_volume_data)
    result_roi_list = []
    for roi in roi_list:
        roi_name = roi[0]      # ROI name sits at index 0 of each entry.
        contour_list = roi[5]  # Contours sit at index 5.
        result_contour_list = []
        for contour in contour_list:
            point_list = contour[1]
            # if len(point_list) < 3:
            #     continue
            # Each point is a 1xN matrix; keep only its x, y, z coordinates.
            result_point_list = [[p[0, 0], p[0, 1], p[0, 2]] for p in point_list]
            result_contour_list.append(result_point_list)
        result_roi_list.append([roi_name, result_contour_list])
    return result_roi_list
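A minimal usage sketch, assuming one_case_path points at a folder of DICOM slices and the modality string matches a key the param module expects; both values below are illustrative:

# Hypothetical call; the path and modality are assumptions.
roi_contours = predict('/data/cases/case_001', 'CT')
for roi_name, contours in roi_contours:
    print(roi_name, 'has', len(contours), 'contour(s)')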
Example #2
    def test_atributos_eh_consistente_ds_cheio(self):
        '''Checks that the *_atributos* attribute of Dataset is consistent
        when *_dados* is non-empty.'''
        dados = np.zeros(shape=(5, 4))
        ds = dm.Dataset(dados)
        # The column vector is converted to a list so it can be compared.
        self.assertEqual(
            pd.DataFrame(dados).columns.tolist(), ds.get_atributos())
Example #3
    def test_atualiza_atributos(self):
        '''Checks that the atualiza_atributos method updates the *_atributos*,
        *_natributos* and *_ninstancias* attributes when *_dados* is modified.'''
        ds = dm.Dataset()
        dados = np.zeros(shape=(5, 4))
        ds.set_dados(dados)
        ds.atualiza_atributos()
        # The column vector is converted to a list so it can be compared.
        self.assertEqual(
            pd.DataFrame(dados).columns.tolist(), ds.get_atributos())
        self.assertEqual(4, ds.get_natributos())
        self.assertEqual(5, ds.get_ninstancias())
Example #4
def load_dataset(category, path, validation_percentage, test_percentage):
    dt = dd.Dataset(path, validation_percentage, test_percentage)
    dt.split()
    if category == 'train':
        return dt.get_train()
    elif category == 'validation':
        return dt.get_validation()
    else:
        return dt.get_test()
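A hedged example of calling this loader, assuming the two percentages are the fractions held out for validation and testing; the path and values are assumptions:

# Illustrative calls; the path and split fractions are assumptions.
train_set = load_dataset('train', '/data/images', 0.1, 0.1)
val_set = load_dataset('validation', '/data/images', 0.1, 0.1)
test_set = load_dataset('test', '/data/images', 0.1, 0.1)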
Example #5
def readDataset(metaFile, images_dir):
    # Read metadata and split data in training and validation
    metadata = meta.Metadata(metaFile, dtype=None)
    trainingFilter = lambda df: df["Allele_Replicate"] <= 5
    validationFilter = lambda df: df["Allele_Replicate"] > 5
    metadata.splitMetadata(trainingFilter, validationFilter)
    # Create a dataset
    keyGen = lambda r: "{}/{}-{}".format(
        r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"])
    dataset = ds.Dataset(metadata, "Allele", CHANNELS, images_dir, keyGen)
    print(metadata.data.iloc[100])
    return dataset
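A short usage sketch, assuming the metadata csv provides the Allele_Replicate and Metadata_* columns referenced above; both locations are illustrative:

# Hypothetical invocation; both paths are assumptions.
dataset = readDataset('/data/metadata.csv', '/data/images/')
print(dataset.numberOfRecords("all"))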
Example #6
def compressBatch(args):
    plate, imgsDir, statsDir, outDir = args
    statsfile = statsDir + plate.data.iloc[0]["Metadata_Plate"] + ".pkl"
    with open(statsfile, "rb") as stats_file:
        stats = pickle.load(stats_file)
    dataset = ds.Dataset(plate, "Allele", CHANNELS, imgsDir)
    compress = px.Compress(stats, CHANNELS, outDir)
    compress.setFormats(sourceFormat="tif", targetFormat="png")
    compress.setScalingFactor(1.0)
    compress.recomputePercentile(0.0001, side="lower")
    compress.recomputePercentile(0.9999, side="upper")
    compress.expected = dataset.numberOfRecords("all")
    dataset.scan(compress.processImage, frame="all")
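compressBatch takes a single argument tuple, so it maps cleanly over plates in parallel; a sketch using only the standard library, where the plates iterable and all directories are assumptions:

# Hypothetical parallel driver; the plate list and paths are assumptions.
from multiprocessing import Pool

jobs = [(plate, '/data/images/', '/data/stats/', '/data/compressed/')
        for plate in plates]
with Pool(processes=4) as pool:
    pool.map(compressBatch, jobs)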
Example #7
    def test_ler_csv_existente_carrega_dataset(self):
        '''Checks that the ler_csv method reads an existing csv file and
        loads the *_dados* attribute correctly.'''
        dados = np.zeros(shape=(5, 4))
        df = pd.DataFrame(dados)
        # Attribute names must be strings.
        df.columns = [str(i) for i in df.columns]
        nomeArquivo = "teste.csv"
        df.to_csv(nomeArquivo)
        ds = dm.Dataset()
        ds.ler_csv(nomeArquivo)
        # Checks that the two dataframes are equal.
        pdt.assert_frame_equal(df, ds.get_dados())
        os.remove(nomeArquivo)
Example #8
def intensityStats(args):
    plate, root, outDir = args
    plateName = plate.data["Metadata_Plate"].iloc[0]
    dataset = ds.Dataset(plate, "Allele", CHANNELS, root)
    hist = px.ImageStatistics(BITS,
                              CHANNELS,
                              DOWN_SCALE_FACTOR,
                              MEDIAN_FILTER_SIZE,
                              name=plateName)
    hist.expected = dataset.numberOfRecords("all")
    dataset.scan(hist.processImage, frame="all")
    stats = hist.computeStats()
    outfile = outDir + plateName + ".pkl"
    with open(outfile, "wb") as output:
        pickle.dump(stats, output)
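intensityStats writes the per-plate .pkl that compressBatch (Example #6) later reads, so the two chain naturally; a hedged sequential sketch with an assumed plates list and illustrative directories:

# Illustrative two-step pipeline: compute statistics, then compress with them.
for plate in plates:
    intensityStats((plate, '/data/images/', '/data/stats/'))
for plate in plates:
    compressBatch((plate, '/data/images/', '/data/stats/', '/data/compressed/'))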
Example #9
def main(arguments):
    data = dt.Dataset(dt.DATASET_FILE, dt.VOCAB_FILE)

    input_data = np.asarray([[0, 1], [1, 0], [0, 0], [0, 1]])
    input_shape = input_data[0].shape

    print(input_data.shape)
    inp = tf.keras.layers.Input(shape=input_shape)

    # dnc = tf.keras.layers.Dense(32, activation="relu")(inp)
    dnc = DenseDNC(2)(inp)

    model = tf.keras.models.Model(inputs=inp, outputs=dnc)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['mae'])
    model.summary()
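    # Hedged continuation, not in the original: a hypothetical training call,
    # assuming DenseDNC(2) yields a two-way probability-like output compatible
    # with categorical cross-entropy. The one-hot labels are invented.
    labels = np.asarray([[1, 0], [0, 1], [1, 0], [0, 1]])
    model.fit(input_data, labels, epochs=2, batch_size=2)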
Example #10
    def __init__(self):
        self.initial_weight = cfg.EVAL.WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.CONTFUSE.MOVING_AVE_DECAY
        self.eval_logdir = "./data/logs/eval"
        self.lidar_preprocessor = preprocess.LidarPreprocessor()
        self.evalset = dataset.Dataset(self.lidar_preprocessor, 'test')
        self.output_dir = cfg.EVAL.OUTPUT_PRED_PATH
        self.img_anchors = loader.load_anchors(cfg.IMAGE.ANCHORS)
        self.bev_anchors = loader.load_anchors(cfg.BEV.ANCHORS)

        with tf.name_scope('model'):
            self.model = contfuse_network.ContfuseNetwork()
            self.net = self.model.load()
            self.img_pred = self.net['img_pred']
            self.bev_pred = self.net['bev_pred']

        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self.saver = tf.train.Saver()  # ema_obj.variables_to_restore()
        self.saver.restore(self.sess, self.initial_weight)
Example #11
    def __init__(self):
        self.initial_weight = cfg.EVAL.WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLOv2.MOVING_AVE_DECAY
        self.eval_logdir = "./data/logs/eval"
        self.evalset = dataset.Dataset('test')
        self.output_dir = cfg.EVAL.OUTPUT_PRED_PATH
        self.img_anchors = loader.load_anchors(cfg.IMG.ANCHORS)

        with tf.name_scope('model'):
            self.model = yolov2_network.YOLOv2Network()
            self.net = self.model.load()
            self.img_pred = self.net['img_pred']

        config = ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = InteractiveSession(config=config)
        self.saver = tf.train.Saver()  #ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.initial_weight)
        self.timer = timer.Timer()
Example #12
    def test_ninstancias_eh_consistente_ds_vazio(self):
        '''Checks that the *_ninstancias* attribute of Dataset is consistent
        when *_dados* is empty.'''
        ds = dm.Dataset()
        self.assertEqual(0, ds.get_ninstancias())
Example #13
def main(args):
    # s_ = time.time()
    print(torch.cuda.get_device_properties(device=0).total_memory)
    torch.cuda.empty_cache()
    print(args)
    save_dir = args.save_dir
    mkdir_if_missing(save_dir)
    num_txt = len(glob.glob(save_dir + "/*.txt"))
    sys.stdout = logging.Logger(
        os.path.join(save_dir, "log_" + str(num_txt) + ".txt"))
    display(args)
    start = 0

    model = models.create(args.net,
                          pretrained=args.pretrained,
                          dim=args.dim,
                          self_supervision_rot=args.self_supervision_rot)
    all_pretrained = glob.glob(save_dir + "/*.pth.tar")

    if (args.resume is None) or (len(all_pretrained) == 0):
        model_dict = model.state_dict()

    else:
        # resume model
        all_pretrained_epochs = sorted(
            [int(x.split("/")[-1][6:-8]) for x in all_pretrained])
        args.resume = os.path.join(
            save_dir, "ckp_ep" + str(all_pretrained_epochs[-1]) + ".pth.tar")
        print('load model from {}'.format(args.resume))
        chk_pt = load_checkpoint(args.resume)
        weight = chk_pt['state_dict']
        start = chk_pt['epoch']
        model.load_state_dict(weight)

    model = torch.nn.DataParallel(model)
    model = model.cuda()
    fake_centers_dir = os.path.join(args.save_dir, "fake_center.npy")

    if np.sum(["train_1.txt" in x
               for x in glob.glob(args.save_dir + "/**/*")]) == 0:
        if args.rot_only:
            create_fake_labels(None, None, args)

        else:
            data = dataset.Dataset(args.data,
                                   ratio=args.ratio,
                                   width=args.width,
                                   origin_width=args.origin_width,
                                   root=args.data_root,
                                   self_supervision_rot=0,
                                   mode="test",
                                   rot_bt=args.rot_bt,
                                   corruption=args.corruption,
                                   args=args)

            fake_train_loader = torch.utils.data.DataLoader(
                data.train,
                batch_size=100,
                shuffle=False,
                drop_last=False,
                pin_memory=True,
                num_workers=args.nThreads)

            train_feature, train_labels = extract_features(
                model,
                fake_train_loader,
                print_freq=1e5,
                metric=None,
                pool_feature=args.pool_feature,
                org_feature=True)

            create_fake_labels(train_feature, train_labels, args)

            del train_feature

            fake_centers = "k-means++"

            torch.cuda.empty_cache()

    elif os.path.exists(fake_centers_dir):
        fake_centers = np.load(fake_centers_dir)
    else:
        fake_centers = "k-means++"

    time.sleep(60)

    model.train()

    # freeze BN
    if (args.freeze_BN is True) and (args.pretrained):
        print(40 * '#', '\n BatchNorm frozen')
        model.apply(set_bn_eval)
    else:
        print(40 * '#', 'BatchNorm NOT frozen')

    # Fine-tune the model: the learning rate for pre-trained parameters is 1/10
    new_param_ids = set(map(id, model.module.classifier.parameters()))
    new_rot_param_ids = set()
    if args.self_supervision_rot:
        new_rot_param_ids = set(
            map(id, model.module.classifier_rot.parameters()))
        print(new_rot_param_ids)

    new_params = [
        p for p in model.module.parameters() if id(p) in new_param_ids
    ]

    new_rot_params = [
        p for p in model.module.parameters() if id(p) in new_rot_param_ids
    ]

    base_params = [
        p for p in model.module.parameters()
        if (id(p) not in new_param_ids) and (id(p) not in new_rot_param_ids)
    ]

    param_groups = [{
        'params': base_params
    }, {
        'params': new_params
    }, {
        'params': new_rot_params,
        'lr': args.rot_lr
    }]

    print('initial model is saved at %s' % save_dir)

    optimizer = torch.optim.Adam(param_groups,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    criterion = losses.create(args.loss,
                              margin=args.margin,
                              alpha=args.alpha,
                              beta=args.beta,
                              base=args.loss_base).cuda()

    data = dataset.Dataset(args.data,
                           ratio=args.ratio,
                           width=args.width,
                           origin_width=args.origin_width,
                           root=args.save_dir,
                           self_supervision_rot=args.self_supervision_rot,
                           rot_bt=args.rot_bt,
                           corruption=1,
                           args=args)
    train_loader = torch.utils.data.DataLoader(
        data.train,
        batch_size=args.batch_size,
        sampler=FastRandomIdentitySampler(data.train,
                                          num_instances=args.num_instances),
        drop_last=True,
        pin_memory=True,
        num_workers=args.nThreads)

    # save the train information

    for epoch in range(start, args.epochs):

        train(epoch=epoch,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              train_loader=train_loader,
              args=args)

        if (epoch + 1) % args.save_step == 0 or epoch == 0:
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint({
                'state_dict': state_dict,
                'epoch': (epoch + 1),
            },
                            is_best=False,
                            fpath=osp.join(
                                args.save_dir,
                                'ckp_ep' + str(epoch + 1) + '.pth.tar'))

        if ((epoch + 1) % args.up_step == 0) and (not args.rot_only):
            # rewrite train_1.txt file
            data = dataset.Dataset(args.data,
                                   ratio=args.ratio,
                                   width=args.width,
                                   origin_width=args.origin_width,
                                   root=args.data_root,
                                   self_supervision_rot=0,
                                   mode="test",
                                   rot_bt=args.rot_bt,
                                   corruption=args.corruption,
                                   args=args)
            fake_train_loader = torch.utils.data.DataLoader(
                data.train,
                batch_size=args.batch_size,
                shuffle=False,
                drop_last=False,
                pin_memory=True,
                num_workers=args.nThreads)
            train_feature, train_labels = extract_features(
                model,
                fake_train_loader,
                print_freq=1e5,
                metric=None,
                pool_feature=args.pool_feature,
                org_feature=(args.dim % 64 != 0))
            fake_centers = create_fake_labels(train_feature,
                                              train_labels,
                                              args,
                                              init_centers=fake_centers)
            del train_feature
            torch.cuda.empty_cache()
            time.sleep(60)
            np.save(fake_centers_dir, fake_centers)
            # reload data
            data = dataset.Dataset(
                args.data,
                ratio=args.ratio,
                width=args.width,
                origin_width=args.origin_width,
                root=args.save_dir,
                self_supervision_rot=args.self_supervision_rot,
                rot_bt=args.rot_bt,
                corruption=1,
                args=args)

            train_loader = torch.utils.data.DataLoader(
                data.train,
                batch_size=args.batch_size,
                sampler=FastRandomIdentitySampler(
                    data.train, num_instances=args.num_instances),
                drop_last=True,
                pin_memory=True,
                num_workers=args.nThreads)

            # test on testing data
            # extract_recalls(data=args.data, data_root=args.data_root, width=args.width, net=args.net, checkpoint=None,
            #         dim=args.dim, batch_size=args.batch_size, nThreads=args.nThreads, pool_feature=args.pool_feature,
            #         gallery_eq_query=args.gallery_eq_query, model=model)
            model.train()
            if (args.freeze_BN is True) and (args.pretrained):
                print(40 * '#', '\n BatchNorm frozen')
                model.apply(set_bn_eval)
Example #14
    # Hyper-parameters
    opt = config.MobileNetV3Config()

    # Paths for the artifacts produced during training
    date = time.strftime("%Y-%m-%d", time.localtime())
    save_path = os.path.join(opt.checkpoints_path, date)  # Checkpoint folder
    os.makedirs(save_path, exist_ok=True)
    log_filename = os.path.join(save_path, 'Console_Log.txt')  # Log file path

    # Validation set
    identity_list = dataset.get_lfw_list(opt.lfw_test_list)
    lfw_img_paths = [os.path.join(opt.lfw_root, each) for each in identity_list]  # Paths of all images

    # Load the training dataset
    train_dataset = dataset.Dataset(opt.train_root, opt.path_split, phase='train', input_shape=opt.input_shape)
    trainloader = data.DataLoader(train_dataset,
                                  batch_size=opt.train_batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers)

    opt.num_classes = len(train_dataset.classes)  # Number of classes
    epoch_iters = len(trainloader)  # Number of iterations per epoch

    criterion = focal_loss.FocalLoss(gamma=2)
    metric_fc = metrics.ArcMarginProduct(opt.embedding, opt.num_classes, s=64, m=0.5, easy_margin=opt.easy_margin)

    # Build the model
    model = mobileNetV3_MixNet.MobileNetV3_MixNet(n_class=opt.embedding, input_size=opt.input_shape[2], dropout=opt.dropout_rate)
    model.to(device)
    model = DataParallel(model)
Example #15
import numpy as np
import cv2
from data import dataset
from data import preprocess
from utils import vis_tools
from tqdm import tqdm


def project_fusionmap_to_img(img, fusionmap, down_ratio):
    points = fusionmap[fusionmap[..., 0] > 0]
    new_size = (int(img.shape[1] / down_ratio), int(img.shape[0] / down_ratio))
    img = cv2.resize(img, new_size)
    for p in points:
        img[p[1]][p[0]] = 1.0
    vis_tools.imshow_image(img)
    vis_tools.imshow_image(fusionmap[..., 0].astype(np.float32))


if __name__ == "__main__":
    lidar_preprocessor = preprocess.LidarPreprocessor()
    trainset = dataset.Dataset(lidar_preprocessor, 'train')
    pbar = tqdm(trainset)
    for data in pbar:
        img = data[1][0]
        # print(img)
        mapping1x = data[2][0]
        mapping2x = data[3][0]
        mapping4x = data[4][0]
        mapping8x = data[5][0]
        project_fusionmap_to_img(img, mapping1x, 1)
        # vis_tools.imshow_image(img)
Example #16
def Model2Feature(data,
                  net,
                  checkpoint,
                  dim=512,
                  width=224,
                  root=None,
                  nThreads=16,
                  batch_size=100,
                  pool_feature=False,
                  model=None,
                  org_feature=False,
                  args=None):
    dataset_name = data
    if model is None:
        model = models.create(net, dim=dim, pretrained=False)
        resume = checkpoint
        model.load_state_dict(resume['state_dict'], strict=False)
        model = torch.nn.DataParallel(model).cuda()
    data = dataset.Dataset(data,
                           width=width,
                           root=root,
                           mode="test",
                           self_supervision_rot=0,
                           args=args)

    if dataset_name in ['shop', 'jd_test', 'cifar']:
        gallery_loader = torch.utils.data.DataLoader(data.gallery,
                                                     batch_size=batch_size,
                                                     shuffle=False,
                                                     drop_last=False,
                                                     pin_memory=True,
                                                     num_workers=nThreads)

        query_loader = torch.utils.data.DataLoader(data.query,
                                                   batch_size=batch_size,
                                                   shuffle=False,
                                                   drop_last=False,
                                                   pin_memory=True,
                                                   num_workers=nThreads)

        gallery_feature, gallery_labels = extract_features(
            model,
            gallery_loader,
            print_freq=1e5,
            metric=None,
            pool_feature=pool_feature,
            org_feature=org_feature)
        query_feature, query_labels = extract_features(
            model,
            query_loader,
            print_freq=1e5,
            metric=None,
            pool_feature=pool_feature,
            org_feature=org_feature)
        if org_feature:
            norm = query_feature.norm(dim=1, p=2, keepdim=True)
            query_feature = query_feature.div(norm.expand_as(query_feature))
            print("feature normalized 1")
            norm = gallery_feature.norm(dim=1, p=2, keepdim=True)
            gallery_feature = gallery_feature.div(
                norm.expand_as(gallery_feature))
            print("feature normalized 2")
    else:
        data_loader = torch.utils.data.DataLoader(data.gallery,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  drop_last=False,
                                                  pin_memory=True,
                                                  num_workers=nThreads)
        features, labels = extract_features(model,
                                            data_loader,
                                            print_freq=1e5,
                                            metric=None,
                                            pool_feature=pool_feature,
                                            org_feature=org_feature)
        if org_feature:
            norm = features.norm(dim=1, p=2, keepdim=True)
            features = features.div(norm.expand_as(features))
            print("feature normalized")
        gallery_feature, gallery_labels = query_feature, query_labels = features, labels
    return gallery_feature, gallery_labels, query_feature, query_labels
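A hedged retrieval sanity check on the returned features, assuming the labels come back as 1-D integer tensors and org_feature=True so the features are L2-normalized as above; chk_pt and the dataset/net names are placeholders:

# Illustrative recall@1 computation; every name below is an assumption.
gallery_f, gallery_y, query_f, query_y = Model2Feature(
    'shop', net='bn_inception', checkpoint=chk_pt, org_feature=True)
sim = query_f @ gallery_f.t()  # Cosine similarities (features are unit norm).
nearest = sim.argmax(dim=1)    # Closest gallery index for each query.
print('recall@1 =', (gallery_y[nearest] == query_y).float().mean().item())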
Example #17
    def __init__(self):
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FRIST_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.PRETRAIN_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLOv2.MOVING_AVE_DECAY
        self.train_logdir = "./data/log/train"
        self.trainset = dataset.Dataset('train')
        self.valset = dataset.Dataset('val')
        self.steps_per_period = len(self.trainset)
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = InteractiveSession(config=config)
        self.timer = timer.Timer()
        # self.sess                = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        with tf.name_scope('model'):
            self.model = yolov2_network.YOLOv2Network()
            self.net = self.model.load()
            self.net_var = tf.global_variables()
            self.loss = self.net["yolov2_loss"]

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            # Linear warmup followed by cosine decay.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps
                * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end
                + 0.5 * (self.learn_rate_init - self.learn_rate_end)
                * (1 + tf.cos((self.global_step - warmup_steps)
                              / (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ["yolov2_headnet"]:
                    self.first_stage_trainable_var_list.append(var)
            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("yolov2_loss", self.net["yolov2_loss"])
            tf.summary.scalar("img_obj_loss", self.net["img_obj_loss"])
            tf.summary.scalar("img_cls_loss", self.net["img_cls_loss"])
            tf.summary.scalar("img_bbox_loss", self.net["img_bbox_loss"])
            logdir = "../logs/tensorboard"
            if os.path.exists(logdir):
                shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
        img_pred_dir = cfg.YOLOv2.LOG_DIR + "/pred/img_pred/"
        if os.path.exists(img_pred_dir):
            shutil.rmtree(img_pred_dir)
        os.mkdir(img_pred_dir)
Example #18
def parse_img_labelmap(labelmap, img_anchors):
    # Signature reconstructed from the call site below; the snippet is
    # truncated, and objness, clsness and bbox are presumably decoded from
    # labelmap above this point.
    prob = objness * clsness
    cls_max_prob = np.max(prob, axis=-1)
    cls_idx = np.argmax(prob, axis=-1)
    bbox[..., :2] = bbox[..., :2]
    bbox[..., 2:4] = bbox[..., 2:4]
    x = (bbox[..., 0] + bbox[..., -4]) * cfg.IMG.STRIDE / cfg.IMG.H_SCALE_RATIO
    y = (bbox[..., 1] + bbox[..., -3]) * cfg.IMG.STRIDE / cfg.IMG.W_SCALE_RATIO
    h = bbox[..., 2] / cfg.IMG.H_SCALE_RATIO * bbox[:, -2]
    w = bbox[..., 3] / cfg.IMG.W_SCALE_RATIO * bbox[:, -1]
    left = y - w / 2
    top = x - h / 2
    right = y + w / 2
    bottom = x + h / 2
    result = np.stack([cls_idx, cls_max_prob, left, top, right, bottom],
                      axis=-1)
    return result[cls_max_prob > 0.3]


trainset = dataset.Dataset('train')
img_anchors = trainset.img_anchors
img_dir = os.path.join(cfg.YOLOv2.DATASETS_DIR, "image_files/")

for j in range(len(trainset)):
    data = trainset.load()
    # vis_tools.imshow_img(data[0][0].astype(np.float32))
    shape = [cfg.IMG.OUTPUT_H, cfg.IMG.OUTPUT_W, cfg.IMG.LABEL_Z]
    label = np.reshape(data[1][0], shape)
    imglabel = parse_img_labelmap(label, img_anchors)
    img_bboxes = postprocess.img_nms(imglabel, cfg.IMG.IOU_THRESHOLDS)
    vis_tools.imshow_img_bbox(data[0][0].astype(np.float32), img_bboxes)
Example #19
    def test_ler_csv_nao_existente(self):
        '''Checks that the ler_csv method returns False when given the name
        of a csv file that does not exist.'''
        ds = dm.Dataset()
        self.assertFalse(ds.ler_csv("blah.csv"))
Example #20
    def test_remover_atributo_existente(self):
        '''Checks that the remover_atributo method removes an existing attribute.'''
        dados = np.zeros(shape=(5, 4))
        ds = dm.Dataset(dados)
        ds.remover_atributos([1])
        self.assertNotIn(1, ds.get_atributos())
Example #21
    def test_remover_atributo_inexistente(self):
        '''Checks that the remover_atributo method warns about a nonexistent attribute.'''
        dados = np.zeros(shape=(5, 4))
        ds = dm.Dataset(dados)
        self.assertFalse(ds.remover_atributos([8]))
Example #22
    def test_dados_eh_dataframe(self):
        '''Checks that the *_dados* attribute is of type pandas.DataFrame.'''
        ds = dm.Dataset()
        self.assertIsInstance(ds.get_dados(), pd.DataFrame)
Example #23
def save_model(model, save_path, name, iter_cnt):
    save_name = os.path.join(save_path, name + '_' + str(iter_cnt) + '.pth')
    torch.save(model.state_dict(), save_name)
    return save_name


if __name__ == '__main__':

    opt = config.Config()
    if opt.display:
        visualizer = Visualizer()
    device = torch.device("cuda")

    train_dataset = dataset.Dataset(
        opt.train_root,  # '/data/Datasets/webface/CASIA-maxpy-clean-crop-144/'
        opt.train_list,  # '/data/Datasets/webface/train_data_13938.txt'
        phase='train',
        input_shape=opt.input_shape  # (1, 128, 128)
    )
    trainloader = data.DataLoader(
        train_dataset,
        batch_size=opt.train_batch_size,  # 16
        shuffle=True,
        num_workers=opt.num_workers  # 4
    )

    identity_list = test.get_lfw_list(opt.lfw_test_list)
    img_paths = [os.path.join(opt.lfw_root, each) for each in identity_list]

    print('{} train iters per epoch'.format(len(trainloader)))

    1 / 0  # Raises ZeroDivisionError, halting the script here.
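A quick hedged use of the save_model helper defined at the top of this example, assuming model is a torch.nn.Module built earlier (not shown in the snippet); the directory and iteration count are illustrative:

# Hypothetical call; folder and iteration count are assumptions.
path = save_model(model, './checkpoints', 'resnet18', iter_cnt=1000)
print('checkpoint written to', path)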
Example #24
    def test_ninstancias_eh_consistentes_ds_cheio(self):
        '''Checks that the *_ninstancias* attribute of Dataset is consistent
        when *_dados* is non-empty.'''
        dados = np.zeros(shape=(5, 4))
        ds = dm.Dataset(dados)
        self.assertEqual(5, ds.get_ninstancias())
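The Dataset tests scattered through these examples (#2, #3, #7, #12, #19 to #22 and #24) all follow the stock unittest pattern; a minimal hedged harness, where the dm import is an assumption about the module that defines Dataset:

# Sketch of a test harness; `dm` is assumed to be the module under test.
import unittest

import pandas as pd

import dm  # assumption: the module providing the Dataset class above


class TestDataset(unittest.TestCase):
    def test_dados_eh_dataframe(self):
        ds = dm.Dataset()
        self.assertIsInstance(ds.get_dados(), pd.DataFrame)


if __name__ == '__main__':
    unittest.main()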