Example No. 1
def get_eval(opt, net_path, args):
    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    load(net,net_path)
    #####################  Pruning Strategy Generation ###############
    compression_scheduler = distiller.file_config(
        net.get_compress_part(), net.optimizer, opt.compress_schedule_path
    )
    compression_scheduler = SetCompressionScheduler(
        compression_scheduler, args["channel_config"]
    )
    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=args["dummy_input"])
    flops_after, params_after = model_summary(net.get_compress_part(), args["dummy_input"])
    ratio = flops_after / args["flops_before"]
    net = net.to(args["device"])
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
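    # Adaptive BN: forward-only passes in train mode update only the BatchNorm
    # running statistics of the pruned network (no gradients, no weight updates).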
    with torch.no_grad():
        for index, sample in enumerate(args["dataloader_train"]):
            _ = net.get_loss(sample)
            if index > 100:
                break
    strategy_score = net.get_eval_scores(args["dataloader_val"])["accuracy"]
    del net
    return 1 - strategy_score, ratio
Example No. 2
    def filteredimage(self):
        # Pipeline: repeated morphological opening, small-cluster removal, a second
        # round of opening, closing, large-cluster removal, then thinning.
        # Intermediate images are saved to data/temp/ for inspection.
        im = self.img.copy()

        print("opening")
        for i in range(0, 12):
            im = GraphicsFilters.opening(im, 3)
            ArrayWriter(im.copy()).save("data/temp/t1_" + str(i) + ".png")

        im = GraphicsFilters.deletecluster(im, 100)
        ArrayWriter(im).save("data/temp/t2.png")

        print("opening")
        for i in range(0, 12):
            im = GraphicsFilters.opening(im, 3)
            ArrayWriter(im.copy()).save("data/temp/t3_" + str(i) + ".png")

        print("closing")
        for i in range(0, 1):
            im = GraphicsFilters.closing(im, 5)
            ArrayWriter(im).save("data/temp/t4_" + str(i) + ".png")

        im = GraphicsFilters.deletecluster(im, 2000)
        ArrayWriter(im).save("data/temp/t5.png")

        print("thinning")
        im = thinning.thinning(im)

        return im
Example No. 3
    def filteredimage(self):
        im = self.img.copy()

        print "opening"
        for i in range(0, 12):
            im = GraphicsFilters.opening(im, 3)
            ArrayWriter(im.copy()).save("data/temp/t1_" + str(i) + ".png")

        im = GraphicsFilters.deletecluster(im, 100)
        ArrayWriter(im).save("data/temp/t2.png")

        print "opening"
        for i in range(0, 12):
            im = GraphicsFilters.opening(im, 3)
            ArrayWriter(im.copy()).save("data/temp/t3_" + str(i) + ".png")

        print "closing"
        for i in range(0, 1):
            im = GraphicsFilters.closing(im, 5)
            ArrayWriter(im).save("data/temp/t4_" + str(i) + ".png")

        im = GraphicsFilters.deletecluster(im, 2000)
        ArrayWriter(im).save("data/temp/t5.png")

        print "thinning"
        im = thinning.thinning(im)

        return im
Example No. 4
def get_edge(image, show=False):
    # Resize image
    old_height, old_width = image.shape
    new_width = WIDTH
    new_height = int((new_width * old_height) / old_width)
    resized_image = cv2.resize(image, (new_width, new_height))

    # Reduce image noise
    smoothened_image = cv2.bilateralFilter(resized_image, 5, 150, 30)

    # Normalise image
    normalised_image = cv2.normalize(smoothened_image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    # Get image edge
    grad_x = cv2.Sobel(normalised_image, cv2.CV_16S, 1, 0)
    grad_y = cv2.Sobel(normalised_image, cv2.CV_16S, 0, 1)
    sobel_x = cv2.convertScaleAbs(grad_x)
    sobel_y = cv2.convertScaleAbs(grad_y)
    edges = cv2.addWeighted(sobel_x, 0.5, sobel_y, 0.5, 0)

    # Convert edges to binary
    _, binary_edges = cv2.threshold(edges, cv2.mean(edges)[0], 255, cv2.THRESH_BINARY)

    # Thin binary edges
    thinned_edges = thinning(binary_edges)

    if show:
        cv2.imshow("binary edges", binary_edges)
        cv2.imshow("thinned", thinned_edges)
        cv2.waitKey()

    # Display process steps
    # cv2.imshow('original', resized_image)
    # cv2.imshow('blurred', smoothened_image)
    # cv2.imshow('normalised', normalised_image)
    # cv2.imshow('edges', edges)
    # cv2.imshow('binary', binary_edges)
    # cv2.imshow('thinned', thinned_edges)
    # cv2.waitKey()
    # cv2.destroyAllWindows()

    return smoothened_image, thinned_edges
Example No. 5
def main(opt):
    # basic settings
    os.environ["CUDA_VISIBLE_DEVICES"]=str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = 'cuda'
        torch.backends.cudnn.benchmark = True
    else:
        device = 'cpu'
    ##################### Get Dataloader ####################
    dataloader_train, dataloader_val = custom_get_dataloaders(opt)
    # dummy_input is sample input of dataloaders
    if hasattr(dataloader_val, 'dataset'):
        dummy_input = dataloader_val.dataset.__getitem__(0)
        dummy_input = dummy_input[0]
        dummy_input = dummy_input.unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.checkpoint)
    flops_before, params_before = model_summary(net.get_compress_part(), dummy_input)

    #####################  Pruning Strategy Generation ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(), net.optimizer, opt.compress_schedule_path)
    num_layer = len(compression_scheduler.policies[1])
    
    channel_config = get_pruning_strategy(opt, num_layer) # pruning strategy
    
    compression_scheduler = random_compression_scheduler(compression_scheduler, channel_config)

    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)
    print(net)

    flops_after, params_after = model_summary(net.get_compress_part(), dummy_input)
    ratio = flops_after / flops_before
    print('FLOPs ratio:', ratio)
    if ratio < opt.flops_target - 0.005 or ratio > opt.flops_target + 0.005:
        # illegal pruning strategy
        return
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    with torch.no_grad():
        for index, sample in enumerate(tqdm(dataloader_train, leave=False)):
            _ = net.get_loss(sample)
            if index > 50:
                break

    strategy_score = net.get_eval_scores(dataloader_val)['accuracy']

    #################### Save Pruning Strategy and Score #########
    with open(opt.output_file, 'a+') as log_file:
        log_file.write("{} {} ".format(strategy_score, ratio))
        for item in channel_config:
            log_file.write("{} ".format(item))
        log_file.write('\n')
    print('Eval Score:{}'.format(strategy_score))
Example No. 6
        off = 1
        if (fill == ones).any():
            i += 1
            continue
        if not (sh == ones).all():
            if not (sh2 == ones).all():
                i += 1
                continue
            sh = sh2
            off = 2
        if y > 16:
            print(roi_mask[y, (x + i):(x + i + FW)], fill, sh)
            print('FND', x + i, y)
        for j in range(off):
            for k in range(3):
                if (sh[k] == [255, 255, 255]).all():
                    roi_and[y + j + 1, (x + i + k)] = (0, 0, 255)
        i += 1

edges = cv2.Canny(cv2.resize(roi_and, (0, 0), fx=4.0, fy=4.0),
                  50,
                  150,
                  apertureSize=5)
#cv2.imshow('norm', closed) #cv2.resize(cv2.resize(img_rgb2, (0, 0), fx=0.5, fy=0.5), (0,0), fx=8.0, fy=8.0))
cv2.imshow('norm', thinning(cv2.resize(roi_mask, (0, 0), fx=2.0,
                                       fy=2.0)))  #, cv2.COLOR_BGR2GRAY)))
cv2.imshow(
    'norm2', img_dbg
)  # thinning(cv2.resize(closed, (0, 0), fx=4.0, fy=4.0))) #, cv2.COLOR_BGR2GRAY)))
ley = cv2.waitKey(0)
Example No. 7
def main(opt):
    # basic settings
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = True
    else:
        device = "cpu"
    ##################### Get Dataloader ####################
    dataloader_train, dataloader_val = custom_get_dataloaders(opt)
    # dummy_input is sample input of dataloaders
    if hasattr(dataloader_val, "dataset"):
        dummy_input = dataloader_val.dataset.__getitem__(0)
        dummy_input = dummy_input[0]
        dummy_input = dummy_input.unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.checkpoint)
    flops_before, params_before = model_summary(net.get_compress_part(),
                                                dummy_input)

    #####################  Load Pruning Strategy ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(),
                                                  net.optimizer,
                                                  opt.compress_schedule_path)

    channel_config = get_channel_config(opt.search_result,
                                        opt.strategy_id)  # pruning strategy

    compression_scheduler = random_compression_scheduler(
        compression_scheduler, channel_config)

    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)

    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    ratio = flops_after / flops_before
    print("FLOPs ratio:", ratio)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    with torch.no_grad():
        for index, sample in enumerate(tqdm(dataloader_train, leave=False)):
            _ = net.get_loss(sample)
            if index > 100:
                break

    strategy_score = net.get_eval_scores(dataloader_val)["accuracy"]

    print("Result file:{}, Strategy ID:{}, Evaluation score:{}".format(
        opt.search_result, opt.strategy_id, strategy_score))

    ##################### Fine-tuning #########################
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
        net.optimizer, opt.epoch)
    reporter = Reporter(opt)
    best_acc = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(
            net,
            dataloader_train,
            net.optimizer,
        )
        reporter.log_metric("train_loss", train_loss, epoch)

        lr_scheduler.step()

        scores = net.get_eval_scores(dataloader_val)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))

        reporter.log_metric("eval_acc", scores["accuracy"], epoch)

        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
        reporter.log_metric("best_acc", best_acc, epoch)

        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )

        print("==> Training epoch %d" % epoch)
Example No. 8
                        flags[y, x] = 2
        flags[np.where(flags == 2)] = 1
        return np.array(flags[1:-1, 1:-1], dtype=np.uint8)

if __name__ == "__main__":

    filename = sys.argv[1]
    threshold = int(sys.argv[2])

    print("load data")
    reader = ImageReader(filename)
    ArrayWriter(reader.getrawarray()).save("data/0raw.png")

    print("threshhold = {0}".format(threshold))
    img = reader.get2tonearray(lambda x: x < threshold)
    TwotoneArrayWriter(img).save("data/1twotone.png")
    print("filtering")

    print(" opening")
    img = Morphology.opening(img, 2, 0, 4)

    print(" delete cluster")
    img = ClusterDetector.deletecluster(img, 4000)
    TwotoneArrayWriter(img).save("data/temp/t3.png")

    print(" thinning")
    ans = thinning.thinning(img)
    TwotoneArrayWriter(ans).save("data/2filtered.png")


Example No. 9
            i += 1
            continue
        color = roi_and[y, (x + i + 2)]
        fill = roi_mask[y + 1, (x + i):(x + i + FW)]
        sh = roi_mask[y + 2, (x + i):(x + i + FW)]
        sh2 = roi_mask[y + 3, (x + i):(x + i + FW)]
        off = 1
        if (fill == ones).any():
            i += 1
            continue
        if not (sh == ones).all():
            if not (sh2 == ones).all():
                i += 1
                continue
            sh = sh2
            off = 2
        if y > 16:
            print(roi_mask[y, (x + i):(x + i + FW)], fill, sh)
            print('FND', x + i, y)
        for j in range(off):
            for k in range(3):
                if (sh[k] == [255, 255, 255]).all():
                    roi_and[y + j + 1, (x + i + k)] = (0, 0, 255)
        i += 1

edges = cv2.Canny(cv2.resize(roi_and, (0, 0), fx=4.0, fy=4.0), 50, 150, apertureSize=5)
#cv2.imshow('norm', closed) #cv2.resize(cv2.resize(img_rgb2, (0, 0), fx=0.5, fy=0.5), (0,0), fx=8.0, fy=8.0))
cv2.imshow('norm', thinning(cv2.resize(roi_mask, (0, 0), fx=2.0, fy=2.0))) #, cv2.COLOR_BGR2GRAY)))
cv2.imshow('norm2', img_dbg) # thinning(cv2.resize(closed, (0, 0), fx=4.0, fy=4.0))) #, cv2.COLOR_BGR2GRAY)))
ley = cv2.waitKey(0)
Example No. 10
def main(opt, channel_config, dataloader_train, dataloader_val, path):
    # basic settings
    torch.backends.cudnn.enabled = False
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = False
    else:
        device = "cpu"
    ##################### Get Dataloader ####################

    # dummy_input is sample input of dataloaders
    if hasattr(dataloader_val, "dataset"):
        dummy_input = dataloader_val.dataset.__getitem__(0)
        dummy_input = dummy_input[0]
        dummy_input = dummy_input.unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    load(net, path)
    #net.load_checkpoint(opt.checkpoint)
    #####################  Load Pruning Strategy ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(),
                                                  net.optimizer,
                                                  opt.compress_schedule_path)
    compression_scheduler = setCompressionScheduler(compression_scheduler,
                                                    channel_config)
    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)
    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    t = tqdm(dataloader_train, leave=False)
    with torch.no_grad():
        for index, sample in enumerate(t):
            _ = net.get_loss(sample)
            if index > 100:
                break
    strategy_score = net.get_eval_scores(dataloader_val)["accuracy"]
    old = strategy_score
    print("Evaluation score:{}".format(strategy_score))
    ##################### Fine-tuning #########################
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(net.optimizer,
                                                        100,
                                                        eta_min=5e-5)
    #lr_scheduler=optim.lr_scheduler.StepLR(net.optimizer,5,0.9)
    reporter = Reporter(opt)
    best_acc = strategy_score
    best_kappa = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        net.confusion_matrix.reset()
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(
            net,
            dataloader_train,
            net.optimizer,
        )
        reporter.log_metric("train_loss", train_loss, epoch)
        lr_scheduler.step()
        scores = net.get_eval_scores(dataloader_val)
        kappa = CaluKappa(net.confusion_matrix)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))
        reporter.log_metric("eval_acc", scores["accuracy"], epoch)
        reporter.log_metric("kappa", kappa, epoch)
        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
            best_kappa = kappa
            save_checkpoints(
                scores["accuracy"],
                net._net,
                reporter,
                opt.exp_name,
                epoch,
            )
        reporter.log_metric("best_acc", best_acc, epoch)
        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )
        print("==> Training epoch %d" % epoch)
    """将模型转换为torch script保存"""
    ckpt_name = "{}_best.pth".format(opt.exp_name)
    load(net, os.path.join(reporter.ckpt_log_dir, ckpt_name))
    net._net.eval()
    traced_script_module = torch.jit.trace(net._net,
                                           torch.rand(1, 3, 256, 256))
    traced_script_module.save(os.path.join(reporter.log_dir, "model.pt"))
    del net
    return old, best_acc, best_kappa, flops_after, params_after