Example 1
def filterImageCallBack():
    size = tkinter.simpledialog.askinteger("Input",
                                           "Disk size",
                                           parent=root,
                                           minvalue=1,
                                           maxvalue=100)
    if size is None:  # dialog was cancelled
        return
    data.matrix = utils.mean_filter(data.matrix, size)
    data.frameA = utils.get_frame(data.matrix, 'A')
    data.frameB = utils.get_frame(data.matrix, 'B')
    img = Image.fromarray(data.matrix)
    data.image = img
    logScreen("Image filtered.")
    previewImageCallBack(1)
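
The utils.mean_filter called throughout these examples is never shown in the source, and its signature varies between projects (compare the size_t keyword in Example 2). A minimal sketch of a plausible size x size averaging filter, assuming behaviour like scipy.ndimage.uniform_filter:

# Hypothetical sketch only -- the real utils.mean_filter is not in the source.
import numpy as np
from scipy import ndimage

def mean_filter(matrix, size):
    """Replace each pixel with the mean of its size x size neighbourhood."""
    return ndimage.uniform_filter(matrix.astype(np.float64), size=size)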
Example 2
def boundary(img, dst):
    # Trace the object outline in img by Moore-neighbour tracing:
    # denoise, binarise, then walk the boundary clockwise from a start pixel.
    w, h = img.shape
    ns_m = mean_filter(img, size_t=5, dst=dst)
    ns_b = binary_filter(ns_m, percent=0.9, dst=dst)

    x0, y0 = start_point(ns_b)
    b, c = np.array([x0, y0]), np.array([x0 - 1, y0])  # boundary pixel, probe
    seq1 = np.zeros((2 * (w + h), 2), np.uint)  # visited boundary pixels
    seq1[0, :], t = b, c
    # Rotate the probe c around b until it lands on an object pixel.
    while ns_b[c[0], c[1]] != 255:
        t = c
        c = rotate_from(b, t)
    seq1[1, :] = c
    b, c = c, t

    # Keep tracing until the walk returns to the start pixel.
    step = 2
    while (b[0] != x0) or (b[1] != y0):
        while ns_b[c[0], c[1]] != 255:
            t = c
            c = rotate_from(b, t)
        seq1[step, :] = c
        b, c = c, t
        step += 1

    # Rasterise the traced sequence; stop at the first unused (zero) slot.
    bound = np.zeros((w, h), np.uint8)
    for k in range(2 * (w + h)):
        p0 = seq1[k, :]
        bound[p0[0], p0[1]] = 255
        if ns_b[p0[0], p0[1]] != 255:
            break

    # Snap boundary pixels onto a coarse grid: Gmin marks the occupied grid
    # cells themselves, Gmax marks each cell's origin at full resolution.
    granularity = 25.0
    g_rows = np.floor(w / granularity)
    g_cols = np.floor(h / granularity)
    Gmax = np.zeros((w, h), np.uint8)
    Gmin = np.zeros((int(g_rows), int(g_cols)), np.uint8)
    for i in range(w):
        for j in range(h):
            if bound[i, j] == 255:
                grid_x = np.floor(g_rows * i / float(w))
                grid_y = np.floor(g_cols * j / float(h))
                Gmax_x = np.floor(grid_x / g_rows * float(w))
                Gmax_y = np.floor(grid_y / g_cols * float(h))
                Gmin[int(grid_x), int(grid_y)] = 255
                Gmax[int(Gmax_x), int(Gmax_y)] = 255

    if dst:
        cv2.imwrite(dst % 'noisy_outline', bound)
        cv2.imwrite(dst % 'noisy_Gmin', Gmin)
        cv2.imwrite(dst % 'noisy_Gmax', Gmax)
    return Gmax
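
boundary relies on helpers (start_point, binary_filter, rotate_from) that the source does not include. Judging by the loops above, rotate_from(b, t) returns the next neighbour of b clockwise from t; a hypothetical sketch of that step of Moore-neighbour tracing:

import numpy as np

# Clockwise 8-neighbourhood offsets starting from "up". The actual ordering
# used by the source's rotate_from is unknown; this is an assumption.
_NEIGHBOURS = [(-1, 0), (-1, 1), (0, 1), (1, 1),
               (1, 0), (1, -1), (0, -1), (-1, -1)]

def rotate_from(b, t):
    """Return the neighbour of b that follows t in clockwise order."""
    k = _NEIGHBOURS.index((t[0] - b[0], t[1] - b[1]))
    dx, dy = _NEIGHBOURS[(k + 1) % 8]
    return np.array([b[0] + dx, b[1] + dy])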
Example 3
def getThermalImage(matrix, satellite_NOAA="19", frame_name="A"):
    temp_min = 1000
    temp_max = 0
    if satellite_NOAA == "19":
        satellite = sat.NOAA_19()
    elif satellite_NOAA == "18":
        satellite = sat.NOAA_18()
    elif satellite_NOAA == "15":
        satellite = sat.NOAA_15()
    else:
        raise ValueError("unsupported NOAA satellite: " + satellite_NOAA)
    matrix = cal.calibrate(utils.mean_filter(matrix, 2), frame_name)
    #matrix = utils.mean_filter(matrix,2)
    frame = utils.get_frame(matrix, frame_name)
    mayor_frame = tlmtry.get_mayor_frame(matrix, frame_name)
    Cs = tlmtry.compute_CS(matrix, frame_name)
    print("Cs:", Cs)
    print("mayor frame:", mayor_frame)
    thermal_matrix = get_temp_3A(frame, mayor_frame, satellite, Cs)
    imgt = Image.fromarray(thermal_matrix)
    imgt = np.array(imgt)
    numrows, numcols = imgt.shape
    return thermal_matrix
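
A usage sketch, borrowing the apt.decode call from Example 5 (the wav path is illustrative only):

# Decode an APT recording, then map frame A of a NOAA-18 pass to temperatures.
matrix = apt.decode('wav/am_demod/sample.wav', cache=True)
thermal = getThermalImage(matrix, satellite_NOAA="18", frame_name="A")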
Example 4
img_list_impulsive_median = []
img_list_impulsive_neavf = []
for img in img_list_impulsive:
    img_list_impulsive_median.append(utils.median_filter(img))
    img_list_impulsive_neavf.append(NEAVF.NEAVF(img))
# utils.show_image_list(img_list_impulsive_median, "Image with salt and pepper after median filter")
# utils.show_image_list(img_list_impulsive_neavf, "Image with salt and pepper after NEAVF filter")

img_list_gauss = img_list_gauss + img_list_gauss_005 + img_list_gauss_015
img_list_gauss_mean = []
img_list_gauss_neavf = []
for img in img_list_gauss:
    img_list_gauss_mean.append(utils.mean_filter(img))
    img_list_gauss_neavf.append(NEAVF.NEAVF(img))
# utils.show_image_list(img_list_gauss_mean, "Image with gauss after mean filter!")
# utils.show_image_list(img_list_gauss_neavf, "Image with gauss after NEAVF filter!")

# Noise measure
psnr_gauss = []
psnr_impulsive = []
psnr_NEAVF_impulsive = []
psnr_NEAVF_gauss = []
mae_gauss = []
mae_impulsive = []
mae_NEAVF_impulsive = []
mae_NEAVF_gauss = []
img_list.append(img_list[0])
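
The PSNR and MAE lists above are only initialised in this excerpt. A minimal sketch (not from the source) of the metrics they are presumably meant to hold, assuming 8-bit images of matching shape:

import numpy as np

def psnr(original, filtered, peak=255.0):
    """Peak signal-to-noise ratio in dB: 10*log10(peak^2 / MSE)."""
    mse = np.mean((original.astype(np.float64) - filtered.astype(np.float64)) ** 2)
    return float('inf') if mse == 0 else 10.0 * np.log10(peak ** 2 / mse)

def mae(original, filtered):
    """Mean absolute error between two same-shaped images."""
    return np.mean(np.abs(original.astype(np.float64) - filtered.astype(np.float64)))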
Example 5
matrix = apt.decode('wav/am_demod/sample.wav', cache=True)

satellite_NOAA = "15"

if satellite_NOAA == "19":
    satellite = sat.NOAA_19()
elif satellite_NOAA == "18":
    satellite = sat.NOAA_18()
elif satellite_NOAA == "15":
    satellite = sat.NOAA_15()
'''
Normalize image with Telemetry Frame
'''

frame = "A"
matrix_norm = cal.calibrate(utils.mean_filter(matrix, 2), frame_name="A")
'''
Display telemetry frames (comparison)
'''
frame = "A"
matrix_filtered = utils.mean_filter(matrix, 2)
telemetry_norm = tlmtry.get_frame(
    cal.calibrate(matrix_filtered, frame_name="A"), frame)
telemetry = tlmtry.get_frame(matrix_filtered, frame)

print "telemetry norm:", telemetry_norm
print "telemetry:", telemetry
tel_image = Image.fromarray(telemetry)
tel_image_norm = Image.fromarray(telemetry_norm)

frame_A = utils.get_frame(matrix_filtered, "A")
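
The PIL images built above are never written out in this excerpt; persisting them is one line each (paths are illustrative, and 8-bit telemetry arrays are assumed):

tel_image.save("img/telemetry_raw.png")        # assumes uint8 array
tel_image_norm.save("img/telemetry_norm.png")  # assumes uint8 array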
Example 6
def active(folderpath, Gmodel, Cmodel, dataset=None):
    #data path
    if dataset is None:
        testfpath = os.path.join(folderpath, "g100feature.csv")
        testlpath = os.path.join(folderpath, "g100label.csv")
        ufeaturepath = os.path.join(folderpath, "gufeature.csv")
        featurepath = os.path.join(folderpath, "r60feature.csv")
        labelpath = os.path.join(folderpath, "r60label.csv")
        feature = np.loadtxt(featurepath, delimiter="\t")
        label = np.loadtxt(labelpath, delimiter="\t")
        ufeature = np.loadtxt(ufeaturepath, delimiter="\t")
        """tfeature = np.loadtxt(testfpath, delimiter = "\t")
        tlabel = np.loadtxt(testlpath, delimiter = "\t")"""
        feature, tfeature, label, tlabel = train_test_split(feature,
                                                            label,
                                                            test_size=0.33)
    else:
        feature = dataset[0].data.float() / 255
        label = dataset[0].targets
        kepti = [
            i for i in range(len(label)) if (label[i] == 3 or label[i] == 8)
        ]
        feature = feature[kepti]
        label = label[kepti]
        knownfeature, unknownfeature, knownlabel, unknownlabel = map(
            torch.from_numpy,
            train_test_split(feature.numpy(), label.numpy(), test_size=0.999))
        tfeature = dataset[1].data.float() / 255
        tlabel = dataset[1].targets
        tkepti = [
            i for i in range(len(tlabel)) if (tlabel[i] == 3 or tlabel[i] == 8)
        ]
        tfeature = tfeature[tkepti]
        tlabel = tlabel[tkepti]
        ufeature = feature

    if Cmodel == "end_to_end":
        Gscaler = MinMaxScaler()
        ufeature_sc = Gscaler.fit_transform(
            ufeature)  #using large unlabeled data to normalize
        feature_sc = Gscaler.transform(feature)
        tfeature_sc = Gscaler.transform(tfeature)
        Gmo = ENDmodel(feature_sc, label, args)
        Gmo.train()
        Gmo.module.eval()
        with torch.no_grad():
            feature_sc = torch.tensor(feature_sc,
                                      dtype=torch.float32).to(device)
            recon, mu, _, _ = Gmo.module(feature_sc)
            print("train recon error")
            utils.recon_error(Gscaler.inverse_transform(feature_sc),
                              Gscaler.inverse_transform(recon))
            tfeature_sc = torch.tensor(tfeature_sc,
                                       dtype=torch.float32).to(device)
            trecon, tmu, _, _ = Gmo.module(tfeature_sc)
            print("test recon error")
            utils.recon_error(Gscaler.inverse_transform(tfeature_sc),
                              Gscaler.inverse_transform(trecon))
            ufeature_sc = torch.tensor(ufeature_sc,
                                       dtype=torch.float32).to(device)
            urecon, umu, _, _ = Gmo.module(ufeature_sc)
            print("unlabeled recon error")
            utils.recon_error(Gscaler.inverse_transform(ufeature_sc),
                              Gscaler.inverse_transform(urecon))
            _, _, _, ans = Gmo.module(feature_sc)
            ans = np.array(ans.detach())
            print(
                "train classify acc",
                sum([label[i] == round(ans[i][0])
                     for i in range(len(label))]) / len(ans))
            _, _, _, tans = Gmo.module(tfeature_sc)
            tans = np.array(tans.detach())
            t_acc = sum(
                [tlabel[i] == round(tans[i][0])
                 for i in range(len(tlabel))]) / len(tans)
            print("test classify acc", t_acc)
        return t_acc

    if Cmodel == "non_vae_end_to_end":
        Gscaler = MinMaxScaler()
        ufeature_sc = Gscaler.fit_transform(
            ufeature)  #using large unlabeled data to normalize
        feature_sc = Gscaler.transform(feature)
        tfeature_sc = Gscaler.transform(tfeature)
        Gmo = nENDmodel(feature_sc, label, args)
        Gmo.train()
        Gmo.module.eval()
        with torch.no_grad():
            feature_sc = torch.tensor(feature_sc,
                                      dtype=torch.float32).to(device)
            ans = Gmo.module(feature_sc)
            ans = np.array(ans.detach())
            print(
                "train classify acc",
                sum([label[i] == round(ans[i][0])
                     for i in range(len(label))]) / len(ans))
            tfeature_sc = torch.tensor(tfeature_sc,
                                       dtype=torch.float32).to(device)
            tans = Gmo.module(tfeature_sc)
            tans = np.array(tans.detach())
            t_acc = sum(
                [tlabel[i] == round(tans[i][0])
                 for i in range(len(tlabel))]) / len(tans)
            print("test classify acc", t_acc)
        return t_acc

    if Gmodel == "pca":
        Gmo = PCA(9)
        usvmfeature = Gmo.fit_transform(ufeature)
        svmfeature = Gmo.transform(feature)
        tsvmfeature = Gmo.transform(tfeature)
        recon = Gmo.inverse_transform(svmfeature)
        trecon = Gmo.inverse_transform(tsvmfeature)
        urecon = Gmo.inverse_transform(usvmfeature)
        print("train")
        utils.recon_error(feature, recon)

        #return utils.recon_error(feature,recon)

        print("test")
        utils.recon_error(tfeature, trecon)
        print("unlabel")
        utils.recon_error(ufeature, urecon)

    if Gmodel == "ae":
        Gscaler = MinMaxScaler()
        ufeature_sc = Gscaler.fit_transform(
            ufeature)  #using large unlabeled data to normalize
        feature_sc = Gscaler.transform(feature)
        tfeature_sc = Gscaler.transform(tfeature)
        Gmo = AEmodel(ufeature_sc, args)
        Gmo.train()
        Gmo.module.eval()
        with torch.no_grad():
            feature_sc = torch.tensor(feature_sc,
                                      dtype=torch.float32).to(device)
            recon, mu = Gmo.module(feature_sc)
            print("train recon error")
            utils.recon_error(Gscaler.inverse_transform(feature_sc),
                              Gscaler.inverse_transform(recon))

            #return utils.recon_error(Gscaler.inverse_transform(feature_sc), Gscaler.inverse_transform(recon))

            tfeature_sc = torch.tensor(tfeature_sc,
                                       dtype=torch.float32).to(device)
            trecon, tmu = Gmo.module(tfeature_sc)
            print("test recon error")
            utils.recon_error(Gscaler.inverse_transform(tfeature_sc),
                              Gscaler.inverse_transform(trecon))
            ufeature_sc = torch.tensor(ufeature_sc,
                                       dtype=torch.float32).to(device)
            urecon, umu = Gmo.module(ufeature_sc)
            print("unlabeled recon error")
            utils.recon_error(Gscaler.inverse_transform(ufeature_sc),
                              Gscaler.inverse_transform(urecon))
        svmfeature = mu
        tsvmfeature = tmu
        usvmfeature = umu
        """#看AE每个variable都表示什么
        d=20
        a=mu[d]
        c=np.array(Gscaler.inverse_transform(feature_sc)[d],dtype=np.uint8)
        b=5
        delta=np.ones(a.shape)
        img=np.tile(np.reshape(c,(-1,1)),50)
        cv2.imshow("i",img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        for i in range(10):
            delta[b]+=2*i
            with torch.no_grad():
                tes=torch.tensor(np.multiply(a,delta),dtype=torch.float32).to(device)
                img=np.tile(np.reshape(Gmo.module.decode(tes).detach().numpy(),(-1,1)),50)
            cv2.imshow("i",img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()"""

    if Gmodel == "vae":
        Gscaler = MinMaxScaler()
        ufeature_sc = Gscaler.fit_transform(
            ufeature)  #using large unlabeled data to normalize
        feature_sc = Gscaler.transform(feature)
        tfeature_sc = Gscaler.transform(tfeature)
        Gmo = VAEmodel(ufeature_sc, args)
        Gmo.train()
        Gmo.module.eval()
        with torch.no_grad():
            feature_sc = torch.tensor(feature_sc,
                                      dtype=torch.float32).to(device)
            recon, mu, _ = Gmo.module(feature_sc)
            print("train recon error")
            train_recon = utils.recon_error(
                Gscaler.inverse_transform(feature_sc),
                Gscaler.inverse_transform(recon))

            #return train_recon

            tfeature_sc = torch.tensor(tfeature_sc,
                                       dtype=torch.float32).to(device)
            trecon, tmu, _ = Gmo.module(tfeature_sc)
            print("test recon error")
            utils.recon_error(Gscaler.inverse_transform(tfeature_sc),
                              Gscaler.inverse_transform(trecon))
            ufeature_sc = torch.tensor(ufeature_sc,
                                       dtype=torch.float32).to(device)
            urecon, umu, _ = Gmo.module(ufeature_sc)
            print("unlabeled recon error")
            utils.recon_error(Gscaler.inverse_transform(ufeature_sc),
                              Gscaler.inverse_transform(urecon))
        svmfeature = mu
        tsvmfeature = tmu
        usvmfeature = umu
        """#看VAE每个variable都表示什么
        d=20
        a=mu[d]
        c=np.array(Gscaler.inverse_transform(feature_sc)[d],dtype=np.uint8)
        b=5
        delta=np.ones(a.shape)
        img=np.tile(np.reshape(c,(-1,1)),50)
        cv2.imshow("i",img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        for i in range(10):
            delta[b]+=2*i
            with torch.no_grad():
                tes=torch.tensor(np.multiply(a,delta),dtype=torch.float32).to(device)
                img=np.tile(np.reshape(Gmo.module.decode(tes).detach().numpy(),(-1,1)),50)
            cv2.imshow("i",img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()"""

    if Gmodel == "cnnae":
        ufeature_sc = ufeature
        knownfeature_sc = knownfeature
        unknownfeature_sc = unknownfeature
        tfeature_sc = tfeature
        Gmo = CNNAEmodel(ufeature_sc)
        if args.trained == False:
            Gmo.train()
            Gmo.save()
        else:
            Gmo.load()
        Gmo.module.eval()
        with torch.no_grad():
            knownfeature_sc = knownfeature_sc.float().to(device).view(
                knownfeature_sc.size(0), 1, knownfeature_sc.size(1),
                knownfeature_sc.size(2))
            knownrecon, knownmu = Gmo.module(knownfeature_sc)
            unknownfeature_sc = unknownfeature_sc.float().to(device).view(
                unknownfeature_sc.size(0), 1, unknownfeature_sc.size(1),
                unknownfeature_sc.size(2))
            unknownrecon, unknownmu = Gmo.module(unknownfeature_sc)
            """for i in range(5):
                img=to_img(recon[i])
                save_image(img,"reconstructed_pics/train"+str(i)+".png")"""
            tfeature_sc = tfeature_sc.float().to(device).view(
                tfeature_sc.size(0), 1, tfeature_sc.size(1),
                tfeature_sc.size(2))
            trecon, tmu = Gmo.module(tfeature_sc)
            ufeature_sc = ufeature_sc.float().to(device).view(
                ufeature_sc.size(0), 1, ufeature_sc.size(1),
                ufeature_sc.size(2))
            urecon, umu = Gmo.module(ufeature_sc)
        knownsvmfeature = knownmu
        unknownsvmfeature = unknownmu
        tsvmfeature = tmu
        usvmfeature = umu

    elif Gmodel == "gmm":
        Gmo = GMMmodel(visualization=0)
        svmfeature = Gmo.module.encode(feature)
        recon = Gmo.module.decode(svmfeature)
        tsvmfeature = Gmo.module.encode(tfeature)
        trecon = Gmo.module.decode(tsvmfeature)
        print("train")
        utils.recon_error(feature, recon)

        #return utils.recon_error(feature,recon)

        print('test')
        utils.recon_error(tfeature, trecon)

        ans = Gmo.module.rule_classication(svmfeature)
        tans = Gmo.module.rule_classication(tsvmfeature)
        #print([label[i] == ans[i] for i in range(len(label))])
        acc = sum([label[i] == ans[i]
                   for i in range(len(label))]) / ans.shape[0]
        tacc = sum([tlabel[i] == tans[i]
                    for i in range(len(tlabel))]) / tans.shape[0]
        print("rule based train acc: ", acc)
        print("rule based test acc: ", tacc)
        ans = utils.derivative_filter(feature)
        tans = utils.derivative_filter(tfeature)
        mans = utils.mean_filter(feature)
        mtans = utils.mean_filter(tfeature)
        acc = sum([label[i] == mans[i] for i in range(len(label))]) / len(mans)
        tacc = sum([tlabel[i] == mtans[i]
                    for i in range(len(tlabel))]) / len(mtans)
        print("mean rule based train acc: ", acc)
        print("mean rule based test acc: ", tacc)
        acc = sum([label[i] == ans[1][i]
                   for i in range(len(label))]) / len(ans[1])
        tacc = sum([tlabel[i] == tans[1][i]
                    for i in range(len(tlabel))]) / len(tans[1])
        print("2nd derivative rule based train acc: ", acc)
        print("2nd derivative rule based test acc: ", tacc)

    elif Gmodel == "bigan":
        pass

    #compute the cov matrix
    """np.set_printoptions(precision=3)
    plt.imshow(np.cov(svmfeature,rowvar=False))
    plt.show()"""

    #scaling
    svmscaler = StandardScaler()
    svmscaler.fit(
        usvmfeature)  #should be using large unlabeled data to normalize
    knownsvmfeature_sc = svmscaler.transform(knownsvmfeature)
    unknownsvmfeature_sc = svmscaler.transform(unknownsvmfeature)
    tsvmfeature_sc = svmscaler.transform(tsvmfeature)

    if Cmodel == "linear_svm":
        classifier = SVC(kernel="linear")
        classifier.fit(knownsvmfeature_sc, knownlabel.numpy())
        print(classifier.coef_)
        print("train score: ",
              classifier.score(knownsvmfeature_sc, knownlabel.numpy()),
              "test score: ", classifier.score(tsvmfeature_sc, tlabel))

    if Cmodel == "rbf_svm":
        classifier = SVC()
        classifier.fit(knownsvmfeature_sc, knownlabel.numpy())
        print("train score: ",
              classifier.score(knownsvmfeature_sc, knownlabel.numpy()),
              "test score: ", classifier.score(tsvmfeature_sc, tlabel.numpy()))
        return classifier.score(tsvmfeature_sc, tlabel.numpy())

    if Cmodel == "decision_tree":
        classifier = DecisionTreeClassifier()
        classifier.fit(knownsvmfeature_sc, knownlabel.numpy())
        print("train score: ",
              classifier.score(knownsvmfeature_sc, knownlabel.numpy()),
              "test score: ", classifier.score(tsvmfeature_sc, tlabel.numpy()))
        return classifier.score(tsvmfeature_sc, tlabel.numpy())

    if Cmodel == "random_forest":
        classifier = RandomForestClassifier()
        classifier.fit(X=knownsvmfeature_sc, y=knownlabel.numpy())
        print("train score: ",
              classifier.score(knownsvmfeature_sc, knownlabel.numpy()),
              "test score: ", classifier.score(tsvmfeature_sc, tlabel.numpy()))
        return classifier.score(tsvmfeature_sc, tlabel.numpy())

    rclassifier = copy.deepcopy(classifier)
    knownlabel = np.reshape(knownlabel, (-1, 1))
    knownlabeleddata = np.hstack((knownsvmfeature_sc, knownlabel))
    utils.save_in_train_all(knownlabeleddata, os.path.join(folderpath, Gmodel))
    utils.save_in_random(knownlabeleddata, os.path.join(folderpath, Gmodel))
    """traindata = np.loadtxt(os.path.join(folderpath, Gmodel) + "/knowndata.txt")
    feature_sc=traindata[:,:-1]
    label=traindata[:,-1]"""

    expert_batch_num = 10
    iter_num = 10
    tscore = [classifier.score(tsvmfeature_sc, tlabel)]
    rscore = [rclassifier.score(tsvmfeature_sc, tlabel)]
    with open(os.path.join(os.path.join(folderpath, Gmodel), "activepic.pkl"),
              "wb") as f:
        activepic = np.zeros((iter_num, expert_batch_num, 28, 28))
        pickle.dump(activepic, f)
    for i in range(iter_num):
        samp = sample(os.path.join(folderpath, Gmodel), classifier,
                      expert_batch_num)  #add data in generated.txt
        expert_label(os.path.join(folderpath, Gmodel), expert_batch_num,
                     svmscaler, Gmo, i)  #put data in knowndata.txt
        random_add(os.path.join(folderpath, Gmodel), expert_batch_num,
                   unknownsvmfeature_sc, unknownlabel)
        knowndata = np.loadtxt(
            os.path.join(os.path.join(folderpath, Gmodel), "knowndata.txt"))
        randomdata = np.loadtxt(
            os.path.join(os.path.join(folderpath, Gmodel), "randomdata.txt"))
        tscore.append(
            train_classifier(classifier, knowndata, tsvmfeature_sc, tlabel))
        rscore.append(
            train_classifier(rclassifier, randomdata, tsvmfeature_sc, tlabel))
    print("active learning score", tscore)
    print("random choose score", rscore)
    x = np.arange(iter_num + 1) * expert_batch_num + knownfeature.size(0)
    plt.plot(x, tscore, label="active learning accuracy")
    plt.plot(x, rscore, label="random pick accuracy")
    plt.xlabel('data amount')
    plt.ylabel('accuracy')
    plt.savefig(
        os.path.join(os.path.join(folderpath, Gmodel),
                     "compare_with_random.png"))

    #save
    with open(os.path.join(os.path.join(folderpath, Gmodel), 'linear_svm.pkl'),
              'wb') as fw:
        pickle.dump((classifier, svmscaler), fw)

    #load
    with open(os.path.join(os.path.join(folderpath, Gmodel), 'linear_svm.pkl'),
              'rb') as fr:
        classifier_n, svmscaler_n = pickle.load(fr)
        tsvmfeature_sc_n = svmscaler_n.transform(tsvmfeature)
        print("testing set score", classifier_n.score(tsvmfeature_sc_n,
                                                      tlabel))

    with open(os.path.join(os.path.join(folderpath, Gmodel), "activepic.pkl"),
              "rb") as f:
        activepic = pickle.load(f)
        bigpic = np.zeros((activepic.shape[0] * 28, activepic.shape[1] * 28))
        for i in range(activepic.shape[0]):
            for j in range(activepic.shape[1]):
                bigpic[i * 28:i * 28 + 28, j * 28:j * 28 + 28] = activepic[i,
                                                                           j]
        activeprocess = torch.from_numpy(bigpic)
        activeprocess = activeprocess.clamp(0, 1)  # clamp returns a new tensor
        save_image(
            activeprocess,
            os.path.join(os.path.join(folderpath, Gmodel),
                         "active_process.png"))
        """cv2.normalize(bigpic, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
        cv2.imwrite(os.path.join(os.path.join(folderpath, Gmodel), "active_process.png"),bigpic)"""
        """im = Image.fromarray(bigpic)
        im.save(os.path.join(os.path.join(folderpath, Gmodel), "active_process.png"))"""

    return classifier
    """#preparing
    label = np.reshape(label, (-1,1))
    labeleddata = np.hstack((svmfeature_sc, label))
    utils.save_in_train_all(labeleddata, 0, os.path.join(folderpath, Gmodel))
    svm = SGDClassifier(max_iter = 10000)

    #begin training process step1
    traindata = np.loadtxt(os.path.join(folderpath, Gmodel) + "/train/balancing.txt")

    feature_sc=traindata[:,:-1]
    label=traindata[:,-1]
    svm.partial_fit(feature_sc,label,classes=[0,1])
    print("coef", svm.coef_)
    pred=svm.predict(feature_sc)
    truelabel=pred==label
    for i in range(len(truelabel)):
        if truelabel[i]==0:
            print("data(from 0)",i,feature_sc[i])
            fea=svmscaler.inverse_transform(feature_sc[i])
            image=utils.toimage(fea[0:3],fea[3:6],fea[6:9])
            cv2.imshow("generated",image)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
    testf=feature_sc
    testl=label
    print("the latest train dataset score",svm.score(testf,testl))

    train_svm(svm,None,traindata)
    dissum=0
    for feature in ufeature_sc:
        dissum+=(utils.decision_distance(svm,feature))
    dismean=dissum/len(ufeature_sc)"""
    """#begin training process step2
Example 7
import cv2
import os
from utils import normalize, sharpen, mean_filter
from utils.laplacian import laplace_operation
from utils.sobel import sobel_operation


if __name__ == '__main__':

    fn = "./img/skeleton_orig.tif"
    if not os.path.isfile(fn):
        raise FileNotFoundError("invalid input image: " + fn)

    # Edge-enhancement maps from the Laplacian and Sobel operators.
    enh_laps = laplace_operation(fp=fn, dst="./img/laplace.tif")
    enh_sob = sobel_operation(fp=fn, dst="./img/sobel.tif")

    srp_laplace = sharpen(fp=fn, dst="./img/sharpen_laps.tif", enhancement=enh_laps)
    srp_sobel = sharpen(fp=fn, dst="./img/sharpen_sob.tif", enhancement=enh_sob)

    # Smooth the Sobel map with a 5x5 mean filter.
    avg_filtered = mean_filter(enh_sob, size=5)
    cv2.imwrite("./img/avg_flt5.tif", normalize(avg_filtered))

    # Mask = product of the Laplacian-sharpened image and the smoothed gradient.
    enh_mask = srp_laplace * avg_filtered
    cv2.imwrite("./img/enh_mask.tif", normalize(enh_mask))
    srp_mask = sharpen(fp=fn, dst="./img/sharpen_mask.tif", enhancement=enh_mask)

    # Final power-law (gamma = 0.5) contrast stretch.
    srp_gamma = normalize(srp_mask ** 0.5)
    cv2.imwrite("./img/sharpen_gamma.tif", srp_gamma)