Example 1
def __getitem__(self, index):
    # Build the absolute path prefix for this case.
    abs_path = self.path + self.list[index] + '/' + self.list[index] + '_'
    # Read the ground-truth segmentation.
    if self.read_label:
        gt_path = abs_path + 'seg.nii.gz'
        gt = nib.load(gt_path)
        gt = gt.get_fdata()
    # Read the four modalities named by the module-level `type1` suffix
    # list, normalize each volume, and stack them along a channel axis.
    if self.read_image:
        feat = nib.load(abs_path + type1[0] + '.nii.gz')
        feat = feat.get_fdata()
        feat = normalize(feat)
        feat = np.expand_dims(feat, axis=0)
        for i in range(1, 4):
            feat1 = nib.load(abs_path + type1[i] + '.nii.gz')
            feat1 = feat1.get_fdata()
            feat1 = normalize(feat1)
            feat1 = np.expand_dims(feat1, axis=0)
            feat = np.concatenate((feat, feat1), axis=0)
        feat = torch.tensor(feat, dtype=torch.float32)
    if not self.read_image:
        return gt
    elif not self.read_label:
        return feat
    else:
        return feat, gt
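
The `normalize` used above is project-specific and not shown. A minimal sketch of one common choice for MRI volumes, z-scoring over the nonzero voxels (an assumption, not the snippet's actual helper):

import numpy as np

def normalize(volume):
    # Z-score over nonzero (brain) voxels; an assumed implementation,
    # not the one the dataset class above actually imports.
    voxels = volume[volume > 0]
    return (volume - voxels.mean()) / (voxels.std() + 1e-8)
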
Example 2
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable  # legacy wrapper; a no-op since PyTorch 0.4

# `net` (a Siamese model) and `normalize` are defined at module level.

def f_Distance(img0_hsv, img0_grad, img1_hsv, img1_grad):
    # Blend the HSV and gradient embeddings with weight a.
    a = 0.9
    output0_hsv, output1_hsv = net(Variable(img0_hsv), Variable(img1_hsv))
    output0_grad, output1_grad = net(Variable(img0_grad), Variable(img1_grad))
    output0 = torch.cat((a * output0_hsv, (1 - a) * output0_grad), 1)
    output1 = torch.cat((a * output1_hsv, (1 - a) * output1_grad), 1)

    euclidean_distance = F.pairwise_distance(output0, output1)
    # Despite its name, this is the Euclidean distance between the
    # normalized embeddings, a monotone function of cosine distance.
    cos_distance = np.linalg.norm(normalize(output0.data.numpy()[0]) -
                                  normalize(output1.data.numpy()[0]))
    distance = cos_distance * euclidean_distance
    return distance
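
Assuming `normalize` performs L2 normalization, the `cos_distance` above is tied directly to cosine similarity: for unit vectors, ||u - v||^2 = 2(1 - cos θ). A quick self-contained check:

import numpy as np

u, v = np.random.rand(128), np.random.rand(128)
u_hat, v_hat = u / np.linalg.norm(u), v / np.linalg.norm(v)
cos_sim = np.dot(u_hat, v_hat)
# ||u_hat - v_hat||^2 equals 2 * (1 - cos_sim) up to floating-point error.
assert np.isclose(np.linalg.norm(u_hat - v_hat) ** 2, 2 * (1 - cos_sim))
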
Example 3
import numpy as np

import train  # project module under test

def test_normalize():
    x = np.random.rand(1000, 10, 10) * 255
    # `np.int` was removed in NumPy 1.24; use the builtin `int` instead.
    x_int = x.astype(int)
    x_norm = train.normalize(x_int)
    epsilon = 0.01
    # With 100,000 samples the observed extremes should be near 0 and 1.
    assert x_norm.min() < epsilon
    assert x_norm.max() > 1 - epsilon
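
The test pins down the contract of `train.normalize`: it must map the array into [0, 1] with the observed extremes near the bounds. A minimal implementation satisfying it (an assumption; the project's version may differ, e.g. fixed 255 scaling):

import numpy as np

def normalize(x):
    # Min-max scale to [0, 1]; an assumed implementation consistent with
    # the test above, not necessarily the project's own.
    x = x.astype(np.float64)
    return (x - x.min()) / (x.max() - x.min())
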
Example 4
import numpy as np
from skimage import io
from skimage.measure import (label, moments, moments_central, moments_hu,
                             moments_normalized, regionprops)

# `get_threshold` and `normalize` are project helpers.

def collect(path, mean, std):
    img = io.imread('./images/' + path + '.bmp')
    th = get_threshold('./images/' + path + '.bmp')
    img_binary = (img < th).astype(np.double)
    img_label = label(img_binary, background=0)
    regions = regionprops(img_label)
    boxes = []
    features = []
    for props in regions:
        minr, minc, maxr, maxc = props.bbox
        # Skip components too small or too large to be valid regions.
        if maxc - minc < 10 or maxr - minr < 10 or maxc - minc > 120 or maxr - minr > 120:
            continue
        boxes.append([minr, maxr, minc, maxc])

        # Hu moments of the binary region: translation-, scale- and
        # rotation-invariant shape descriptors.
        roi = img_binary[minr:maxr, minc:maxc]
        m = moments(roi)
        cr = m[0, 1] / m[0, 0]
        cc = m[1, 0] / m[0, 0]
        # Since scikit-image 0.16 the centroid is passed as a keyword;
        # index conventions changed across versions, so verify cr/cc
        # against your scikit-image release.
        mu = moments_central(roi, center=(cr, cc))
        nu = moments_normalized(mu)
        hu = moments_hu(nu)
        features.append(hu)

    feature_arr = normalize(features, mean, std)
    return boxes, feature_arr
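
Given its `(features, mean, std)` signature, the `normalize` here is presumably a z-score over the Hu-moment columns. A minimal sketch (an assumption):

import numpy as np

def normalize(features, mean, std):
    # Standardize each Hu-moment column with precomputed statistics;
    # an assumed implementation, not the project's own.
    return (np.asarray(features) - mean) / std
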
Example 5
import os

import numpy as np
from sklearn.preprocessing import LabelEncoder

# detect_face, get_embedded_data and normalize are defined elsewhere in the project.

def classify(image, face_model, svm_model):
    try:
        image = detect_face(image)
        image = np.expand_dims(image, 0)
        image_embedding = get_embedded_data(face_model, image)
        image_normalized = normalize(image_embedding)

        prediction = svm_model.predict(image_normalized)
        pred_proba = svm_model.predict_proba(image_normalized)
        # Probability the SVM assigns to its predicted class.
        prob = pred_proba[0][prediction[0]]

        label_encode = LabelEncoder()
        label_encode.classes_ = np.load(os.path.join('model', 'classes.npy'))
        prediction = label_encode.inverse_transform(prediction)
        prediction = prediction[0]

        if prob > 0.5:
            prediction = "I know her! It's " + prediction + "!"
        elif prob > 0.40:
            prediction = "I'm not really sure, is it " + prediction + "?"
        else:
            prediction = "I don't know who that is. Expand my database, maybe?"

    except Exception:
        prediction = "I can't see her face. Try another photo."
    return prediction
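
The `pred_proba[0][prediction[0]]` indexing works because the SVM was trained on `LabelEncoder` labels, so class indices run 0..n-1 and line up with the `predict_proba` columns. A self-contained illustration with a toy model:

import numpy as np
from sklearn.svm import SVC

X = np.random.rand(20, 4)
y = np.array([0, 1] * 10)
svm_model = SVC(probability=True).fit(X, y)

prediction = svm_model.predict(X[:1])        # integer class index, e.g. [1]
pred_proba = svm_model.predict_proba(X[:1])  # shape (1, n_classes)
prob = pred_proba[0][prediction[0]]          # probability of the predicted class
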
Example 6
import os

import numpy as np

def predict(mileage):
    """Predict the price given the mileage and the weights."""
    if os.path.exists('weights.csv'):
        w = np.loadtxt('weights.csv')
    else:
        # No trained weights yet: zeros predict a price of 0.
        w = np.zeros(2)
    mileage = normalize(mileage)
    return w[0] + w[1] * float(mileage)
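
For the weights to apply, `normalize` must rescale an input mileage with the same statistics used during training. A minimal sketch assuming min-max scaling (the bounds are placeholders, not values from the original project):

def normalize(mileage, x_min=22899.0, x_max=240000.0):
    # Min-max scale a raw mileage into [0, 1]; x_min and x_max must match
    # the training data's bounds (placeholder values here).
    return (float(mileage) - x_min) / (x_max - x_min)
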
Example 7
import matplotlib.pyplot as plt
import numpy as np

def linear_plot(scatter=False):
    X_datas, y_datas = load_datas("data.csv")
    w = np.loadtxt('weights.csv')
    X = np.arange(60000, 250000, 500)
    # Predict on normalized mileages but plot against the raw values.
    X_norm = normalize(X)
    y = w[0] + w[1] * X_norm
    if scatter:
        plt.scatter(X_datas, y_datas)
    plt.plot(X, y)
    plt.show()
Example 8
def main():
    df = load_data('dataset')
    # `normalize` presumably returns the scaling factors; they are unused
    # here, and `optimize` receives the (possibly normalized-in-place) frame.
    _scales = normalize(df)
    optimize(df)
Example 9
import numpy as np
from keras.models import load_model

# readTrain, augFeatures, normalize, buildTrain and splitData are project helpers.

# Adj Close stats: mean 1482.116, min 676.53, max 2930.75
# mean = 1482.116
# min = 676.53
# max = 2930.75

# Identity de-scaling placeholder; the real stats are commented out above.
# y_min/y_max keep the builtins min/max unshadowed.
mean = 0
y_min = 0
y_max = 1

model = load_model("v1.0/m12.h5")

train = readTrain()
train_Aug = augFeatures(train)
train_norm = normalize(train_Aug)
X_train, Y_train = buildTrain(train_norm, 3, 1)
X_train, Y_train, X_val, Y_val = splitData(X_train, Y_train, 0.1)

prediction = model.predict(X_val)

print(Y_val.shape, prediction.shape)

# De-scale predictions and targets back to price units.
for i in range(len(prediction)):
    print(prediction[i] * (y_max - y_min) + mean, Y_val[i] * (y_max - y_min) + mean)

# Count how often the predicted one-step direction matches the actual one.
count1 = 0
for i in range(len(prediction) - 1):
    if np.sign(prediction[i + 1] - Y_val[i]) == np.sign(Y_val[i + 1] - Y_val[i]):
        count1 += 1
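
The final loop tallies directional hits; continuing from the variables above, the same statistic can be computed vectorized (a sketch, assuming 1-D arrays):

pred = np.asarray(prediction).ravel()
actual = np.asarray(Y_val).ravel()
hits = np.sign(pred[1:] - actual[:-1]) == np.sign(actual[1:] - actual[:-1])
directional_accuracy = hits.mean()  # equals count1 / (len(prediction) - 1)
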
Example 10
import cv2
import numpy as np

def get_encode(face_encoder, face, size):
    # Normalize, resize to the encoder's input size, and embed one face.
    face = normalize(face)
    face = cv2.resize(face, size)
    encode = face_encoder.predict(np.expand_dims(face, axis=0))[0]
    return encode
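
A typical downstream use of such embeddings is verification by distance. A minimal, self-contained sketch (the threshold is a placeholder and must be tuned):

import numpy as np

def is_same_person(encode_a, encode_b, threshold=0.5):
    # Cosine similarity between L2-normalized embeddings; the threshold
    # is an assumption, not a value from the original project.
    a = encode_a / np.linalg.norm(encode_a)
    b = encode_b / np.linalg.norm(encode_b)
    return np.dot(a, b) > threshold
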
Example 11
    test_path_list = glob.glob(settings["test_dir"] + "/*.jpg")
    save_score_path = os.path.join(settings["save_root"],
                                   settings["csv_name"] + ".csv")
    # Derive the epoch number from the name of the weight file to use.
    weight_epoch = int(
        os.path.basename(settings["g_weight_path"]).replace(
            "generator_", "").replace(".h5", "")) + 1
    print("Generator weights in use: {}".format(settings["g_weight_path"]))
    print()
    print("Discriminator weights in use: {}".format(settings["d_weight_path"]))

    # Prepare the dataset.
    X_test, y_test = load_data(test_path_list)

    # Normalize the data.
    X_test = normalize(X_test)
    input_shape = X_test[0].shape

    # Set up the DCGAN.
    dcgan = DCGAN(settings["input_dim"], input_shape)
    dcgan.load_weights(d_weight=settings["d_weight_path"],
                       g_weight=settings["g_weight_path"])

    # Root folder for saving results.

    start = time.time()
    # Create the ANOGAN instance.
    anogan = ANOGAN(settings["input_dim"], dcgan.g)
    anogan.compile(anogan_optim)

    with open(save_score_path, "w") as f:
Example 12
import os

import joblib
import matplotlib.pyplot as plt
import numpy as np
from keras.models import load_model
from sklearn.preprocessing import LabelEncoder

# load_images, detect_face, get_embedded_data and normalize are project helpers.

if __name__ == '__main__':
    input_dir = 'play'

    images = load_images(input_dir)
    cropped_images = list()

    for i in range(len(images)):
        cropped_images.append(detect_face(images[i]))

    face_model = load_model(os.path.join('model', 'facenet_keras.h5'))
    cropped_images = get_embedded_data(face_model, cropped_images)

    cropped_images = normalize(cropped_images)

    model = joblib.load(os.path.join('model', 'svm_model.sav'))

    pred_test = model.predict(cropped_images)
    pred_proba = model.predict_proba(cropped_images)

    label_encode = LabelEncoder()
    label_encode.classes_ = np.load(os.path.join('model', 'classes.npy'))
    predicted_names = label_encode.inverse_transform(pred_test)

    for i, image in enumerate(images):
        # plt.figure()
        plt.imshow(image)
        plt.title("Predicted: " + predicted_names[i] + " with " +
                  str(round(pred_proba[i][pred_test[i]] * 100, 2)) +
Example 13
import os

import cv2
import numpy as np
from keras.optimizers import Adam

from dcgan import DCGAN
from anogan import ANOGAN
from data_loader import load_cucumber
from train import normalize, denormalize


if __name__ == '__main__':
    iterations = 100
    input_dim = 30
    anogan_optim = Adam(lr=0.001, amsgrad=True)

    ### 0. prepare data
    X_train, X_test, y_test = load_cucumber()
    X_train = normalize(X_train)
    X_test = normalize(X_test)
    input_shape = X_train[0].shape

    ### 1. load the trained generator & discriminator
    dcgan = DCGAN(input_dim, input_shape)
    # Keyword arguments avoid swapping the generator/discriminator paths.
    dcgan.load_weights(g_weight='weights/generator_3999.h5',
                       d_weight='weights/discriminator_3999.h5')

    ### 2. compute an anomaly score for each test image
    for i, test_img in enumerate(X_test):
        test_img = test_img[np.newaxis, :, :, :]
        anogan = ANOGAN(input_dim, dcgan.g)
        anogan.compile(anogan_optim)
        anomaly_score, generated_img = anogan.compute_anomaly_score(test_img, iterations)
        generated_img = denormalize(generated_img)
        imgs = np.concatenate((denormalize(test_img[0]), generated_img[0]), axis=1)
        cv2.imwrite('predict' + os.sep + str(int(anomaly_score)) + '_' + str(i) + '.png', imgs)
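
For reference, the AnoGAN anomaly score (Schlegl et al., 2017) that `compute_anomaly_score` presumably optimizes combines a pixel residual with a discriminator feature-matching term. A schematic sketch, not this repository's implementation:

import numpy as np

def anomaly_score(x, g_z, feat_x, feat_gz, lam=0.1):
    # Residual loss: pixel-wise L1 distance between query and reconstruction.
    residual = np.abs(x - g_z).sum()
    # Discrimination loss: L1 distance in discriminator feature space.
    discrimination = np.abs(feat_x - feat_gz).sum()
    return (1 - lam) * residual + lam * discrimination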