Example #1
from pathlib import Path

import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm

# get_args, get_model and error are helpers from the surrounding project and are
# assumed to be importable in this file.


def main():
    args = get_args()
    depth = args.depth
    k = args.width
    weight_file = args.weight_file

    # load model and weights    
    img_size = 299
    batch_size = 32
    model = get_model(model_name="Xception")
    
    model.load_weights(weight_file)
    dataset_root = Path(__file__).parent.joinpath("GRP-AUD", "GRP-AUD-release")
    validation_image_dir = dataset_root.joinpath("test")
    gt_valid_path = dataset_root.joinpath("gt_avg_test.csv")
    image_paths = list(validation_image_dir.glob("*_face.jpg"))

    faces = np.empty((batch_size, img_size, img_size, 3))
    ages = []
    image_names = []

    for i, image_path in tqdm(enumerate(image_paths), total=len(image_paths)):
        faces[i % batch_size] = cv2.resize(cv2.imread(str(image_path), 1), (img_size, img_size))
        image_names.append(image_path.name[:-9])

        if (i + 1) % batch_size == 0 or i == len(image_paths) - 1:
            valid = i % batch_size + 1  # number of images actually in this (possibly partial) batch
            results = model.predict(faces[:valid])
            ages_out = np.arange(0, 101).reshape(101, 1)
            # expected age: probability-weighted sum over the 101 integer age bins
            predicted_ages = results[1].dot(ages_out).flatten()
            ages += list(predicted_ages)
            
    print(len(ages))
    print(len(image_names))
    name2age = {image_names[i]: ages[i] for i in range(len(image_names))}
    df = pd.read_csv(str(gt_valid_path))
    appa_abs_error = 0.0
    real_abs_error = 0.0
    epsilon_error = 0.0
    count1 = 0
    count2 = 0
    iter = 0

    for i, row in df.iterrows():
        #iter += 1
        difference1 = name2age[row.file_name] - row.apparent_age_avg
        difference2 = name2age[row.file_name] - row.real_age
        appa_abs_error += abs(difference1)
        real_abs_error += abs(difference2)
        epsilon_error += error(name2age[row.file_name], row.apparent_age_avg, 0.3)
        '''if int(difference1) == 0:
            count1 += 1
        if int(difference2) == 0:
            count2 += 1
        if iter < 5:
            print("Predicted age: {}".format(name2age[row.file_name]))'''
    print("MAE Apparent: {}".format(appa_abs_error / len(image_names)))
    print("MAE Real: {}".format(real_abs_error / len(image_names)))
    print("epsilon-error: {}".format(epsilon_error / len(image_names)))
Example #2
import matplotlib.pyplot as plt

import model2 as mod2  # mod2 is the project's model2 module (imported this way in Example #5)


def plot_accuracy():
    acc_traces = mod2.get_model()[1]
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
    fig.suptitle('Horizontally stacked subplots')
    ax1.plot(acc_traces['nb_epochs'], acc_traces['accuracy_train'])
    ax1.plot(acc_traces['nb_epochs'], acc_traces['accuracy_test'])
    ax2.plot(acc_traces['nb_epochs'], acc_traces['loss_train'])
    ax2.plot(acc_traces['nb_epochs'], acc_traces['loss_test'])
    ax3.plot(acc_traces['nb_epochs'], acc_traces['tain_on_test'])  # key name kept as spelled in the project's traces
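
The traces plotted above come from the project's mod2.get_model()[1] dictionary, which is not shown on this page. A small self-contained sketch of the same kind of plot, using hypothetical trace data and adding the axis labels and legend the original omits:

import matplotlib.pyplot as plt

# Hypothetical traces standing in for mod2.get_model()[1].
acc_traces = {
    'nb_epochs': list(range(10)),
    'accuracy_train': [0.50 + 0.04 * i for i in range(10)],
    'accuracy_test': [0.48 + 0.03 * i for i in range(10)],
}

fig, ax = plt.subplots()
ax.plot(acc_traces['nb_epochs'], acc_traces['accuracy_train'], label='train accuracy')
ax.plot(acc_traces['nb_epochs'], acc_traces['accuracy_test'], label='test accuracy')
ax.set_xlabel('epoch')
ax.set_ylabel('accuracy')
ax.legend()
plt.show()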
Example #3
import cv2
import dlib
import numpy as np

# get_args, get_model, draw_label, yield_images and yield_images_from_dir are
# helpers from the surrounding project and are assumed to be importable in this file.


def main():
    args = get_args()
    model_name = args.model_name
    weight_file = args.weight_file
    margin = args.margin
    image_dir = args.image_dir

    # for face detection
    detector = dlib.get_frontal_face_detector()

    # load model and weights
    model = get_model(model_name=model_name)
    model.load_weights(weight_file)
    img_size = model.input.shape.as_list()[1]

    image_generator = yield_images_from_dir(image_dir) if image_dir else yield_images()

    for img in image_generator:
        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        detected = detector(input_img, 1)
        faces = np.empty((len(detected), img_size, img_size, 3))

        if len(detected) > 0:
            for i, d in enumerate(detected):
                x1, y1, x2, y2, w, h = (d.left(), d.top(), d.right() + 1,
                                        d.bottom() + 1, d.width(), d.height())
                xw1 = max(int(x1 - margin * w), 0)
                yw1 = max(int(y1 - margin * h), 0)
                xw2 = min(int(x2 + margin * w), img_w - 1)
                yw2 = min(int(y2 + margin * h), img_h - 1)
                cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                faces[i, :, :, :] = cv2.resize(
                    img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))

            # predict ages of the detected faces
            results = model.predict(faces)
            ages = np.arange(0, 101).reshape(101, 1)
            predicted_ages = results.dot(ages).flatten()

            # draw results
            for i, d in enumerate(detected):
                label = str(int(predicted_ages[i]))
                draw_label(img, (d.left(), d.top()), label)

        cv2.imshow("result", img)
        key = cv2.waitKey(-1) if image_dir else cv2.waitKey(30)

        if key == 27:  # ESC
            break
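
yield_images, yield_images_from_dir and draw_label are helpers from the surrounding project and are not shown on this page. A minimal sketch of what two of them could look like (names kept, behavior assumed rather than taken from the project's source):

import cv2
from pathlib import Path


def yield_images_from_dir(image_dir):
    # Assumed behavior: yield every readable image in the directory as a BGR array.
    for image_path in sorted(Path(image_dir).glob("*")):
        img = cv2.imread(str(image_path), 1)
        if img is not None:
            yield img


def draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX,
               font_scale=0.8, thickness=1):
    # Assumed behavior: draw the label on a filled box anchored at `point`.
    size = cv2.getTextSize(label, font, font_scale, thickness)[0]
    x, y = point
    cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
    cv2.putText(image, label, (x, y), font, font_scale, (255, 255, 255), thickness)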
Example #4
                                            encoding='latin1')
spectrograms = np.load(open(os.path.join(base_path, spec_file_name), 'rb'))
rng = rng.RNG(seed)  # Note: even though everything is seeded, some stochasticity remains; the cause is unknown.

##########Processing the data#################
vectors = np.delete(vectors, delete, axis=0)
spectrograms = np.delete(spectrograms, delete, axis=0)
shuffle_inds = np.arange(vectors.shape[0])
rng.shuffle(shuffle_inds)  # Shuffling the data
vectors = vectors[shuffle_inds][:, :top].astype(theano.config.floatX)
spectrograms = np.expand_dims(spectrograms[shuffle_inds],
                              axis=1)  # Extra axis for the channel
##############################################
model = model2.get_model(model_input_shape, top, rng)
#model = pickle.load(open("TrainedModel.pkl.gz", "rb"))
train_indices = np.arange(int(np.sum(Train_sizes)))
rng.shuffle(train_indices)
###################################Building the optimizer##############################################
optimizer = optimizers.ADAM("bce",
                            True,
                            spectrograms[train_indices[:Train_sizes[0]]],
                            vectors[train_indices[:Train_sizes[0]]],
                            L2=0.0001)  # BCE is binary cross entropy
model.build_optimizer(optimizer)
######################################################################################################

total_train = np.sum(Train_sizes, dtype=np.int32)
validator = model.get_runner(
    spectrograms[total_train:total_train + Validation_size],
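
The data split above shuffles an index array with a seeded RNG and then slices the shuffled indices into train and validation ranges. The same pattern in plain NumPy, with made-up array names and sizes (the project's rng.RNG wrapper is replaced here by NumPy's default_rng):

import numpy as np

rng = np.random.default_rng(42)           # seeded generator for reproducibility
n_samples = 100
x = np.arange(n_samples * 4).reshape(n_samples, 4).astype(np.float32)

indices = np.arange(n_samples)
rng.shuffle(indices)                      # shuffle indices, not the data itself

train_size, val_size = 80, 20
train_idx = indices[:train_size]
val_idx = indices[train_size:train_size + val_size]

x_train, x_val = x[train_idx], x[val_idx]
print(x_train.shape, x_val.shape)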
Example #5
# Notebook-style driver script: process data, train the model, and inspect predictions.
import model2 as mod2
import download as dl
import training_data2 as td2

curr = td2.process_data(2021, "F1")

curr['x_data'].shape

curr['ranking_state']
tdata = td2.get_training_data()

conf = mod2.train_model(10)
conf
print(conf)

mm = mod2.get_model()[0]
mm.summary()

mod2.model_predict()

for k in range(4):
    for i in range(10):
        conf = mod2.train_model(5, False)
        print(conf)
        print(mod2.get_model()[1])
        mod2.model_predict()

    for i in range(10):
        conf = mod2.train_model(5, False)
        print(conf)
        print(mod2.get_model()[1])