Example #1
import numpy as np
import tensorflow as tf

# model, preprocess_input_mob, save_image, mean_score and std_score are
# module-level helpers defined elsewhere in the project
def nima_classifier(**kwargs):
    # read parameters
    population = kwargs.get('population')
    generation = kwargs.get('generation')
    tensors = kwargs.get('tensors')
    f_path = kwargs.get('f_path')
    objective = kwargs.get('objective')
    _resolution = kwargs.get('resolution')
    _stf = kwargs.get('stf')

    images = True  # save the best individual's image at the end

    fn = f_path + "gen" + str(generation).zfill(5)
    fitness = []
    best_ind = 0

    # set the comparison according to the objective (min/max);
    # the lambdas read fit and max_fit from the enclosing scope at call time
    fit = 0
    if objective == 'minimizing':
        max_fit = float('inf')
        condition = lambda: (fit < max_fit)  # minimizing
    else:
        max_fit = float('-inf')
        condition = lambda: (fit > max_fit)  # maximizing

    number_tensors = len(tensors)
    with tf.device('/CPU:0'):

        # NIMA classifier
        x = np.stack(
            [tensors[index].numpy() for index in range(number_tensors)],
            axis=0)
        x = preprocess_input_mob(x)
        scores = model.predict(x, batch_size=number_tensors, verbose=0)

        # scores
        for index in range(number_tensors):

            if generation % _stf == 0:
                save_image(tensors[index], index, fn, 3)  # save image

            mean = mean_score(scores[index])
            std = std_score(scores[index])
            # fitness is the mean NIMA score (mean - std is an alternative)
            fit = mean

            if condition():
                max_fit = fit
                best_ind = index
            fitness.append(fit)
            population[index]['fitness'] = fit

    # save best indiv
    if images:
        save_image(tensors[best_ind], best_ind, fn, 3, addon='_best')
    return population, population[best_ind]
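Several of these examples call mean_score and std_score without defining them. A minimal sketch of what these helpers typically compute, assuming the standard NIMA formulation in which the network outputs a probability distribution over the ratings 1 to 10:

import numpy as np

def mean_score(scores):
    # expectation of the predicted rating distribution over 1..10
    si = np.arange(1, 11)
    return np.sum(scores * si)

def std_score(scores):
    # standard deviation of the predicted rating distribution
    si = np.arange(1, 11)
    mean = mean_score(scores)
    return np.sqrt(np.sum(((si - mean) ** 2) * scores))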
Example #2
# assumed imports (the original may use standalone Keras instead of tf.keras);
# mean_score and std_score come from the project's score utilities
import sys

import numpy as np
import tensorflow as tf
from path import Path  # path.py, which provides Path(...).files()
from tensorflow.keras.applications.nasnet import NASNetMobile, preprocess_input
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array


def nima_nasnet(_dir, progress=False):
    target_size = (224, 224)
    imgs = Path(_dir).files('*.png')
    imgs += Path(_dir).files('*.jpg')
    imgs += Path(_dir).files('*.jpeg')

    with tf.device('/GPU:0'):
        base_model = NASNetMobile((224, 224, 3),
                                  include_top=False,
                                  pooling='avg',
                                  weights=None)
        x = Dropout(0.75)(base_model.output)
        x = Dense(10, activation='softmax')(x)

        model = Model(base_model.input, x)
        model.load_weights('weights/nasnet_weights.h5')

        score_list = []
        total_imgs = len(imgs)
        for i, img_path in enumerate(imgs):
            img = load_img(img_path, target_size=target_size)
            x = img_to_array(img)
            x = np.expand_dims(x, axis=0)

            x = preprocess_input(x)

            scores = model.predict(x, batch_size=1, verbose=0)[0]

            mean = mean_score(scores)
            std = std_score(scores)

            file_name = Path(img_path).name.lower()
            score_list.append((file_name, mean, std))

            if progress and i % 100 == 0:
                sys.stdout.write("\r%d/%d" % (i, total_imgs))
                sys.stdout.flush()

            # print("Evaluating : ", img_path)
            # print("NIMA Score : %0.3f +- (%0.3f)" % (mean, std))

        # assumes numeric file stems such as "0.png", "1.png", ...
        return sorted(score_list, key=lambda x: int(x[0].split('.')[0]))
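A hypothetical invocation, assuming the weights file from the snippet is in place and a renders directory holds the images to score (the result is ordered by numeric file stem, not by score):

if __name__ == '__main__':
    results = nima_nasnet('renders', progress=True)
    for file_name, mean, std in results[:10]:
        print("%s : %0.3f +- (%0.3f)" % (file_name, mean, std))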
Example #3
# assumed imports (the original may use standalone Keras instead of tf.keras)
import os
import time

import numpy as np
import pandas as pd
import tensorflow as tf
from path import Path
from tensorflow.keras.applications.inception_resnet_v2 import (
    InceptionResNetV2, preprocess_input)
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array

# DEBUG, target_size, train_image_dir, test_image_dir, train_filename,
# test_filename, print_debug, save_pickle, mean_score and std_score are
# module-level settings and helpers defined elsewhere in the project
def main():

    print_debug(DEBUG)
    # for dataset in [train_image_dir, test_image_dir]:
    for dataset in [test_image_dir, train_image_dir]:
        print('=' * 72)
        print('INCEPTIONNET')
        print('PROCESSING', dataset)
        print('=' * 72)

        df = pd.DataFrame(columns=['item_id', 'mean', 'std'])

        print("Loading images from directory : ", dataset)
        imgs = Path(dataset).files('*.png')
        imgs += Path(dataset).files('*.jpg')
        imgs += Path(dataset).files('*.jpeg')

        N = len(imgs)
        i = 0
        # with tf.device("CPU:0"):
        with tf.device("/device:GPU:0"):
            print('>> init')
            base_model = InceptionResNetV2(input_shape=(None, None, 3),
                                           include_top=False,
                                           pooling='avg',
                                           weights=None)
            x = Dropout(0.75)(base_model.output)
            x = Dense(10, activation='softmax')(x)

            print('>> load weights')
            model = Model(base_model.input, x)
            model.load_weights('weights/inception_resnet_weights.h5')

            score_list = []

            df_temp = pd.DataFrame()
            STEP = 3 if DEBUG else 1000
            todir = train_filename if dataset == train_image_dir else test_filename

            for img_path in imgs:
                if i % STEP == 0:
                    end_step = time.time()
                    print('----------------------------')
                    print('{}/{}'.format(i, N))
                    if i > 0:
                        print('time elapse:', end_step - start_step)
                        df = pd.concat([df, df_temp], axis=0)
                        save_pickle(df, todir)
                        df_temp = pd.DataFrame()
                    start_step = time.time()
                if DEBUG: print("\n>> Evaluating : ", img_path)

                img = load_img(img_path, target_size=target_size)
                x = img_to_array(img)
                x = np.expand_dims(x, axis=0)

                x = preprocess_input(x)

                scores = model.predict(x, batch_size=1, verbose=0)[0]

                mean = mean_score(scores)
                std = std_score(scores)

                file_name = Path(img_path).name.lower()
                score_list.append((file_name, mean))

                if DEBUG: print("NIMA Score : %0.3f +- (%0.3f)" % (mean, std))

                filename_w_ext = os.path.basename(img_path)
                filename, file_extension = os.path.splitext(filename_w_ext)

                temp = pd.DataFrame({
                    'item_id': [filename],
                    'mean': [mean],
                    'std': [std]
                })
                if DEBUG: print(temp)

                # accumulate into the temporary frame; it is flushed into df
                # and checkpointed every STEP images
                df_temp = pd.concat([df_temp, temp], axis=0)

                i = i + 1

        df = pd.concat([df, df_temp], axis=0)
        df = df.reset_index(drop=True)
        print(df)
        save_pickle(df, todir)
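save_pickle is not part of the snippet; a minimal sketch of a compatible helper, assuming it simply pickles the DataFrame to the given path:

import pickle

def save_pickle(obj, path):
    # serialize any Python object (here a pandas DataFrame) to disk
    with open(path, 'wb') as f:
        pickle.dump(obj, f)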
Example #4
    score_list = []

    total_score_mean = []
    total_score_std = []
    for img_path in imgs:
        img = load_img(img_path, target_size=target_size)
        x = img_to_array(img)
        x = np.expand_dims(x, axis=0)

        x = preprocess_input(x)

        scores = model.predict(x, batch_size=1, verbose=0)[0]

        mean = mean_score(scores)
        std = std_score(scores)

        # NIMA means lie in [1, 10], so this threshold always passes
        if mean < 1000:
            score_mean = mean
            score_std = std
        file_name = Path(img_path).name.lower()
        score_list.append((file_name, mean))

        print("Evaluating : ", img_path)
        print("NIMA Score : %0.3f +- (%0.3f)" % (mean, std))
        print()
        total_score_mean.append(score_mean)
        total_score_std.append(score_std)

    if rank_images:
        print("*" * 40, "Ranking Images", "*" * 40)
Example #5
import csv

import numpy as np
from tqdm import tqdm

# pool (a multiprocessing.Pool), model, preprocess_for_evaluation, imgs and
# batch_size are assumed to be set up earlier in the script; the loop
# variable is called img_batch so it does not shadow the batch() chunking
# helper sketched after this example
scored_images = []
for img_batch in tqdm(batch(imgs, batch_size), total=len(imgs) // batch_size + 1):
    try:
        images = pool.map(preprocess_for_evaluation, img_batch)
    except OSError:
        # fall back to per-image processing, skipping unreadable files
        images = []
        new_batch = []
        for img_path in img_batch:
            try:
                images.append(preprocess_for_evaluation(img_path))
                new_batch.append(img_path)
            except OSError as e:
                print("Couldn't process {}".format(img_path))
                print(e)
                continue
        img_batch = new_batch
    x = np.array(images)
    scores = model.predict(x, batch_size=x.shape[0], verbose=0)
    # assumes batch-aware mean_score/std_score that reduce each row separately
    means = mean_score(scores)
    stds = std_score(scores)
    for mean, std, img_path in zip(means, stds, img_batch):
        scored_images.append((mean, std, img_path))
scored_images = sorted(scored_images, reverse=True)

# write results to csv file
with open('results.csv', 'w', encoding="utf-8") as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=';', lineterminator='\n')
    csvwriter.writerow(['filename', 'mean', 'std'])
    for mean, std, img_path in scored_images:
        print("{:.3f} +- ({:.3f})  {}".format(mean, std, img_path))
        csvwriter.writerow([img_path, mean, std])
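The batch() helper used at the top of this example is not shown; a minimal sketch, assuming it yields fixed-size chunks of a list:

def batch(iterable, n=1):
    # yield successive chunks of at most n items
    for i in range(0, len(iterable), n):
        yield iterable[i:i + n]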
Example #6
from os import listdir
from os.path import isdir, isfile, join

import cv2
import numpy as np
import pandas as pd

# create_VGG, create_MobileNet, create_Inception, mean_score and std_score
# are assumed to be defined elsewhere in the project
def generate_scores(parser):
    
    # get the model name
    model_name = parser.net
    
    # check folder 
    if parser.file is None and parser.folder is None:
        raise Exception('Indicate a file or a folder path')
    
    # list the files to evaluate
    if parser.folder is not None and isdir(parser.folder):
        files = [join(parser.folder, file) for file in listdir(parser.folder)]
    else:
        files = []
    
    # check if the file exists
    if parser.file is not None and len(files) == 0 and isfile(parser.file):
        files = [parser.file]
    
    # if there is no folder or file, raise an exception
    if len(files) == 0:
        raise Exception("File or folder doesn't exist")
    
    # print the files to evaluate
    if parser.vb == 1:
        print('---------- Files to evaluate ----------')
        for i in range(len(files)):
            print('{}: {}'.format(i, files[i]))
        print('---------------------------------------')

    # create vgg, mobilenet or inception
    if model_name == 'vgg_16':
        model = create_VGG()
    elif model_name == 'mobilenet':
        model = create_MobileNet()
    elif model_name == 'inception':
        model = create_Inception()
    else:
        raise Exception('Unknown model name: {}'.format(model_name))
    
    # load the weights of the network
    model.load_weights('./weights/{}.h5'.format(model_name))
    
    images = []
    # load the images into memory as normalized 224x224 RGB arrays
    for file in files:
        image = cv2.imread(file, 1)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, (224, 224)) / 255
        images.append(image)
        
    # convert list into numpy array
    images = np.array(images)
    
    y_pred = model.predict(images)
    mean_scores = [mean_score(pred) for pred in y_pred]
    std_scores = [std_score(pred) for pred in y_pred]
    
    print('-------------- Evaluation --------------')
    for i in range(len(files)):
        score = '{} ({}{})'.format(round(mean_scores[i], 3), chr(177), round(std_scores[i], 3))
        print('{}: {} \n\t{}'.format(i, files[i], score))
    print('----------------------------------------')
    
    # save the predictions (np.stack coerces all three columns to strings)
    if parser.save:
        data = np.stack([files, mean_scores, std_scores], axis=1)
        df = pd.DataFrame(data=data, columns=['file', 'mean', 'std'])
        df.to_csv('evaluations.csv', index=False)
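generate_scores reads its options from a parsed-arguments object. A hypothetical CLI wiring whose attribute names (net, file, folder, vb, save) match what the function expects:

import argparse

if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('--net', default='mobilenet',
                    choices=['vgg_16', 'mobilenet', 'inception'])
    ap.add_argument('--file', default=None)
    ap.add_argument('--folder', default=None)
    ap.add_argument('--vb', type=int, default=1)
    ap.add_argument('--save', action='store_true')
    generate_scores(ap.parse_args())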