def predict():
    '''
    Predict the class of an image. We run it through the same pipeline as
    before: pass the image through the pre-trained VGG16 model, then feed the
    bottleneck prediction to the trained top model.
    '''
    # load the class_indices saved in the earlier step
    class_dictionary = np.load('cache/class_indices.npy',
                               allow_pickle=True).item()
    num_classes = len(class_dictionary)

    # add the path to your test images below
    dbTest = Database(DB_dir="CorelDBDataSet/test",
                      DB_csv="CorelDBDataSetTest.csv")

    print("[INFO] loading and preprocessing image...")
    for image_path in dbTest.get_data().img:
        image = load_img(image_path, target_size=(img_width, img_height))
        image = img_to_array(image)

        # important! otherwise the predictions will be '0'
        image = image / 255

        image = np.expand_dims(image, axis=0)

        # build the VGG16 network
        model = applications.VGG16(include_top=False, weights='imagenet')

        # get the bottleneck prediction from the pre-trained VGG16 model
        bottleneck_prediction = model.predict(image)

        # build the top model
        model = Sequential()
        model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='sigmoid'))

        model.load_weights(top_model_weights_path)

        # use the bottleneck prediction on the top model to get the final classification
        class_predicted = model.predict_classes(bottleneck_prediction)
        inID = class_predicted[0]

        inv_map = {v: k for k, v in class_dictionary.items()}
        label = inv_map[inID]
        probabilities = model.predict_proba(bottleneck_prediction)[0][inID]

        # report the predicted label
        print("Image: {}, Predicted label: {}, Probability: {}".format(
            image_path, label, probabilities))
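# Sketch (assumption, not code from this repository): predict() expects an
# earlier training step to have written 'cache/class_indices.npy' and the
# weights file at top_model_weights_path. One plausible way those artifacts
# get saved, assuming a Keras ImageDataGenerator training pipeline;
# `train_data_dir` and `top_model` are hypothetical names used only for
# illustration here.
def _save_training_artifacts_sketch(train_data_dir, top_model):
    import numpy as np
    from keras.preprocessing.image import ImageDataGenerator

    datagen = ImageDataGenerator(rescale=1. / 255)
    generator = datagen.flow_from_directory(train_data_dir,
                                            target_size=(img_width, img_height),
                                            batch_size=16,
                                            class_mode='categorical',
                                            shuffle=False)

    # class name -> index mapping, reloaded in predict() with np.load(...).item()
    np.save('cache/class_indices.npy', generator.class_indices)

    # after the top model has been trained on the bottleneck features
    top_model.save_weights(top_model_weights_path)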
            value = item['index']
            link = item['link']
            query = '''UPDATE products SET {}={} WHERE LINK='{}' '''.format(
                tmp.upper(), value, link)
            cur.execute(query)
            self.con.commit()
            self.con.close()
        except:
            return False

    def close(self):
        self.con.close()
        return True


if __name__ == '__main__':
    db = Database()
    data = db.get_data()

    sql = SQLite()
    sql.insertmuti(db)
    sql.update('10000.jpg', 'res', 1)
    tmp = sql.select('res', 1)

    for d in data.itertuples():
        d_img, d_cls, d_file = getattr(d, "img"), getattr(d, "cls"), getattr(
            d, "filename")
        sql.insert(d_cls, d_file, d_img)

    sql.close()
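# Note (illustrative sketch, not part of this repository): the UPDATE above
# builds its SQL by string formatting, which breaks if `link` contains a
# quote and is open to SQL injection. sqlite3 supports parameterized values
# for the data (column names still cannot be bound as parameters), e.g.:
def _update_product_sketch(con, column, value, link):
    # `column` must come from a trusted whitelist, since identifiers cannot
    # be passed as bound parameters.
    query = "UPDATE products SET {} = ? WHERE LINK = ?".format(column.upper())
    cur = con.cursor()
    cur.execute(query, (value, link))
    con.commit()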
                d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
                samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
            cPickle.dump(
                samples,
                open(os.path.join(cache_dir, sample_cache), "wb", True))

        return samples


if __name__ == "__main__":
    # set up the two databases (train and test)
    DB_train_dir_param = "../../ReseauDeNeurones/data/train"
    DB_train_csv_param = "database/data_train.csv"
    db_train = Database(DB_train_dir_param, DB_train_csv_param)
    data_train = db_train.get_data()

    DB_test_dir_param = "../../ReseauDeNeurones/data/test"
    DB_test_csv_param = "database/data_test.csv"
    db_test = Database(DB_test_dir_param, DB_test_csv_param)
    data_test = db_test.get_data()

    edge = Edge()

    # check shape
    assert edge_kernels.shape == (5, 2, 2)

    # evaluate database
    APs, prevision = my_evaluate_class(db_train, db_test, f_class=Edge,
                                       d_type=d_type,
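# Sketch (assumption, not shown in this excerpt): the assert above expects a
# module-level `edge_kernels` array of five 2x2 filters. The standard MPEG-7
# edge-histogram filters matching that shape would look like this; the name
# `edge_kernels_sketch` is used here only for illustration.
import numpy as np

edge_kernels_sketch = np.array([
    [[1, -1],
     [1, -1]],             # vertical edge
    [[1, 1],
     [-1, -1]],            # horizontal edge
    [[np.sqrt(2), 0],
     [0, -np.sqrt(2)]],    # 45-degree diagonal edge
    [[0, np.sqrt(2)],
     [-np.sqrt(2), 0]],    # 135-degree diagonal edge
    [[2, -2],
     [-2, 2]],             # non-directional
])
assert edge_kernels_sketch.shape == (5, 2, 2)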