def representative_data_gen():
    found = 0
    samples = 10
    dn = sys.argv[3]
    for fn in os.listdir(dn):
        if not fn.endswith('.jpg'):
            continue
        print("Reading calibration image {}".format(fn))
        fn = os.path.join(dn, fn)
        img = image.load_img(fn, target_size=(width, height))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        found += 1
        yield [x]
        if found >= samples:
            break
    if found < samples:
        raise ValueError("Failed to read %d calibration sample images" % samples)
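# Usage sketch (not part of the original snippet): a generator like
# representative_data_gen() is typically assigned to a TFLite converter's
# representative_dataset for post-training integer quantization. The model
# path in sys.argv[1] and the output path in sys.argv[2] are assumptions
# made here only for illustration.
import sys
import tensorflow as tf

model = tf.keras.models.load_model(sys.argv[1])  # assumed: Keras model to quantize
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
tflite_model = converter.convert()
with open(sys.argv[2], 'wb') as f:  # assumed: output .tflite path
    f.write(tflite_model)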
def preprocess_single_img(filepath, rotation=0, show=False, crop=True, dim=(224, 224), preprocess_function=None):
    '''Preprocesses a single image, given the full path to the image.'''
    img = load_img(filepath)
    img_array = img_to_array(img, dtype='float32')
    orig_img_size = img_array.shape
    rotated = rotate_image(img_array, angle=rotation, show=show, crop=crop)
    img_res = cv2.resize(rotated, dim)
    img_res = img_res / 255.
    img_processed = array_to_img(img_res)
    if preprocess_function is not None:
        # Apply the caller-supplied preprocessing function (e.g. a model's preprocess_input).
        img_processed = preprocess_function(img_processed)
    return [img_processed, orig_img_size]
def process_path(file_path):
    print(file_path)
    file = read_file(file_path)
    file = image.decode_jpeg(file, channels=3)
    file = cast(file, float32)
    file = preprocess_input(file)
    file = image.resize(file, [ROW, COL])
    return file
def is_good_quality(im):
    """
    Given an image (as a np tensor), return True if it is of a good quality, False otherwise.
    :param im: a np tensor 1x256x256x3
    :return: True if the image is of a good quality, False otherwise
    """
    im_copy = im.copy()
    pred = QualityChecker.__classification_model.predict(preprocess_input(im_copy))
    return pred[0][0] > config.QUALITYCHECKER_THRESHOLD
def crop_image(image, boxes, resize=None, save_path=None):
    images = list(map(lambda b: image[b[1]:b[3], b[0]:b[2]], boxes))
    if isinstance(resize, tuple):
        try:
            images = list(map(lambda i: preprocess_input(cv2.resize(i, dsize=resize, interpolation=cv2.INTER_LINEAR)), images))
        except Exception as e:
            print(str(e))
    return images
def img_preprocess(main_boxes, empty_boxes, img):
    resize = (224, 224)
    image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    for index, boxes in enumerate([main_boxes, empty_boxes]):
        each_column = list(map(lambda j: image[j[1]:j[3], j[0]:j[2]], boxes))
        try:
            if index == 0:  # main
                main_images = list(map(lambda j: cv2.resize(j, resize), each_column))
                main_images = list(map(lambda j: preprocess_input(j), main_images))
            else:  # empty
                empty_images = list(map(lambda j: cv2.resize(j, resize), each_column))
                empty_images = list(map(lambda j: preprocess_input(j), empty_images))
        except Exception as e:
            print(e)
    return main_images, empty_images
def bin_pred():
    data = request.get_json(force=True)
    img_url = data[0]['url']
    image = url_to_image(img_url)
    result = binary_model.predict(
        efficientnet.preprocess_input(image).reshape(1, 224, 224, 3))
    print(result)
    prediction = result > 0.5
    print(prediction)
    return f"{prediction[0]}"
def make_single_batch(image_path):
    total_images = sorted(glob.glob(os.path.join(image_path, '*.jpg')))
    main_final = []
    for img in total_images:
        frame = cv2.imread(img)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = preprocess_input(cv2.resize(frame, dsize=(224, 224), interpolation=cv2.INTER_LINEAR))
        # main_final = main_final + frame
        main_final.append(frame)
    print(len(main_final))
    main_final = np.array(main_final)
    return main_final
def map_decode_fn(images):
    images = tf.io.decode_jpeg(images)
    images = tf.image.resize(images, (MLConfig.INPUT_WIDTH, MLConfig.INPUT_HEIGHT), method="nearest")
    images.set_shape((MLConfig.INPUT_WIDTH, MLConfig.INPUT_HEIGHT, 3))
    images = tf.cast(images, tf.float32)
    if MLConfig.BACKBONE == "resnet":
        images = resnet_v2.preprocess_input(images)
    elif MLConfig.BACKBONE == "efficientnet":
        images = efficientnet.preprocess_input(images)
    else:
        raise ValueError("Unsupported backbone: {}".format(MLConfig.BACKBONE))
    return tf.cast(images, tf.float32)
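# Usage sketch (assumption, not from the original snippet): map_decode_fn expects
# raw JPEG bytes, so it would typically be applied after tf.io.read_file in a
# tf.data pipeline. The glob pattern and batch size below are illustrative only.
import tensorflow as tf

dataset = (tf.data.Dataset.list_files("images/*.jpg")  # assumed file layout
           .map(tf.io.read_file, num_parallel_calls=tf.data.AUTOTUNE)
           .map(map_decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
           .batch(32)
           .prefetch(tf.data.AUTOTUNE))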
def classify(path_to_model, test_img_dir):
    result = {}
    model = load_model(path_to_model)
    # get only files from the input directory
    onlyfiles = [f for f in listdir(test_img_dir) if isfile(join(test_img_dir, f))]
    for fname in onlyfiles:
        img = image.load_img(join(test_img_dir, fname), target_size=(image_size, image_size))
        img = image.img_to_array(img)
        img = preprocess_input(img)
        img = np.expand_dims(img, 0)
        y_prob = model.predict(img, batch_size=1)
        if y_prob[0][0] >= 0.5:
            result[fname] = 'male'
        else:
            result[fname] = 'female'
    # save dict as json
    with open('./process_results.json', 'w') as fp:
        json.dump(result, fp)
def post(self):
    imgb64 = request.get_json()['content']
    file_path = 'uploads/tmp.jpg'
    with open(file_path, 'wb') as f:
        f.write(base64.b64decode(imgb64))
    IMG_SIZE = 224
    try:
        img = cv2.resize(cv2.imread(file_path), (IMG_SIZE, IMG_SIZE))
        resized_img = np.expand_dims(img, axis=0)
        proc_img = preprocess_input(resized_img)
        os.remove(file_path)
    except Exception:
        return {'message': "Invalid image"}, 400
    prop = model.predict(proc_img)
    dict_result = {}
    for i, label in enumerate(self.label_names):
        dict_result[label] = round(float(prop[0][i]), 5)
    return {'probabilities': dict_result}
submission = pd.read_csv('c:/LPD_competition/sample.csv')

# mob = MobileNet(
#     include_top=False,
#     input_shape=(128, 128, 3)
# )
eff = EfficientNetB4(include_top=False, input_shape=(128, 128, 3))
# mob.trainable = True
eff.trainable = True

batch_size = 16
epochs = len(x) // batch_size

x = preprocess_input(x)
test = preprocess_input(test)

count = 0
results = 0
for train_index, val_index in kf.split(x, y):
    print(str(count) + 'th fold: starting training')
    x_train = x[train_index]
    x_val = x[val_index]
    y_train = y[train_index]
    y_val = y[val_index]
    # train_set = datagen.flow(
    #     x_train, y_train,
    #     batch_size=batch_size,
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, BatchNormalization, Dense, Activation, Dropout
import pandas as pd
from tensorflow.keras.applications import VGG19, MobileNet, ResNet101, EfficientNetB7, EfficientNetB2
from tensorflow.keras.optimizers import Adam, SGD
from tqdm import tqdm

# Load and preprocess the data
x = np.load("C:/LPD_competition/npy/P_project_x4.npy", allow_pickle=True)
x_pred = np.load('C:/LPD_competition/npy/test.npy', allow_pickle=True)
y = np.load("C:/LPD_competition/npy/P_project_y4.npy", allow_pickle=True)
# print(x.shape, x_pred.shape, y.shape)  # (48000, 128, 128, 3) (72000, 128, 128, 3) (48000, 1000)

x = preprocess_input(x)
x_pred = preprocess_input(x_pred)

idg = ImageDataGenerator(
    # rotation_range=10,  # accuracy dropped
    width_shift_range=(-1, 1),
    height_shift_range=(-1, 1),
    rotation_range=40,
    # shear_range=0.2,  # no change in accuracy
    zoom_range=0.2,
    horizontal_flip=False,
    fill_mode='nearest')
idg2 = ImageDataGenerator()

x_train, x_valid, y_train, y_valid = train_test_split(x,
db.connect.commit()

train_value = df[df['date'] < '2020-09-01']
x_train = train_value.iloc[:, 1:-1].astype('int64')
y_train = train_value['value'].astype('int64').to_numpy()

test_value = df1[df1['date'] >= '2020-09-01']
x_pred = test_value.iloc[:, 1:-1].astype('int64')
y_pred = test_value['value'].astype('int64').to_numpy()

x_train = pd.get_dummies(x_train, columns=["category", "dong"]).to_numpy()
x_pred = pd.get_dummies(x_pred, columns=["category", "dong"]).to_numpy()

x_train = preprocess_input(x_train)
# x_pred = preprocess_input(x_pred)

def RMSE(y_test, y_predict):
    return np.sqrt(mean_squared_error(y_test, y_predict))

x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.9, random_state=77, shuffle=True)
# x_train = x_train.reshape(38833, 38, 28, 3)
print(x_train.shape, x_val.shape, x_pred.shape)  # (3124915, 42) (347213, 42) (177408, 42)
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, BatchNormalization, Dense, Activation
from tensorflow.python.keras.applications.efficientnet import EfficientNetB7

SEED = 66
IMAGE_SIZE = (128, 128, 3)
EPOCH = 50
OPTIMIZER = Adam(learning_rate=1e-3)

# data load
x = np.load("C:/LPD_competition/npy/150project_x.npy", allow_pickle=True)
y = np.load("C:/LPD_competition/npy/150project_y.npy", allow_pickle=True)
x_pred = np.load('C:/LPD_competition/npy/150test.npy', allow_pickle=True)
print(x_pred.shape)

x = preprocess_input(x)
x_pred = preprocess_input(x_pred)

idg = ImageDataGenerator(width_shift_range=(-1, 1), height_shift_range=(-1, 1), shear_range=0.2, zoom_range=0.1)
idg2 = ImageDataGenerator()
idg3 = ImageDataGenerator(width_shift_range=(-1, 1), height_shift_range=(-1, 1), shear_range=0.2, zoom_range=0.1)

# y = np.argmax(y, axis=1)
import pandas as pd
import tensorflow as tf

print("GPUs: ", len(tf.config.experimental.list_physical_devices('GPU')))

x = np.load('../../data/npy/train_x_224_40.npy', allow_pickle=True)
y = np.load('../../data/npy/train_y_224_40.npy', allow_pickle=True)
target = np.load('../../data/npy/predict_x_224_40.npy', allow_pickle=True)
print(x.shape)
print(y.shape)
print(target.shape)

# generator
from tensorflow.keras.applications.efficientnet import preprocess_input
x = preprocess_input(x)
target = preprocess_input(target)

idg = ImageDataGenerator(zoom_range=0.2, height_shift_range=(-1, 1), width_shift_range=(-1, 1), rotation_range=40)
idg2 = ImageDataGenerator()

from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x, y, train_size=0.9, random_state=66, shuffle=True)
#     seed=23
# )
# val_set = datagen2.flow(
#     x_val, y_val,
#     batch_size=batch_size,
#     seed=23
# )
# test_set = datagen2.flow(
#     x_test, y_test,
#     batch_size=batch_size,
#     seed=23
# )

x_train = preprocess_input(x_train)
x_val = preprocess_input(x_val)
x_test = preprocess_input(x_test)
test = preprocess_input(test)
print(test.shape)

# Model
x = eff.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.3)(x)
x = Dense(128)(x)
x = Activation('relu')(x)
x = GaussianDropout(0.4)(x)
def main(
    root_dir: str,
    split: str,
    input_shape: Tuple[int, int, int],
    n_classes: int,
    margin: float,
    scale: float,
    embedding_dimension: int,
    momentum: float,
    weight_decay: float,
    batch_size: int,
    epochs: int,
    seed: int,
    model_path: str,
    precision_policy: Optional[str] = None,
    **kwargs,
):
    set_gpu_memory_growth()

    if precision_policy is None:
        precision_policy = "float32"
    policy = tf.keras.mixed_precision.Policy(precision_policy)
    tf.keras.mixed_precision.set_global_policy(policy)

    read_config = tfds.ReadConfig(shuffle_seed=seed)
    builder = tfds.ImageFolder(root_dir)
    ds: tf.data.Dataset = builder.as_dataset(
        split=split,
        batch_size=batch_size,
        shuffle_files=True,
        decoders={"label": onehot_encoding(depth=n_classes)},
        read_config=read_config,
        as_supervised=True,
    )

    height, width, n_channels = input_shape
    data_augmentation = tf.keras.Sequential([
        RandomRotation(factor=0.05, fill_mode="nearest", seed=seed),
        RandomTranslation(height_factor=0.1, width_factor=0.1, fill_mode="wrap", seed=seed),
        RandomZoom(height_factor=0.1, fill_mode="reflect", seed=seed),
        RandomContrast(factor=0.1, seed=seed),
        CenterCrop(height=height, width=width),
    ])
    ds = (ds.map(lambda x, y: (preprocess_input(x), y), num_parallel_calls=AUTOTUNE)
          .map(lambda x, y: (data_augmentation(x), y), num_parallel_calls=AUTOTUNE)
          .unbatch())

    valid_size = 1000
    valid_ds = ds.take(valid_size).batch(batch_size).prefetch(AUTOTUNE)
    train_ds = (ds.skip(valid_size)
                .shuffle(buffer_size=100000)
                .batch(batch_size, drop_remainder=True)
                .prefetch(AUTOTUNE))

    model = create_model(
        input_shape=input_shape,
        n_classes=n_classes,
        embedding_dimension=embedding_dimension,
        weights_decay=weight_decay,
        use_pretrain=False,
    )

    optimizer = tf.keras.optimizers.SGD(momentum=momentum)
    model_checkpoint = ModelCheckpoint(
        # "./model/weights.{epoch:03d}-{val_loss:.3f}.hdf5",
        model_path,
        monitor="val_loss",
        save_best_only=True,
    )

    def scheduler(epoch, lr):
        if epoch < 30:
            return 1e-1
        elif epoch < 60:
            return 1e-2
        elif epoch < 90:
            return 1e-3
        else:
            return 1e-4

    lr_scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(histogram_freq=1)

    model.compile(
        optimizer=optimizer,
        loss=ClippedValueLoss(
            loss_func=AdditiveAngularMarginLoss(
                loss_func=tf.keras.losses.CategoricalCrossentropy(),
                margin=margin,
                scale=scale,
                dtype=policy.compute_dtype,
            ),
            x_min=tf.keras.backend.epsilon(),
            x_max=1.0,
        ),
        metrics=[tf.keras.metrics.CategoricalAccuracy()],
    )
    model.fit(
        train_ds,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=valid_ds,
        callbacks=[
            model_checkpoint,
            lr_scheduler,
            tensorboard_callback,
        ],
        verbose=1,
    )
from sklearn.model_selection import StratifiedKFold, KFold
from keras import Sequential
from keras.layers import *
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam, SGD
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.applications.efficientnet import preprocess_input

# Load and preprocess the data
x = np.load("../../data/npy/P_project_x8.npy", allow_pickle=True)
y = np.load("../../data/npy/P_project_y8.npy", allow_pickle=True)

x = preprocess_input(x)  # (96000, 128, 128, 3)

idg = ImageDataGenerator(
    width_shift_range=(-1, 1),
    height_shift_range=(-1, 1),
    rotation_range=40,
    zoom_range=0.2,
    fill_mode='nearest')
idg2 = ImageDataGenerator()

'''
- rotation_range: range of random image rotations (degrees)
- width_shift, height_shift: range of random horizontal or vertical shifts
  (as a fraction of the original width or height)
def preprocess_image(img, target_height, target_width):
    img = tf.image.resize(img, (target_height, target_width))
    img = preprocess_input(img)
    return img
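# Usage sketch (assumption, not from the original snippet): a helper like
# preprocess_image is usually mapped over an (image, label) tf.data.Dataset.
# train_ds and the 224x224 target size below are illustrative placeholders.
import tensorflow as tf

train_ds = train_ds.map(
    lambda img, label: (preprocess_image(img, 224, 224), label),
    num_parallel_calls=tf.data.AUTOTUNE)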
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.applications.efficientnet import preprocess_input
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = y_train.reshape(y_train.shape[0], )
y_test = y_test.reshape(y_test.shape[0], )

x_train = preprocess_input(x_train)
x_test = preprocess_input(x_test)
x_train = x_train.astype('float32') / 255.  # preprocessing
x_test = x_test.astype('float32') / 255.  # preprocessing

vgg19 = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
vgg19.trainable = True

# OneHotEncoding
from sklearn.preprocessing import OneHotEncoder
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
ohencoder = OneHotEncoder()
ohencoder.fit(y_train)
y_train = ohencoder.transform(y_train).toarray()
y_test = ohencoder.transform(y_test).toarray()
# model = ResNet50(weights='imagenet')
# model = EfficientNetB0(
# model = tf.keras.applications.EfficientNetB0(
model = EfficientNetB0(weights=None, input_shape=(96, 96, 3), classes=10)

checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
latest = tf.train.latest_checkpoint(checkpoint_dir)
print(latest)
model.load_weights(latest)
print(model.summary())

train_images = preprocess_input(train_images)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Create the checkpoint callback
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)

# model.fit(train_images, train_labels, epochs=30,
#           validation_data=(test_images, test_labels),
#           callbacks=[cp_callback]
#           )
'''
st.sidebar.write('\n')
if st.sidebar.button("Click Here to Classify"):
    if uploaded_file is None:
        st.sidebar.write("Please upload an Image to Classify")
    else:
        with st.spinner('Classifying ...'):
            try:
                image = np.asarray(u_img)
                if image.shape[2] == 4:
                    image = image[:, :, :3]
                img_pixels = preprocess_input(image)
                img_pixels = cv2.resize(img_pixels, (256, 256))
                img_pixels = np.expand_dims(img_pixels, axis=0)
                img_pixels_tensor = tf.convert_to_tensor(img_pixels, dtype=tf.int32)
                sl_preds = predict_sleeve_length(img_pixels_tensor)
                dl_preds = predict_dress_length(img_pixels_tensor)
                sl_prediction = np.argmax(sl_preds)
                dl_prediction = np.argmax(dl_preds)
                st.success('Done!')
                st.sidebar.header("Algorithm Predicts: ")
                sl_probability = "{:.3f}".format(float(sl_preds[0][sl_prediction] * 100))
                dl_probability = "{:.3f}".format(float(dl_preds[0][dl_prediction] * 100))
                st.sidebar.write(f'Predicted sleeve length: **{sl_classes[sl_prediction]}**')
                st.sidebar.write('\n')
                st.sidebar.write('Probability: ', sl_preds[0][sl_prediction] * 100, '%')
                st.sidebar.write('\n')
# from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.efficientnet import EfficientNetB0
from tensorflow.keras.preprocessing import image
# from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.efficientnet import preprocess_input, decode_predictions
import numpy as np

# model = ResNet50(weights='imagenet')
# model = EfficientNetB0(
# model = tf.keras.applications.EfficientNetB0(
model = EfficientNetB0(weights='imagenet')
print(model.summary())

# img_path = 'elephant.jpg'
img_path = 'dog.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
# Decode the results into a list of tuples (class, description, probability),
# one list per sample in the batch
print('Predicted:', decode_predictions(preds, top=3)[0])
# Prediction result: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
print(len(preds[0]))
'total_count': 13}

front_len = 3
# num = 2
main_model = tf.keras.models.load_model('./models/cls/ciga_v1.h5')

image = cv2.imread(info['image_name'])
image = cv2.resize(image, (960, 960))
images = crop_image(image, info['section_1']['front_corr'], (224, 224))
ori_images = crop_image(image, info['section_1']['front_corr'], (0, 0))

for num in range(front_len):
    img = np.expand_dims(images[num], axis=0)
    img = preprocess_input(img)
    main_pred = main_model.predict(img)
    rst = CLASS_NAMES[np.argmax(main_pred)]
    if rst == 'bottom':
        rst = BarCode(ori_images[num]).decode()
    os.system('clear')
    print(rst)
    cv2.namedWindow('ddd', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('ddd', 960, 960)
    cv2.imshow('ddd', images[num])
    # cv2.imshow('ddda', cv2.cvtColor(main_final[0], cv2.COLOR_BGR2RGB))