#Define image transformers print('Shape mean_array : ' + str(mean_array.shape)) print('Shape net: ' + str(net.blobs[input_layer].data.shape)) net.blobs[input_layer].reshape( 1, # batch size 3, # channel IMAGE_WIDTH, IMAGE_HEIGHT) # image size transformer = caffe.io.Transformer( {input_layer: net.blobs[input_layer].data.shape}) transformer.set_mean(input_layer, mean_array) transformer.set_transpose(input_layer, (2, 0, 1)) ''' Making predicitions ''' im_files = imagelist_in_depth(r'c:\Users\szmike\Documents\DATA\studio2') im_file = im_files[1] image = cv2.imread(im_file) print(im_file) img = cv2.imread(im_file, cv2.IMREAD_COLOR) img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT) net.blobs[input_layer].data[...] = transformer.preprocess(input_layer, img) out = net.forward() print(out) cv2.imshow("Output", image) cv2.waitKey(0) #Reading image paths
# Arrays to store object points and image points from all the images. objpoints = [] # 3d point in real world space imgpoints = [] # 2d points in image plane. with np.load(calib_mtx_file) as X: mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')] fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') out_file='calibration'+'.avi' out = cv2.VideoWriter(out_file,fourcc, 1, th_size,True) image_files=fh.imagelist_in_depth(image_dir,level=1,date_sort=False) image_files=[image_file for image_file in image_files if os.path.basename(image_file).startswith('cam')] gridsize=10 clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(gridsize,gridsize)) i=0 for i, image_file in enumerate(image_files): if not i%1==0: continue print(image_file) im=cv2.imread(image_file)
# Build a type->label lookup from a semicolon-delimited CSV, load a trained
# model, then iterate over a directory of images, square-cropping and
# resizing each one for prediction (loop body continues beyond this chunk).

# NOTE(review): the file handle from open() is never closed — a `with`
# block would be safer, but the code is left as-is here.
reader = csv.DictReader(open(typedict_3_file, 'rt'), delimiter=';')
for row in reader:
    type_dict_3[row['type']] = row['label']

# LOAD MODEL
pred = load_model(model_file)
image_mean = 128  # presumably the per-channel mean used at training time — TODO confirm

def keysWithValue(aDict, target):
    # Return the sorted list of keys whose value equals `target`
    # (reverse lookup in a dict with non-unique values).
    return sorted(key for key, value in aDict.items() if target == value)

image_list_indir = imagelist_in_depth(image_dir, level=1)
preds = []
for i, im_name in enumerate(image_list_indir):
    # i=0
    # im_name=image_list_indir[i]
    image_file = os.path.join(image_dir, im_name)
    img = Image.open(image_file).convert('RGB')
    # crop.crop is a project helper — assumed to produce a square crop;
    # verify against its definition.
    img_square = crop.crop(img)
    im = np.asarray(img_square)
    # Suppress skimage's precision-loss warning from img_as_ubyte.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        data = img_as_ubyte(resize(im, (imgSize, imgSize), order=1))
    rgb_image = data.astype('float32')
    # ... loop body continues past this chunk ...
# -*- coding: utf-8 -*- """ Created on Fri Mar 17 20:12:01 2017 @author: SzMike """ import file_helper import os import json #base_folder = r'd:\DATA\Alinari\' base_folder = os.path.curdir user = '******' # picturio image_dir = os.path.join(r'd:\DATA\RealEstate\117094') image_list_file = os.path.join(base_folder, 'input', 'image_list.json') image_list_indir = file_helper.imagelist_in_depth(image_dir, level=0) image_label = {} for image in image_list_indir: image_label[image] = '0' with open(image_list_file, 'w') as imagelistfile: json.dump(image_list_indir, imagelistfile)
def new_list(self, image_dir):
    """Switch this object to a new working directory.

    Records *image_dir* as the current directory, rescans it for images
    (non-recursive), and marks any previously computed features/scores
    as stale.
    """
    self.current_dir = image_dir
    # Fresh, non-recursive listing of the directory's images.
    self.image_list = file_helper.imagelist_in_depth(image_dir, level=0)
    # Cached results belong to the old listing — invalidate both caches.
    self.is_Features_ready = self.is_Scores_ready = False
# Build a DataFrame of image paths + class names (derived from folder
# names) and run the AADB aesthetic-scoring model over them.
from sklearn import cluster
import aes_Picturio
import aes_AADB

base_dir = os.path.join('e:', 'OneDrive', 'AES', 'Photo_DB')
image_dir = os.path.join(base_dir, 'RealEstate')
save_dir = os.path.join(r'D:\DATA\RealEstate\AES')
score_file_merged = os.path.join(image_dir, 'scores_merged.csv')

"""
Scoring model - manual scores
"""
# Recursive (level=1) listing: class folders sit one level below image_dir.
image_list = fh.imagelist_in_depth(image_dir, level=1)

"""
Class names from folder names
"""
# NOTE(review): splitting on '\\' ties this to Windows paths;
# os.path.basename(os.path.dirname(f)) would be portable — left as-is.
class_names = [os.path.dirname(f).split('\\')[-1] for f in image_list]

df_db = pd.DataFrame(data={'Filename': image_list, 'Class name': class_names})
df_scores = df_db.copy()  # working copy; df_db keeps the raw listing

"""
Scoring model - AADB
"""
# aes_AADB.scoring is a project class — get_scores presumably returns one
# score dict per filename; verify against its definition.
scoring = aes_AADB.scoring()
image_all_scores = scoring.get_scores(df_db['Filename'].values)
#scores = [image_all_scores[i]['AestheticScore'] for i in range(len(image_all_scores)) if image_all_scores[i]['AestheticScore']!= 'None']