def load_lasa(shape_idx):
    """Load demonstrations from LASA dataset.

    The LASA dataset contains 2D handwriting motions recorded from a
    Tablet-PC. It can be found `here
    <https://bitbucket.org/khansari/lasahandwritingdataset>`_
    Take a look at the `detailed explanation
    <http://cs.stanford.edu/people/khansari/DSMotions#SEDS_Benchmark_Dataset>`_
    for more information.

    The following plot shows multiple demonstrations for the same shape.

    .. plot::

        import matplotlib.pyplot as plt
        from datasets import load_lasa
        X, Xd, Xdd, dt, shape_name = load_lasa(0)
        plt.figure()
        plt.title(shape_name)
        plt.plot(X[0], X[1])
        plt.show()

    Parameters
    ----------
    shape_idx : int
        Choose demonstrated shape, must be within range(30).

    Returns
    -------
    X : array-like, shape (n_task_dims, n_steps, n_demos)
        Positions

    Xd : array-like, shape (n_task_dims, n_steps, n_demos)
        Velocities

    Xdd : array-like, shape (n_task_dims, n_steps, n_demos)
        Accelerations

    dt : float
        Time between steps

    shape_name : string
        Name of the Matlab file from which we load the demonstrations
        (without suffix)
    """
    dataset_path = get_common_dataset_path()
    if not os.path.isdir(dataset_path + "lasa_data"):
        # Download and extract the dataset on first use.
        url = urllib.request.urlopen(
            "http://bitbucket.org/khansari/lasahandwritingdataset/get/"
            "38304f7c0ac4.zip")
        z = zipfile.ZipFile(io.BytesIO(url.read()))
        z.extractall(dataset_path)
        os.rename(dataset_path + z.namelist()[0],
                  dataset_path + "lasa_data" + os.sep)
    dataset_path += "lasa_data" + os.sep + "DataSet" + os.sep
    demos, shape_name = _load_from_matlab_file(dataset_path, shape_idx)
    X, Xd, Xdd, dt = _convert_demonstrations(demos)
    return X, Xd, Xdd, dt, shape_name

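# Usage sketch for load_lasa: the demos axis is last, so each demonstration
# can be plotted separately (this extends the docstring's plot directive to
# all demonstrations of a shape).
import matplotlib.pyplot as plt

X, Xd, Xdd, dt, shape_name = load_lasa(0)
plt.title(shape_name)
for d in range(X.shape[2]):
    plt.plot(X[0, :, d], X[1, :, d])
plt.show()
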
def mask_2_base64(mask):
    """Encode a binary mask as a base64 string of a zlib-compressed PNG."""
    img_pil = Image.fromarray(np.array(mask, dtype=np.uint8))
    img_pil.putpalette([0, 0, 0, 255, 255, 255])
    bytes_io = io.BytesIO()
    img_pil.save(bytes_io, format='PNG', transparency=0, optimize=0)
    png_bytes = bytes_io.getvalue()
    return base64.b64encode(zlib.compress(png_bytes)).decode('utf-8')

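# A sketch of the inverse operation, relying on the same module-level imports
# as mask_2_base64 above (`base64_2_mask` is an illustrative name, not from
# the original code): base64-decode, zlib-decompress, then decode the PNG.
def base64_2_mask(s):
    png_bytes = zlib.decompress(base64.b64decode(s))
    img = Image.open(io.BytesIO(png_bytes))
    return np.array(img, dtype=np.uint8)
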
def upload_and_retrieve_file():
    file = request.files['file']
    if file and allowed_file(file.filename):
        file.save(
            os.path.join(app.config['RESULT_IMAGE_FOLDER'], file.filename))
    with open(
            os.path.join(app.config['RESULT_IMAGE_FOLDER'],
                         app.config['RESULT_FILE']), 'rb') as bites:
        return send_file(io.BytesIO(bites.read()),
                         attachment_filename=app.config['RESULT_FILE'],
                         mimetype='image/*')

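# Hypothetical route registration for the handler above. Note that the
# handler targets the Flask 1.x API; in Flask >= 2.0, send_file's
# `attachment_filename` argument was renamed to `download_name`.
app.add_url_rule('/result', view_func=upload_and_retrieve_file,
                 methods=['POST'])
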
def estimate_channel():
    global normalize_mode
    # The request body is a base64-encoded MATLAB .mat file containing a
    # cell array 'cel' with the image and the noise variance.
    b = scipy.io.loadmat(io.BytesIO(base64.b64decode(request.data)))
    image = b['cel'][0][0]
    var = b['cel'][0][1][0][0]
    # Normalize the input according to the active normalization mode.
    if normalize_mode == 1:
        image = (image + 5) / 10.0
    elif normalize_mode == 4:
        image = image / 5
    print(var)
    y = Test_network.test(image, 0)
    # Undo the normalization on the network output.
    if normalize_mode == 1:
        y = y * 10 - 5
    elif normalize_mode == 4:
        y = y * 5
    # Return the estimate as a base64-encoded .mat file.
    output = io.BytesIO()
    scipy.io.savemat(output, {'out': y[0]})
    return base64.b64encode(output.getvalue())

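# A client-side sketch for the endpoint above (the URL and image are
# placeholders): pack the image and noise variance into a 1x2 MATLAB cell
# array named 'cel', base64-encode the .mat bytes, and decode the reply.
import base64
import io

import numpy as np
import requests
import scipy.io

image = np.zeros((64, 64))  # placeholder input channel
cell = np.empty((1, 2), dtype=object)
cell[0, 0] = image
cell[0, 1] = np.array([[0.1]])  # noise variance
payload = io.BytesIO()
scipy.io.savemat(payload, {'cel': cell})
resp = requests.post('http://localhost:5000/estimate_channel',
                     data=base64.b64encode(payload.getvalue()))
estimate = scipy.io.loadmat(io.BytesIO(base64.b64decode(resp.content)))['out']
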
def read_from_url(self, url_content, url_style_list):
    import urllib.request
    import io
    import ssl

    # Skip certificate verification; this restores the same behavior as
    # before.
    context = ssl._create_unverified_context()

    print(url_content)
    with urllib.request.urlopen(url_content, context=context) as url_c:
        content_file = io.BytesIO(url_c.read())
        print("content file type:", type(content_file))
        content_image_base = np.array(Image.open(content_file))
        print("content image shape:", content_image_base.shape)

    # Resize image to fit model
    content_image = self.utils.resize_image(content_image_base,
                                            self.base_width,
                                            self.mod_aspect_ratio)

    style_images = []
    for url_style in url_style_list:
        with urllib.request.urlopen(url_style, context=context) as url_s:
            style_file = io.BytesIO(url_s.read())
            style_image_base = np.array(Image.open(style_file))
        style_images.append(
            self.utils.resize_image(style_image_base, self.base_width,
                                    self.mod_aspect_ratio,
                                    target_shape=content_image.shape))

    self.assert_image_shape(content_image, style_images)
    return content_image, style_images

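# Usage sketch with placeholder URLs, assuming `styler` is an instance of
# the class defining read_from_url: both images are downloaded into memory
# and decoded with PIL before resizing.
content_image, style_images = styler.read_from_url(
    'https://example.com/content.jpg',
    ['https://example.com/style_a.jpg', 'https://example.com/style_b.jpg'])
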
def create_lsun_wide(tfrecord_dir, lmdb_dir, width=512, height=384,
                     max_images=None):
    assert width == 2 ** int(np.round(np.log2(width)))  # width must be a power of two
    assert height <= width
    print('Loading LSUN dataset from "%s"' % lmdb_dir)
    import lmdb  # pip install lmdb
    import cv2  # pip install opencv-python
    import io
    with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
        total_images = txn.stat()['entries']
        if max_images is None:
            max_images = total_images
        with TFRecordExporter(tfrecord_dir, max_images,
                              print_progress=False) as tfr:
            for idx, (_key, value) in enumerate(txn.cursor()):
                try:
                    try:
                        img = cv2.imdecode(
                            np.frombuffer(value, dtype=np.uint8), 1)
                        if img is None:
                            raise IOError('cv2.imdecode failed')
                        img = img[:, :, ::-1]  # BGR => RGB
                    except IOError:
                        img = np.asarray(PIL.Image.open(io.BytesIO(value)))

                    # Skip images narrower than the target crop.
                    ch = int(np.round(width * img.shape[0] / img.shape[1]))
                    if img.shape[1] < width or ch < height:
                        continue

                    # Center-crop vertically, resize, and pad onto a square
                    # canvas.
                    img = img[(img.shape[0] - ch) // 2:(img.shape[0] + ch) // 2]
                    img = PIL.Image.fromarray(img, 'RGB')
                    img = img.resize((width, height), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose([2, 0, 1])  # HWC => CHW
                    canvas = np.zeros([3, width, width], dtype=np.uint8)
                    canvas[:, (width - height) // 2:(width + height) // 2] = img
                    tfr.add_image(canvas)
                    print('\r%d / %d => %d ' %
                          (idx + 1, total_images, tfr.cur_images), end='')
                except Exception:
                    print(sys.exc_info()[1])
                if tfr.cur_images == max_images:
                    break
    print()

def main():
    global args
    args = parse_args()
    file = request.files['content_image']
    if file and allowed_file(file.filename):
        file.save(
            os.path.join(app.config['CONTENT_IMAGE_FOLDER'], 'content.png'))
    file = request.files['style_image']
    if file and allowed_file(file.filename):
        file.save(os.path.join(app.config['STYLE_IMAGE_FOLDER'], 'style.png'))
    render_single_image()
    with open('/app/neural-style-tf/result_image/result/result.png',
              'rb') as bites:
        return send_file(io.BytesIO(bites.read()),
                         attachment_filename=app.config['RESULT_FILE'],
                         mimetype='image/*')

def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None,
                partition=False):
    print('Loading LSUN dataset from "%s"' % lmdb_dir)
    import lmdb  # pip install lmdb
    import cv2  # pip install opencv-python
    import io
    with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
        total_images = txn.stat()['entries']
        if max_images is None:
            max_images = total_images
        if not partition:
            tfr = TFRecordExporter(tfrecord_dir, max_images)
        else:
            # Split the exported images evenly into train and val sets.
            path, name = os.path.split(tfrecord_dir)
            tfr_train = TFRecordExporter('{}/train/{}'.format(path, name),
                                         max_images // 2)
            tfr_val = TFRecordExporter('{}/val/{}'.format(path, name),
                                       max_images // 2)
        for _idx, (_key, value) in enumerate(txn.cursor()):
            try:
                try:
                    img = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), 1)
                    if img is None:
                        raise IOError('cv2.imdecode failed')
                    img = img[:, :, ::-1]  # BGR => RGB
                except IOError:
                    img = np.asarray(PIL.Image.open(io.BytesIO(value)))

                # Center-crop to a square, then resize to the target
                # resolution.
                crop = np.min(img.shape[:2])
                img = img[(img.shape[0] - crop) // 2:(img.shape[0] + crop) // 2,
                          (img.shape[1] - crop) // 2:(img.shape[1] + crop) // 2]
                img = PIL.Image.fromarray(img, 'RGB')
                img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
                img = np.asarray(img)
                img = img.transpose([2, 0, 1])  # HWC => CHW
                if not partition:
                    tfr.add_image(img)
                elif _idx % 2:
                    tfr_train.add_image(img)
                else:
                    tfr_val.add_image(img)
            except Exception:
                print(sys.exc_info()[1])
            if _idx == max_images - 1:
                break

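# Usage sketch for the two LSUN exporters above; the output and lmdb paths
# are placeholders for a downloaded LSUN category.
create_lsun('datasets/lsun-bedroom-256', 'lsun/bedroom_train_lmdb',
            resolution=256, max_images=100000)
create_lsun_wide('datasets/lsun-car-512', 'lsun/car_train_lmdb',
                 width=512, height=384)
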
def log_image(file_writer, tensor, epoch_no, tag):
    height, width, channel = tensor.shape
    # Map values from [-1, 1] to [0, 255] before casting to uint8.
    tensor = (tensor + 1) * 127.5
    tensor = tensor.astype('uint8')
    image = Image.fromarray(tensor)
    import io
    # Encode the image as PNG in memory and wrap it in a Summary proto.
    output = io.BytesIO()
    image.save(output, format='PNG')
    image_string = output.getvalue()
    output.close()
    tf_img = Summary.Image(height=height, width=width, colorspace=channel,
                           encoded_image_string=image_string)
    summary = Summary(value=[Summary.Value(tag=tag, image=tf_img)])
    file_writer.add_summary(summary, epoch_no)
    file_writer.flush()

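# Usage sketch (TensorFlow 1.x summary API, matching the Summary proto used
# above; `generated_sample` is a placeholder HWC array in [-1, 1]).
file_writer = tf.summary.FileWriter('./logs')
log_image(file_writer, generated_sample, epoch_no=5, tag='generated/sample')
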
def show_tensor(img, show_img=True):
    to_pil = transforms.ToPILImage()
    img = to_pil(img.squeeze())  # we could also use test_set[1121][0].numpy()
    if show_img:
        arr = np.unique(np.asarray(img))
        print(arr)
        # Show the pixel-level annotation, first with the default colormap...
        im = plt.imshow(img.convert('L'))
        plt.show()
        # ...then with the colormap scaled to the largest label value.
        im = plt.imshow(img.convert('L'), cmap=cmap, vmin=0, vmax=arr[-1])
        plt.show()
        plt.axis('off')
    # Optionally, a legend could be built from 'label_list.mat' by mapping
    # each label value in arr to its colormap patch via im.cmap(im.norm(...)).
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight')
    buf.seek(0)
    # img.save('/home/malrawi/GAN_seg_img_414/' + 'gg-col' + '.png')  # can be used to save the image
    return img

def getTile(req_target, i, j, zoom_level, targetTileSave=False):
    input_img_p = Image.new('RGBA', (TILE_SIZE, TILE_SIZE), (0, 0, 0, 0))
    error_flg = 0
    if req_target['type'] == 'localTile':
        # Build the tile path from the format template.
        path_format = req_target['format']
        path_format = path_format.replace('{z}', str(zoom_level))
        path_format = path_format.replace('{x}', str(i))
        path_format = path_format.replace('{y}', str(j))
        path_format = path_format.replace('{-y}',
                                          str(pow(2, zoom_level) - j - 1))
        input_image_path = os.path.join(req_target['path'], path_format)
        if os.path.isfile(input_image_path):
            input_img_p = Image.open(input_image_path)
            input_img_p = input_img_p.resize((TILE_SIZE, TILE_SIZE))
        else:
            print("Can't get tile : %d - %d - %d" % (zoom_level, i, j))
            error_flg = 1
            return input_img_p, error_flg
    else:
        if req_target['type'] == 'tile':
            url_format = req_target['format']
            url_format = url_format.replace('{z}', str(zoom_level))
            url_format = url_format.replace('{x}', str(i))
            url_format = url_format.replace('{y}', str(j))
            input_image_url = req_target['url'] + url_format
        elif req_target['type'] == 'wms':
            # Convert the tile indices to a lat/lon bounding box.
            start_point = tile2latlon(i, j, zoom_level)
            end_point = tile2latlon(i + 1, j + 1, zoom_level)
            url_format = req_target['format']
            url_format = url_format.replace('{minx}', str(end_point[1]))
            url_format = url_format.replace('{miny}', str(start_point[0]))
            url_format = url_format.replace('{maxx}', str(start_point[1]))
            url_format = url_format.replace('{maxy}', str(end_point[0]))
            url_format = url_format.replace('{output_width}', str(TILE_SIZE))
            url_format = url_format.replace('{output_height}', str(TILE_SIZE))
            input_image_url = req_target['url'] + url_format
        print("input : " + input_image_url)
        res = requests.get(input_image_url, verify=False)
        if res.status_code == 200:
            content_type = res.headers["content-type"]
            if 'image' not in content_type:
                print("Not image URL : %d - %d - %d" % (zoom_level, i, j))
                error_flg = 1
                return input_img_p, error_flg
            resfile = io.BytesIO(res.content)
            input_img_p = Image.open(resfile)
            input_img_p = input_img_p.resize((TILE_SIZE, TILE_SIZE))
        else:
            print("Can't get tile : %d - %d - %d" % (zoom_level, i, j))
            error_flg = 1
            return input_img_p, error_flg
    if targetTileSave:
        targetTileSavePath = os.path.join(TARGET_TILE_DIR, str(zoom_level),
                                          str(i), str(j) + ".png")
        if not os.path.isdir(os.path.dirname(targetTileSavePath)):
            os.makedirs(os.path.dirname(targetTileSavePath))
        input_img_p.save(targetTileSavePath)
    return input_img_p, error_flg

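# A hypothetical request descriptor for a slippy-map tile server; the
# 'format' template uses the same {z}/{x}/{y} fields substituted above.
osm_target = {
    'type': 'tile',
    'url': 'https://tile.openstreetmap.org/',
    'format': '{z}/{x}/{y}.png',
}
tile, error_flg = getTile(osm_target, 909, 403, 10)
if not error_flg:
    tile.save('sample_tile.png')
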
][idx]

with open('/iri-index/predicted/apf107.tmp', 'w') as out:
    # Copy the existing index file, counting its lines.
    nlines = 0
    with open('/iri-index/chain/apf107.dat') as fh:
        for line in fh:
            print(line, file=out, end='')
            nlines += 1

    # Fetch the Kp forecast (.mat) and convert each Kp value to ap.
    data = {}
    with urllib.request.urlopen(
            "https://spaceweather.gfz-potsdam.de/fileadmin/ruggero/Kp_forecast/forecast_figures/KP_FORECAST_CURRENT.mat"
    ) as res:
        content = res.read()
    bio = io.BytesIO(content)
    mat = scipy.io.loadmat(bio)
    for time, kp in zip(mat['t'], mat['kp'][0]):
        dt = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
        data[dt] = kp_to_ap(kp)

    keys_sorted = sorted(data.keys())
    first = keys_sorted[0]
    last = keys_sorted[-1]
    first_day = datetime.date(year=first.year, month=first.month,
                              day=first.day)
    last_day = datetime.date(year=last.year, month=last.month, day=last.day)
    day = datetime.date(1958, 1, 1) + datetime.timedelta(days=nlines)

def _get_img_as_bytestring(img):
    im = Image.fromarray(img)
    buf = io.BytesIO()
    im.save(buf, format='JPEG')
    return buf.getvalue()

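# The inverse round-trip: decode the JPEG bytestring back into an array
# (an illustrative helper, not part of the original module).
def _get_img_from_bytestring(bytestring):
    return np.asarray(Image.open(io.BytesIO(bytestring)))
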
plt.ylim([0, 70])
plt.yticks(np.arange(0, 70, 20))
# plt.xlabel("Time [s]")
# plt.ylabel("RL command")
# plt.legend(loc='upper left')

# ax = plt.subplot2grid((3, 4), (2, 3), rowspan=1, colspan=1)
# plt.plot(sim_t, sim_yaw, "-g", label="yaw")
# plt.grid(True)
# plt.xlabel("Time [s]")
# plt.ylabel("Yaw [rad]")

# Transfer PNG to TIFF via an in-memory buffer.
png1 = io.BytesIO()
fig1.savefig(png1, format="png")
# Load this image into PIL
png2 = Image.open(png1)
# Save as TIFF
png2.save("result_plot.tiff")
png1.close()

# plt.legend(loc='upper left')
# ax = plt.subplot(3, 1, 2)
# for i in range(1, 6):

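# The same in-memory PNG -> TIFF round-trip as a reusable helper (the name
# is illustrative, not from the original script).
def fig_to_tiff(fig, path):
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    Image.open(buf).save(path)
    buf.close()
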
def create_tf_example(name, img_dir, ann_dir):
    IMG_FILENAME = '%s.jpg' % name
    ANN_FILENAME = '%s.mat' % name
    IMG_FULL_PATH = os.path.join(img_dir, IMG_FILENAME)
    ANN_FULL_PATH = os.path.join(ann_dir, ANN_FILENAME)

    with tf.gfile.GFile(IMG_FULL_PATH, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    key = hashlib.sha256(encoded_jpg).hexdigest()

    label = 'hand'
    width, height = image.size
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []

    coords = coords_from_mat(ANN_FULL_PATH)
    for coord in coords:
        # Compute the axis-aligned bounding box of each hand polygon.
        x_max, x_min, y_max, y_min = 0, float('inf'), 0, float('inf')
        for y, x in coord:
            x_max, x_min = max(x, x_max), min(x, x_min)
            y_max, y_min = max(y, y_max), min(y, y_min)
        # Normalize coordinates to [0, 1] and clip to the image bounds.
        xmin.append(max(float(x_min) / width, 0.0))
        ymin.append(max(float(y_min) / height, 0.0))
        xmax.append(min(float(x_max) / width, 1.0))
        ymax.append(min(float(y_max) / height, 1.0))
        classes_text.append(label.encode('utf8'))
        classes.append(label_map_dict[label])
        truncated.append(0)
        poses.append('Frontal'.encode('utf8'))
        difficult_obj.append(0)

    return tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(
            IMG_FILENAME.encode('utf8')),
        'image/source_id': dataset_util.bytes_feature(
            IMG_FILENAME.encode('utf8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
        'image/object/class/text': dataset_util.bytes_list_feature(
            classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(
            difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
    }))

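# A minimal writer loop for the function above (TensorFlow 1.x API;
# 'hands_train.record', `names`, and the directory names are placeholders).
writer = tf.python_io.TFRecordWriter('hands_train.record')
for name in names:
    tf_example = create_tf_example(name, img_dir='images',
                                   ann_dir='annotations')
    writer.write(tf_example.SerializeToString())
writer.close()
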
import requests
import scipy.io
import io
import numpy as np

r = requests.get(
    'https://teachingfiles.blob.core.windows.net/probml/tennis_data.mat')
with io.BytesIO(r.content) as f:
    data = scipy.io.loadmat(f)

W = np.concatenate(data['W'].squeeze())
G = data['G'] - 1  # W[G[i,0]] is winner of game i, W[G[i,1]] is loser
M = W.shape[0]  # number of players (M = 107)
N = G.shape[0]  # number of games (N = 1801)

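# A quick sanity check on the loaded data (illustrative only, not part of
# the original snippet): count wins per player and report the leader.
wins = np.bincount(G[:, 0].astype(int), minlength=M)
print(W[np.argmax(wins)], wins.max())
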
def output_plot(prop,
                plot_units,
                csb=os.path.join(os.environ['MUSIC_BOX_BUILD_DIR'],
                                 "output.csv"),
                spc=os.path.join(
                    settings.BASE_DIR,
                    "dashboard/static/config/camp_data/species.json")):
    matplotlib.use('agg')
    (figure, axes) = mpl_helper.make_fig(top_margin=0.6, right_margin=0.8)
    csv_results_path = csb
    csv = pandas.read_csv(csv_results_path)
    titles = csv.columns.tolist()
    csv.columns = csv.columns.str.strip()
    subset = csv[['time', str(prop.strip())]]

    # Make unit conversion if needed.
    if plot_units:
        converter = vectorize(
            create_unit_converter(model_output_units, plot_units))
        if is_density_needed(model_output_units, plot_units):
            subset[str(prop.strip())] = converter(
                subset[str(prop.strip())], {
                    'density': csv['ENV.number_density_air'].iloc[[-1]],
                    'density units': 'mol/m-3 '
                })
        else:
            subset[str(prop.strip())] = converter(subset[str(prop.strip())])

    subset.plot(x="time", ax=axes)

    # Set labels and title.
    axes.set_xlabel(r"time / s")
    name = prop.split('.')[1]
    if prop.split('.')[0] == 'CONC':
        if 'myrate__' not in prop.split('.')[1]:
            axes.set_ylabel("(" + plot_units + ")")
            axes.set_title(beautifyReaction(name))
            # Unit converter for the tolerance.
            if plot_units:
                ppm_to_plot_units = create_unit_converter('ppm', plot_units)
            else:
                ppm_to_plot_units = create_unit_converter(
                    'ppm', model_output_units)
            if is_density_needed('ppm', plot_units):
                density = float(csv['ENV.number_density_air'].iloc[[-1]])
                pp = float(tolerance_dictionary(spc)[name])
                tolerance = ppm_to_plot_units(
                    pp, {'density': density, 'density units': 'mol/m-3 '})
            else:
                pp = float(tolerance_dictionary(spc)[name])
                tolerance = ppm_to_plot_units(pp)
            # This determines the minimum value of the y axis range:
            # the minimum value of ymax is
            # tolerance * tolerance_yrange_factor.
            tolerance_yrange_factor = 5
            ymax_minimum = tolerance_yrange_factor * tolerance
            property_maximum = subset[str(prop.strip())].max()
            if ymax_minimum > property_maximum:
                axes.set_ylim(-0.05 * ymax_minimum, ymax_minimum)
        else:
            name = name.split('__')[1]
            axes.set_ylabel(r"(mol/m^3 s^-1)")
            axes.set_title(beautifyReaction(name))
    elif prop.split('.')[0] == 'ENV':
        axes.set_title(sub_props_names(name))
        if name == 'temperature':
            axes.set_ylabel(r"K")
        elif name == 'pressure':
            axes.set_ylabel(r"Pa")
        elif name == 'number_density_air':
            axes.set_ylabel(r"moles/m^3")

    axes.grid(True)
    axes.get_legend().remove()

    # Store the rendered figure in an in-memory buffer.
    buffer = io.BytesIO()
    figure.savefig(buffer, format='png')
    plt.close(figure)
    return buffer

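# A hypothetical Django view serving the buffer returned by output_plot
# ('CONC.O3' and 'ppb' are placeholder arguments).
from django.http import HttpResponse

def plot_view(request):
    buf = output_plot('CONC.O3', 'ppb')
    return HttpResponse(buf.getvalue(), content_type='image/png')
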