def run_algorithms(list, algorithm): # Creating a sort object sort = Algorithms(list) if algorithm == 'insertionsort' or algorithm == 'insertsort': return sort.InsertSort() elif algorithm == 'selectionsort' or algorithm == 'selectsort': return sort.SelectSort() elif algorithm == 'countingsort' or algorithm == 'countsort': return sort.CountSort() elif algorithm == 'shellsort': return sort.ShellSort() elif algorithm == 'quicksort': return sort.QuickSort(list, 0, len(list)) elif algorithm == 'mergesort': return sort.MergeSort(list) elif algorithm == 'heapsort': return sort.HeapSort(list) elif algorithm == 'radixsort': return sort.RadixSort() else: tools.error(algorithm) sys.exit(2)
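# Hedged usage sketch (not from the source): it assumes the Algorithms class dispatched to above is importable and that each sort method returns the sorted sequence; the sample data is made up.
if __name__ == '__main__':
    sample = [5, 3, 8, 1, 9, 2]
    print(run_algorithms(sample, 'mergesort'))  # expected output: [1, 2, 3, 5, 8, 9]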
def define_optimizer(self, opts): # Get graph: graph = tf.get_default_graph() # Define optimizer: if opts.optimizer == 'adam': optimizer = tf.train.AdamOptimizer(opts.initial_learning_rate) elif opts.optimizer == 'sgd': optimizer = tf.train.GradientDescentOptimizer( opts.initial_learning_rate) elif opts.optimizer == 'momentum': optimizer = tf.train.MomentumOptimizer(opts.initial_learning_rate, opts.momentum) elif opts.optimizer == 'rmsprop': optimizer = tf.train.RMSPropOptimizer(opts.initial_learning_rate, momentum=opts.momentum) else: tools.error('Optimizer not recognized.') # Operation to compute the gradients: L_comb = graph.get_tensor_by_name('L_comb:0') gradients = optimizer.compute_gradients(L_comb) # Operation to apply the gradients: optimizer.apply_gradients(gradients, name='apply_grads_adam') return gradients
def create_protocol(request): import hashlib if request.method == 'POST': protocol_form = CreateProtocolForm(request.POST) if protocol_form.is_valid(): try: cd = protocol_form.cleaned_data if ProtocolList.objects.filter( name=cd['name'], user_id=request.user.id).exists(): return error('Duplicate record!') protocol = ProtocolList(name=cd['name'], user_id=request.user.id) protocol.save() softwares = request.POST.getlist('software', '') parameters = request.POST.getlist('parameter', '') steps = [] for index, software in enumerate(softwares): if parameters[index]: m = hashlib.md5() m.update(software + ' ' + parameters[index].strip()) steps.append( Protocol(software=software, parameter=parameters[index], parent=protocol.id, hash=m.hexdigest(), user_id=request.user.id)) Protocol.objects.bulk_create(steps) return success('Your protocol has been created!') except Exception, e: return error(e) else: return error(str(protocol_form.errors))
def status(ctx): """Display some information and statistics""" title("Current status") section("Settings") click.echo("GEOZONES_HOME: {0}".format(ctx.obj["home"])) section("Levels") for level in ctx.obj["levels"]: click.echo("{id}: {label}".format(**level.__dict__)) section("downloads") urls = (level.urls for level in ctx.obj["levels"] if level.urls) urls = set([url for lst in urls for url in lst]) for url in urls: filename = basename(url).strip() click.echo("{0} ... ".format(filename), nl=False) if os.path.exists(os.path.join(DL_DIR, filename)): success("present") else: error("absent") section("coverage") zones = DB() total = 0 properties = ("population", "area", "wikipedia") totals = dict((prop, 0) for prop in properties) def countprop(name): results = zones.aggregate( [ {"$match": {name: {"$exists": True}, "level": {"$in": [l.id for l in ctx.obj["levels"]]}}}, {"$group": {"_id": "$level", "value": {"$sum": 1}}}, ] ) return dict((r["_id"], r["value"]) for r in results) def display_prop(name, count, total): click.echo("\t{0}: ".format(name), nl=False) if count == 0: func = error elif count == total: func = success else: func = warning func("{0}/{1}".format(count, total)) counts = dict((p, countprop(p)) for p in properties) for level in ctx.obj["levels"]: count = zones.count({"level": level.id}) total += count click.echo("{0}: {1}".format(level.id, count)) for prop in properties: prop_count = counts[prop].get(level.id, 0) totals[prop] += prop_count display_prop(prop, prop_count, count) click.secho("TOTAL: {0}".format(total), bold=True) for prop in properties: prop_total = totals[prop] display_prop(prop, prop_total, total)
def process_dataset(self, workdir, db, url, extractor): '''Extract territories from a given file for a given level with a given extractor function''' loaded = 0 filename = join(workdir, basename(url)) with fiona.open('/', vfs='zip://{0}'.format(filename), encoding='utf8') as collection: info('Extracting {0} elements from {1} ({2} {3})'.format( len(collection), basename(filename), collection.driver, to_string(collection.crs) )) for polygon in collection: try: zone = extractor(polygon) if not zone: continue zone['keys'] = dict((k, v) for k, v in zone.get('keys', {}).items() if v is not None) geom = shape(polygon['geometry']) if extractor.simplify: geom = geom.simplify(extractor.simplify) if geom.geom_type == 'Polygon': geom = MultiPolygon([geom]) elif geom.geom_type != 'MultiPolygon': warning('Unsupported geometry type "{0}" for "{1}"'.format(geom.geom_type, zone['name'])) continue zoneid = '/'.join((self.id, zone['code'])) zone.update(_id=zoneid, level=self.id, geom=geom.__geo_interface__) db.find_one_and_replace({'_id': zoneid}, zone, upsert=True) loaded += 1 except Exception as e: error('Error extracting polygon {0}: {1}', polygon['properties'], str(e)) info('Loaded {0} zones for level {1} from file {2}'.format(loaded, self.id, filename)) return loaded
def add_job(request): if request.method == 'POST': job_form = SingleJobForm(request.POST) if job_form.is_valid(): cd = job_form.cleaned_data try: if cd['parameter'].find(';') == -1: cd['parameter'] += ';' protocol = ProtocolList.objects.get(id=cd['protocol']) if protocol.check_owner( request.user.id) or request.user.is_superuser: job = Queue( protocol_id=cd['protocol'], parameter=cd['parameter'], run_dir=get_config('env', 'workspace'), user_id=request.user.id, input_file=cd['input_files'], ) if check_disk_quota_lock(request.user.id): job.save() return success('Successfully added job into the queue.') else: return error( 'You have exceeded the disk quota limit! Please delete some files!' ) else: return error('You are not owner of the protocol.') except Exception, e: return error(e) return error(str(job_form.errors))
def manage_reference(request): if request.method == 'POST': reference_form = CreateReferenceForm(request.POST) if reference_form.is_valid(): cd = reference_form.cleaned_data if References.objects.filter(user_id=request.user.id, name=cd['name']).exists(): return error('Duplicate record!') ref = References( name=cd['name'], path=cd['path'], user_id=request.user.id, ) ref.save() return success(ref.id) else: return error(str(reference_form.errors)) else: if request.user.is_superuser: reference_list = References.objects.all() else: reference_list = References.objects.filter( user_id=request.user.id).all() return render(request, 'ui/manage_reference.html', {'references': reference_list})
def joint_path(self, p1_flat, p2_flat): # Parameters if self.load_torch: if self.dirmodel == None: tools.error('Missing dirmodel.') dirbase = self.dirmodel + 'tramo3/' [pj_dense_weight, pj_dense_bias] = read_weights_dense(dirbase + 'l1_dense.txt', varname='pj_dense') [pj_bn_mean, pj_bn_variance, pj_bn_offset, pj_bn_scale] = \ read_weights_bn(dirbase+'l2_bn.txt', varname='pj_bn') else: if self.xavier_init: pj_dense_weight = weight_variable_xavier( [768, 256], 768, 256, name='pj_dense_weight') pj_dense_bias = bias_variable([256], name='pj_dense_bias') # pj_bn_mean = bias_variable([256], name='pj_bn_mean') # pj_bn_variance = weight_variable_xavier([256], 256, 256, name='pj_bn_variance') # pj_bn_offset = bias_variable([256], name='pj_bn_offset') # pj_bn_scale = weight_variable_xavier([256], 256, 256, name='pj_bn_scale') else: pj_dense_weight = weight_variable([768, 256], name='pj_dense_weight') pj_dense_bias = bias_variable([256], name='pj_dense_bias') # pj_bn_mean = weight_variable([256], name='pj_bn_mean') # pj_bn_variance = weight_variable([256], name='pj_bn_variance') # pj_bn_offset = weight_variable([256], name='pj_bn_offset') # pj_bn_scale = weight_variable([256], name='pj_bn_scale') pj_bn_mean, \ pj_bn_variance, \ pj_bn_offset, \ pj_bn_scale = self.bn_variables(256, basename='pj_bn_scale') # Probability of keeping units in the dropout layer: keep_prob = tf.placeholder(tf.float32, name='keep_prob') # Model pj_join = tf.concat([p1_flat, p2_flat], 1, name='pj_join') pj_dense = tf.add(tf.matmul(pj_join, pj_dense_weight), pj_dense_bias, name='pj_dense') pj_bn = tf.nn.batch_normalization(pj_dense, pj_bn_mean, pj_bn_variance, pj_bn_offset, pj_bn_scale, BN_EPS, name='pj_bn') pj_relu1 = tf.nn.relu(pj_bn, name='pj_relu1') pj_dropout = tf.nn.dropout(pj_relu1, keep_prob, name='pj_dropout') return pj_dropout
def process_dataset(self, workdir, db, url, extractor): '''Extract territories from a given file for a given level with a given extractor function''' loaded = 0 filename = join(workdir, basename(url)) # Identify the shapefile to avoid a multiple file error on GDAL 2 with ZipFile(filename) as z: candidates = [n for n in z.namelist() if n.endswith('.shp')] if len(candidates) != 1: raise ValueError( 'Unable to find a unique shapefile in {0}'.format( filename)) shp = candidates[0] with fiona.open('/{0}'.format(shp), vfs='zip://{0}'.format(filename), encoding='utf8') as collection: info('Extracting {0} elements from {1} ({2} {3})'.format( len(collection), basename(filename), collection.driver, to_string(collection.crs))) for polygon in collection: try: zone = extractor(polygon) if not zone: continue zone['keys'] = dict( (k, v) for k, v in zone.get('keys', {}).items() if v is not None) geom = shape(polygon['geometry']) if extractor.simplify: geom = geom.simplify(extractor.simplify) if geom.geom_type == 'Polygon': geom = MultiPolygon([geom]) elif geom.geom_type != 'MultiPolygon': warning( 'Unsupported geometry type "{0}" for "{1}"'.format( geom.geom_type, zone['name'])) continue zoneid = '/'.join((self.id, zone['code'])) zone.update(_id=zoneid, level=self.id, geom=geom.__geo_interface__) db.find_one_and_replace({'_id': zoneid}, zone, upsert=True) loaded += 1 except Exception as e: error('Error extracting polygon {0}: {1}', polygon['properties'], str(e)) info('Loaded {0} zones for level {1} from file {2}'.format( loaded, self.id, filename)) return loaded
def body_path(self): # Body image input: x_b = tf.placeholder(tf.float32, shape=[None, 128, 128, 3], name='x_b') # Check we are not applying any correction if loading the Torch model: if self.load_torch and (self.correct_block2 or self.correct_avgpool): tools.error( 'Not possible to load Torch model and apply any correction.') # Parameters if self.load_torch: if self.dirmodel == None: tools.error('Missing dirmodel.') var_dict = self.load_weights_body() else: var_dict = self.random_weights_body() # Model block_configurations = [[2, 1, 1, 0], [2, 1, 1, 0], [2, 1, 1, 0]] # Loop adding all blocks: ini = 0 x = x_b for block_idx in range(3): var_dict_block = tools.get_subdictionary(ini, 8, var_dict) strideW = block_configurations[block_idx][0] strideH = block_configurations[block_idx][1] padW = block_configurations[block_idx][2] padH = block_configurations[block_idx][3] if (not self.correct_block2) and block_idx == 1: x = add_block_v2(x, var_dict_block, strideW, strideH, padW, padH, 2, block_idx + 1) else: x = add_block(x, var_dict_block, strideW, strideH, padW, padH, 2, block_idx + 1) ini = ini + 8 # average pooling if self.correct_avgpool: p2_avgpool = tf.layers.average_pooling2d(x, pool_size=16, strides=1, padding="VALID", name='p2_avgpool') else: p2_avgpool = tf.layers.average_pooling2d(x, pool_size=3, strides=16, padding="VALID", name='p2_avgpool') # flattening body_path_end = tf.reshape(p2_avgpool, [-1, 128], name='body_path_end') return body_path_end
def delete_reference(request): if request.method == 'GET': if 'ref' in request.GET: ref = References.objects.get(id=request.GET['ref']) if ref.check_owner(request.user.id) or request.user.is_superuser: ref.delete() return success('Your reference has been deleted.') else: return error('You are not owner of the reference.') else: return error('Missing parameter.') else: return error('Method error.')
def evaluate(xvals, coeff, data, variance, spec): """ Evaluate polynomial at xvals, repairing values where variance==0 with data/spec. Parameters: ----------- xvals: 1D float ndarray X values for data. coeff: 1D float ndarray The polynomial coefficients (highest to lowest power). data: 1D float ndarray The data vector to fit. variance: 1D float ndarray The variance vector. spec: 1D float ndarray The spectrum vector. Returns: -------- fiteval: 1D float ndarray The estimated spectrum. Example: -------- See example in polyfunc.fit() """ # Check inputs: nx = np.size(xvals) nd = np.size(data) nv = np.size(variance) ns = np.size(spec) if nx != nd or nx != nv or nx != ns: tools.error("The lengths of data ({:d}), variance ({:d}), spec ({:d}), " "and/or xvals ({:d}) are incompatible.".format(nd, nv, ns, nx)) # Find values with zero variance: zerov = (variance == 0.0) # Evaluate coefficients: fiteval = np.polyval(coeff, xvals) # Use actual data where variance is 0: fiteval[zerov] = data[zerov] / spec[zerov] return fiteval
def delete_step(request): if request.method == 'GET': if 'id' in request.GET: try: step = Protocol.objects.get(id=int(request.GET['id'])) if step.check_owner( request.user.id) or request.user.is_superuser: step.delete() return success('Your step has been deleted.') else: return error('You are not owner of the step.') except Exception, e: return error(e) else: return error('Unknown parameter.')
def terminate_job(request): if request.method == 'POST': terminate_form = JobManipulateForm(request.POST) if terminate_form.is_valid(): cd = terminate_form.cleaned_data try: job = Queue.objects.get(id=cd['job']) if job.check_owner(request.user.id) or request.user.is_superuser: job.terminate_job() return success('Your job will be terminated soon.') else: return error('You are not the owner of the job.') except Exception, e: return error(e) else: return error(str(terminate_form.errors))
def import_learning(request): if request.session['learning']: if request.session['learning']['a'] != 'no records': learn = Prediction( step_hash=request.session['learning']['hash'], type=request.session['learning']['type'], a=request.session['learning']['a'], b=request.session['learning']['b'], r=request.session['learning']['r'], ) learn.save() return success('Imported.') else: return error('Cannot import records!') else: return error('No learning record found.')
def on_all_m_click(self, do=0): # do: 0 = recall memory, 1 = add to memory, 2 = subtract from memory, 3 = clear memory if do == 3: self.memory = 0.0 elif not self.exp.is_error(): self.update_expression() if do == 0: self.history.out() r = str(self.memory) if r.endswith('.0'): r = r.replace('.0', '') self.exp.put_data_on_exp(r) self.update_monitor() elif do in (1, 2): res = get_result(self.exp.exp, self.ui.radio_bt_1.isChecked()) if error(res): self.history.out() self.last_invalid_exp = self.exp.exp self.exp.set_exp(res) self.update_monitor() else: try: exp_float = float(res) self.memory += exp_float if do == 1 else -exp_float except: self.history.out() self.last_invalid_exp = self.exp.exp self.exp.set_exp(ERRORS[2]) self.update_monitor()
def fetch_learning(request): import urllib2, json query_string = request.GET['hash'] + ',' + request.GET['type'] + ',' + str( get_config('env', 'cpu')) + ',' + str(get_config( 'env', 'memory')) + ',' + str(os_to_int()) api_bus = get_config('ml', 'api') + '/Index/share/q/' + query_string try: req = urllib2.Request(api_bus) res_data = urllib2.urlopen(req) res = json.loads(res_data.read()) session_dict = { 'hash': request.GET['hash'], 'type': request.GET['type'], 'a': res['a'], 'b': res['b'], 'r': res['r'], } request.session['learning'] = session_dict template = loader.get_template('ui/fetch_learning.html') context = RequestContext(request, { 'step': res, }) return success(template.render(context)) except Exception, e: return error(api_bus)
def save_score(self): """Saves the current score""" nick = nick_selection.get_nick() with open(self.SCORES_FILE, 'a') as fh: fh.write(nick+"\t"+str(self.score)+"\n") # Send to remote server params = self.REMOTE_SCORES_PARAMS params["do"] = "add" params["nick"] = nick params["score"] = self.score try: r = requests.get(self.REMOTE_SCORES_URL, params=params) if r.status_code != 200 or 'ack' not in r.text: tools.error("Unable to post high score.") except requests.exceptions.ConnectionError as e: tools.error("Network error when submitting high score: "+str(e))
def delete_protocol(request): if request.method == 'GET': if 'id' in request.GET: protocol_parent = ProtocolList.objects.get( id=int(request.GET['id'])) if protocol_parent.check_owner( request.user.id) or request.user.is_superuser: protocol_parent.delete() steps = Protocol.objects.filter(parent=int(request.GET['id'])) steps.delete() return success('Your protocol has been deleted.') else: return error('You are not owner of the protocol.') else: return error('Unknown parameter.') else: return error('Method error.')
def get_learning_result(request): if request.method == 'GET': learning_form = QueryLearningForm(request.GET) if learning_form.is_valid(): cd = learning_form.cleaned_data try: train = Prediction.objects.get(step_hash=cd['stephash'], type=cd['type']) template = loader.get_template('ui/get_learning_result.html') context = RequestContext(request, { 'hit': train, }) return success(template.render(context)) except Exception, e: return error(e) else: return error(str(learning_form.errors))
def __init__(self, name, dict, ast_params): self.ast_params = ast_params self.attributes = [] self.clone_by_ref = False self.cloner_prologue = "" # Is the class concrete? (Default is false.) self.concrete = False self.declAttribute = "" self.derived = [] self.desc = "" self.hide = False self.includes = {} self.inline = {} self.name = name self.super = "" self.super_non_nodes = [] for key in dict: # Catch duplicate keys. if isinstance(key, tuple): (realkey, value) = key error('duplicate key: ' + name + "::" + realkey) if key not in [ 'attributes', 'concrete', 'declAttribute', 'default', 'desc', 'hide', 'inline', 'printer', 'super', 'cloner_prologue', 'clone_by_ref', ]: warning('unknown Node attribute: ' + name + "::" + key) self.__dict__[key] = dict[key] # If we have only one super-class, it has been parsed as a single # value, but we want a list. if (not isinstance(self.super, list)): self.super = [self.super] self.attributes = map(self.attribute_of_dict, self.attributes)
def update_parameter(request): if request.method == 'GET': from urllib import unquote update_parameter_form = StepManipulateForm(request.GET) if update_parameter_form.is_valid(): cd = update_parameter_form.cleaned_data step = Protocol.objects.get(id=cd['id']) if (step.check_owner(request.user.id) or request.user.is_superuser) and step.check_parent( cd['parent']): step.update_parameter(unquote(cd['parameter'])) step.save() return success('Your step has been updated.') else: return error('You are not the owner of the step.') else: return error(str(update_parameter_form.errors)) else: return error('Method error.')
def load_batch_with_labels(self, opts): # Initialize arrays: im_prep_batch = np.zeros([opts.batch_size, 128, 128, 3], dtype=np.float32) true_labels = np.zeros((opts.batch_size, NDIM_DISC), dtype=np.float32) # Fill the batches: for idx_in_batch in range(opts.batch_size): # Corresponding image index: im_idx = self.curr_batch * opts.batch_size + idx_in_batch if im_idx >= self.n_images_per_epoch: tools.error('Image index over number of images per epoch') # Load images (full and body): im_full = tools.load_images_onepath( self.annotations[self.indexes[im_idx]], opts) # Add one dimension (for batch) im_prep_batch[idx_in_batch, :, :, :] = im_full # Build the batch with the true labels: # Discrete: for cat_idx in range(NCAT_IMAGENET): if tools.category_in_annotation( self.annotations[self.indexes[im_idx]], cat_idx): true_labels[idx_in_batch, cat_idx] = 1 # Update batch index: self.curr_batch = self.curr_batch + 1 # If we have completed a whole epoch, prepare a new one and restart the batch index: if self.curr_batch == self.n_batches_per_epoch: self.curr_batch = 0 self.prepare_epoch(opts) # inputs = [im_full_prep_batch, im_body_prep_batch] # labels = [true_labels_cont, true_labels_disc] inputs = {'im_prep_batch': im_prep_batch} labels = {'true_labels': true_labels} return inputs, labels
def show_step(request): if request.method == 'POST': query_protocol_form = ProtocolManipulateForm(request.POST) if query_protocol_form.is_valid(): cd = query_protocol_form.cleaned_data if request.user.is_superuser: step_list = Protocol.objects.filter(parent=cd['parent']).all() else: step_list = Protocol.objects.filter( parent=cd['parent']).filter(user_id=request.user.id).all() template = loader.get_template('ui/show_steps.html') context = RequestContext(request, { 'step_list': step_list, }) return success(template.render(context)) else: return error(str(query_protocol_form.errors)) else: return error('Method error.')
def show_learning_steps(request): if request.method == 'GET': if 'parent' in request.GET: if request.user.is_superuser: step_list = Protocol.objects.filter( parent=int(request.GET['parent'])).all() else: step_list = Protocol.objects.filter( parent=int(request.GET['parent'])).filter( user_id=request.user.id).all() template = loader.get_template('ui/show_learning_steps.html') context = RequestContext(request, { 'step_list': step_list, }) return success(template.render(context)) else: return error('Wrong parameter.') else: return error('Method error.')
def show_job_log(request): if request.method == 'POST': query_job_form = JobManipulateForm(request.POST) if query_job_form.is_valid(): cd = query_job_form.cleaned_data log_path = os.path.join(get_config('env', 'log'), str(cd['job'])) try: log_file = open(log_path, mode='r') log = log_file.readlines() log.reverse() log = log[:100] # log_content = [line+'<br />' for line in log] log_content = '<br />'.join(log) log_file.close() return success(log_content) except Exception, e: return error(e) else: return error(str(query_job_form.errors))
def call(self, x): prev_prediction = prev_reconstruction = x prev_code = self.code errors = [] for n in range(self.n_loops): prediction = self.fn(self.decode(prev_code)) code = self.fn(self.encode(prev_reconstruction)) reconstruction = self.fn(self.decode(code)) errors.extend([ error(prev_prediction, prediction), error(x, prediction) * 420., error(prev_reconstruction, reconstruction), error(x, reconstruction) * 420., error(prev_code, code) ]) prev_reconstruction = reconstruction prev_prediction = prediction prev_code = code y = self.out(errors) return y
def __init__( self, serial_port, nb_leds, corrections={"r": 1.0, "g": 1.0, "b": 1.0}, brightness=1.0, nb_steps_fading=25 ): """Params: * serial_port is the serial port to open (e.g. a device path) * nb_leds is the total number of LEDs * corrections is used to correct the intensity differences between colors * brightness is used to reduce the global brightness * nb_steps_fading is the number of steps to use by default for fading """ self.corrections = {k: float(brightness) * corrections[k] for k in corrections} self.serial_port = serial_port self.ser = serial.Serial(port=self.serial_port, baudrate=115200) if not self.ser.isOpen(): try: self.ser.open() except Exception, e: tools.error("Error opening serial port: " + str(e))
def handle(self): self.data = self.rfile.readline().strip() print "Got from {}:".format(self.client_address[0]) print self.data # ACK with the length of the received payload self.wfile.write(str(len(self.data))) # Send to the LEDs try: self.data = json.loads(self.data) try: fading_duration = self.data["fading_duration"] except: fading_duration = 0 self.control.send_colors(self.data["colors"], fading=self.data["fading"], fading_duration=fading_duration) except: tools.error("Invalid packet")
def build_model(opts): if opts.modelname == 'cnn_emotic_1': cnn_builder = cnn_emotic_1.cnn_builder_class(opts.cnn_opts[opts.modelname], opts) else: tools.error('modelname not recognized.') # Define network: if opts.net_arch == 'orig': cnn_builder.define_network() elif opts.net_arch == 'fullpath': cnn_builder.define_fullpath() elif opts.net_arch == 'bodypath': cnn_builder.define_bodypath() else: tools.error('Network architecture not recognized.') # Define loss: if opts.loss_type == 'orig' or opts.loss_type == 'onlycont' or opts.loss_type == 'onlydisc': cnn_builder.define_loss_orig(opts) elif opts.loss_type == 'simple1': cnn_builder.define_loss_simple1() elif opts.loss_type == 'fullpath': cnn_builder.define_loss_fullpath() elif opts.loss_type == 'bodypath': cnn_builder.define_loss_bodypath() else: tools.error('Loss type not recognized.') # Define optimizer: gradients = cnn_builder.define_optimizer(opts) return gradients
def on_equal_click(self): self.update_expression() if not self.exp.is_error(): self.history.out() res = get_result(self.exp.exp, self.ui.radio_bt_1.isChecked()) if self.make_power_enabled: self.on_power_click() if error(res): self.last_invalid_exp = self.exp.exp else: self.history.history.append(self.exp.exp) self.exp.set_exp(res) self.update_monitor()
def show_job_folder(request): import time, base64 if request.method == 'POST': query_job_form = JobManipulateForm(request.POST) if query_job_form.is_valid(): cd = query_job_form.cleaned_data try: job = Queue.objects.get(id=cd['job']) if job.check_owner( request.user.id) or request.user.is_superuser: result_folder = job.get_result() user_path = os.path.join(get_config('env', 'workspace'), str(request.user.id), result_folder) user_files = [] for root, dirs, files in os.walk(user_path): for file_name in files: file_full_path = os.path.join(root, file_name) file_path = file_full_path.replace(user_path+'\\', '')\ .replace(user_path+'/', '').replace(user_path, '') tmp = dict() tmp['name'] = file_path tmp['file_size'] = os.path.getsize(file_full_path) tmp['file_create'] = time.ctime( os.path.getctime(file_full_path)) tmp['trace'] = base64.b64encode( os.path.join(result_folder, file_path)) user_files.append(tmp) template = loader.get_template('ui/show_job_folder.html') context = RequestContext(request, { 'user_files': user_files, }) return success(template.render(context)) else: return error('You are not the owner of the job.') except Exception, e: return error(e) else: return error(str(query_job_form.errors))
def full_path(self): # Full image path: x_f = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='x_f') # Parameters if self.load_torch: if self.dirmodel == None: tools.error('Missing dirmodel.') var_dict = self.load_weights_full() else: var_dict = self.random_weights_full() # Model block_configurations = [[4, 1, 2, 0], [2, 1, 2, 0], [2, 1, 1, 0], [1, 1, 1, 0], [2, 1, 1, 0], [1, 1, 1, 0], [2, 1, 1, 0], [1, 1, 1, 0]] # Loop adding all blocks: ini = 0 x = x_f for block_idx in range(8): var_dict_block = tools.get_subdictionary(ini, 8, var_dict) strideW = block_configurations[block_idx][0] strideH = block_configurations[block_idx][1] padW = block_configurations[block_idx][2] padH = block_configurations[block_idx][3] x = add_block(x, var_dict_block, strideW, strideH, padW, padH, 1, block_idx + 1) ini = ini + 8 # average pooling p1_avgpool = tf.layers.average_pooling2d(x, pool_size=4, strides=1, padding="VALID", name='p1_avgpool') # flattening full_path_end = tf.reshape(p1_avgpool, [-1, 640], name='full_path_end') return full_path_end
def add_step(request): import hashlib if request.method == 'POST': step_form = CreateStepForm(request.POST) if step_form.is_valid(): cd = step_form.cleaned_data try: protocol = ProtocolList.objects.get(id=cd['parent']) if protocol.check_owner( request.user.id) or request.user.is_superuser: m = hashlib.md5() m.update(cd['software'] + ' ' + cd['parameter'].strip()) step = Protocol(software=cd['software'], parameter=cd['parameter'], parent=cd['parent'], user_id=request.user.id, hash=m.hexdigest()) step.save() return success('Your step has been created.') else: return error('You are not owner of the protocol.') except Exception, e: return error(str(e))
def download_upload_file(request, f): import base64 file_path = os.path.join(get_config('env', 'workspace'), str(request.user.id), 'uploads', base64.b64decode(f.replace('f/', ''))) try: response = FileResponse(open(file_path, 'rb')) response['Content-Type'] = 'application/octet-stream' response['Content-Disposition'] = 'attachment;filename="{0}"'.format( os.path.basename(file_path)) response['Content-Length'] = os.path.getsize(file_path) return response except Exception, e: return error(e)
def predictions_path(self, pj_dropout): # Parameters if self.load_torch: if self.dirmodel == None: tools.error('Missing dirmodel.') dirbase = self.dirmodel + 'tramo3/' [yc_weight, yc_bias] = read_weights_dense(dirbase + 'l6_dense.txt', varname='yc') [yd_weight, yd_bias] = read_weights_dense(dirbase + 'l7_dense.txt', varname='yd') else: if self.xavier_init: yc_weight = weight_variable_xavier([256, NDIM_CONT], 256, NDIM_CONT, name='yc_weight') yc_bias = bias_variable([NDIM_CONT], name='yc_bias') yd_weight = weight_variable_xavier([256, NDIM_DISC], 256, NDIM_DISC, name='yd_weight') yd_bias = bias_variable([NDIM_DISC], name='yd_bias') else: yc_weight = weight_variable([256, NDIM_CONT], name='yc_weight') yc_bias = bias_variable([NDIM_CONT], name='yc_bias') yd_weight = weight_variable([256, NDIM_DISC], name='yd_weight') yd_bias = bias_variable([NDIM_DISC], name='yd_bias') # Model yc = tf.add(tf.matmul(pj_dropout, yc_weight), yc_bias, name='yc') yd = tf.add(tf.matmul(pj_dropout, yd_weight), yd_bias, name='yd') # yc = tf.sigmoid(tf.add(tf.matmul(pj_dropout, yc_weight), yc_bias), name='yc') # yd = tf.sigmoid(tf.add(tf.matmul(pj_dropout, yd_weight), yd_bias), name='yd') return yc, yd
def send_instructions(self, data): """Sends instructions to the LEDs server to display the current configuration params: data is a dict {fading: bool, colors: {}}, cf get_diff """ if len(data["colors"]) == 0: return sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect(self.server_address) data = json.dumps(data) + "\n" sock.sendall(data) received = sock.recv(1024) try: if int(received) != len(data.strip()): tools.error("Error while sending instructions for LEDs") except: tools.error("Error while sending instructions for LEDs") finally: sock.close()
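# Hedged illustration (not from the source) of the packet this client writes and the TCP handler above parses: a newline-terminated JSON object; the {led_index: [r, g, b]} layout of "colors" is an assumption.
import json
example_packet = {"fading": True, "fading_duration": 0.5,
                  "colors": {"0": [255, 0, 0], "42": [0, 0, 255]}}
wire_data = json.dumps(example_packet) + "\n"  # handle() reads one line, so terminate with a newline
print(wire_data)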
def stdextr(data, x1, x2, variance=None, mask=None, interp=False): """ Standard box extraction of spectrum. Step 4 of Horne (1989). Parameters: ----------- data: 2D float ndarray Sky-subtracted spectrum image of shape [nwavelength, nposition]. x1: Integer Left X boundary of region to extract the spectrum. x2: Integer Right X boundary of region to extract the spectrum. Note that: 0 <= x1 < x2 <= nx variance: 2D float ndarray Variance image from processed image. mask: 2D integer ndarray Mask of the data image (1 = good pixel, 0 = bad pixel). interp: Bool If True, linearly interpolate the data for bad pixels. Returns: -------- stdspec: 1D float ndarray The extracted spectrum. stdvar: 1D float ndarray Variance of extracted spectrum. Example: -------- >>> import sys >>> import astropy.io.fits as fits >>> import matplotlib.pyplot as plt >>> sys.path.append("./src/") >>> import stdextr as se >>> data = fits.getdata("./images/ex1.fits") >>> spec, sstd = se.stdextr(data, 230, 270) >>> plt.plot(spec) """ # Check inputs: nwave, nx = np.shape(data) if variance is None: variance = np.ones((nwave, nx), np.double) if mask is None: mask = np.ones((nwave, nx), np.byte) if x1 < 0 or x2 <= x1 or nx < x2: tools.error("Invalid x1, x2 boundaries (={:d}, {:d}), the values must " "satisfy:\n 0 <= x1 < x2 <= nx (={:d}).".format(x1, x2, nx)) if np.shape(variance) != (nwave, nx): tools.error("Incompatible shapes between data image ({:d}, {:d}) and " "variance image ({:d}, {:d}).".format(nwave, nx, *np.shape(variance))) if np.shape(mask) != (nwave, nx): tools.error("Incompatible shapes between data image ({:d}, {:d}) and " "mask image ({:d}, {:d}).".format(nwave, nx, *np.shape(mask))) # Interpolate over bad pixels: if interp: stdspec = np.zeros(nwave) for i in np.arange(nwave): bad = np.where(mask[i, x1:x2] == 0) good = np.where(mask[i, x1:x2] == 1) datav = np.copy(data[i, x1:x2]) if len(bad[0]) != 0: interpol = si.interp1d(good[0], datav[good], kind="linear") datav[bad] = interpol(bad[0]) stdspec[i] = np.sum(datav) return stdspec, np.zeros(nwave) # Standard extraction: stdspec = np.sum((data * mask)[:, x1:x2], axis=1) stdvar = np.sum((variance * mask)[:, x1:x2], axis=1) return stdspec, stdvar
def error(self, text): error(text) self.errors += 1
else: print('Game over') self.game_over_animation() self.save_score() return self.score def close(self): """Handle the cleaning, if necessary""" if hasattr(self.game_controller, 'close'): self.game_controller.close() def __exit__(self, type, value, traceback): self.close() if __name__ == "__main__": if len(sys.argv) < 2: tools.error("Usage: "+sys.argv[0]+" SERVER_IP") sys.exit(1) HOST = sys.argv[1] PORT = 4242 try: while True: print("Let's go !") with Game((HOST, PORT), ctrl.Controller) as game: print("Score " + str(game.loop())) except KeyboardInterrupt: print("\nExit…") sys.exit()
def fit(xvals, data, variance, spec, deg=2): """ Fit a polynomial to data/spectrum, using the variances as weights. Parameters: ----------- xvals: 1D float ndarray X values for data. data: 1D float ndarray The data vector to fit. variance: 1D float ndarray The variance vector. spec: 1D float ndarray The spectrum vector. deg: Integer Degree of the polynomial. Returns: -------- est: 1D float ndarray The estimated spectrum. coeff: 1D float ndarray The polynomial coefficients (from highest to lowest power). Example: -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> import polyfunc as pf >>> nx = 100 >>> xvals = np.arange(nx, dtype=np.double) >>> prof = ((-(xvals/nx*2 - 1)**2 + 1)*0.75)/nx*2 >>> sbump = 10 >>> spec = np.sin(xvals/nx * np.pi * 6) + sbump >>> gain = 10 >>> basey = prof*spec >>> data = basey + basey * np.random.normal(0.0, 1.0, nx)/gain >>> variance = basey/gain >>> badloc = np.random.randint(0, 50, 4) >>> data[badloc] = np.random.uniform(0, 2*np.amax(data), 4) >>> prof[badloc] = 0 >>> deg = 2 >>> est, coeffv = pf.fit(xvals, data, variance, spec, deg) >>> plt.figure(1) >>> plt.clf() >>> plt.plot(data/spec, 'or') >>> plt.plot(xvals, est, 'b') >>> est = pf.evaluate(xvals, coeffv, data, variance, spec) >>> plt.plot(xvals, est, 'ob') """ # Check inputs: nd = np.size(data) nv = np.size(variance) ns = np.size(spec) nx = np.size(xvals) if nx != nd or nx != nv or nx != ns: tools.error("The lengths of data ({:d}), variance ({:d}), spec ({:d}), " "and/or xvals ({:d}) are incompatible.".format(nd, nv, ns, nx)) if deg < 0 or nx <= deg: tools.error("Invalid polynomial degree ({:d}), must be: 0 <= deg <= nx-1 " "= {:d}.".format(deg, nx-1)) # Initial estimate: est = data / spec # Find where the variance has non-zero values: nzerov = (variance != 0.0) # Fit polynomial where variance != 0: merrors = variance/spec**2 # Use all pixels for estimation merrors = np.clip(merrors, 1e-8, np.amax(merrors)) coeff = np.polyfit(xvals, data/spec, deg, w=merrors) est[nzerov] = np.polyval(coeff, xvals[nzerov]) return est, coeff
def run(env, start_response): """Main.""" if 'PATH_INFO' not in env: env['PATH_INFO'] = '/' path = filter(None, os.path.normpath(env['PATH_INFO']).split('/')) if env['REQUEST_METHOD'] == 'GET': if len(path) == 0: path.append('_') dataset, path = path[0], path[1:] try: dataset = int(dataset) if not tools.exists(dataset): raise ValueError except ValueError: ret = tools.error(404, "Invalid dataset ID.") else: if len(path) == 0: path.append('json') handler, command = path[0], path[1:] if handler not in tools.get_handlers: ret = tools.error(501, "Didn't understand (sorry).") else: ret = tools.get_handlers[handler](dataset, command) elif env['REQUEST_METHOD'] == 'POST' or env['REQUEST_METHOD'] == 'PUT': if len(path) == 0: # Create new dataset ret = tools.post_handlers['create'](env) elif len(path) == 1 and path[0] == 'load': # Load from RDF ret = tools.post_handlers['load'](env) elif len(path) == 1: # Update existing dataset try: dataset = int(path[0]) if not tools.exists(dataset): raise ValueError except ValueError: ret = tools.error(404, "Invalid dataset ID.") else: ret = tools.post_handlers['update'](dataset, env) else: # Handlers dataset, handler, command = path[0], path[1], path[2:] try: dataset = int(path[0]) if not tools.exists(dataset): raise ValueError except ValueError: ret = tools.error(404, "Invalid dataset ID.") else: if handler in tools.post_handlers: ret = tools.post_handlers[handler](dataset, env, command) else: ret = tools.error(400, "Bad request.") else: # Unsupported action ret = tools.error(501, "Didn't understand (sorry).") status, headers, content = ret start_response(status, headers) return content
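# Hedged sketch (not from the source): run() has the standard WSGI signature, so for local testing it could be served with the standard library; the host, port, and __main__ guard are assumptions.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8080, run).serve_forever()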