def forward_asn(self, x):
    # ACTION SELECTION NETWORK - FUZZY INFERENCE
    w = np.zeros(self.h_asn)
    m = np.zeros(self.h_asn)
    for i, rule in enumerate(self.rule_set):
        j1, j2 = rule[0][0], rule[1][0]  # input indexes
        u1, u2 = self.imf[j1][rule[0][1]], self.imf[j2][rule[1][1]]  # input membership functions
        w[i] = min(self.D[j1, i] * u1.fuzzify(x[j1]),
                   self.D[j2, i] * u2.fuzzify(x[j2]))
        w[i] = clip(w[i], 0, 1)  # make sure the degree of satisfaction of the rule is between 0 and 1
        m[i] = self.omf[rule[2]].defuzzify(w[i])
    denominator_temp = sum(self.F[i] * w[i] for i in range(self.h_asn))
    if abs(denominator_temp) < 0.00001:
        u = 0
    else:
        u = sum(self.F[i] * m[i] * w[i] for i in range(self.h_asn)) / denominator_temp

    # ACTION SELECTION NETWORK - NEURAL NETWORK
    self.z, p = nn_doubly_connected_forward_pass(self.D, self.F, self.E, x)
    p = clip(p, 0, 1)
    up = self.o_func(u, p)
    s = self.k_func(u, up, p)
    return up, s  # NOTE: return u instead of up to bypass the stochastic modification as suggested in the report
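# NOTE: clip is an external helper in most of the snippets collected here and
# its definition is not included. A minimal sketch consistent with the scalar
# call sites clip(value, lo, hi) would be (an assumption, not the original):
def clip(value, lo, hi):
    """Clamp value to the closed interval [lo, hi] (hypothetical helper)."""
    return max(lo, min(hi, value))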
def main():
    parser = get_parser()
    options, args = parser.parse_args()
    default_name = os.environ.get('RCFILE_NAME')
    default_server = os.environ.get('RCFILE_SERVER')
    if len(args) == 0:
        if default_name and default_server:
            name, server = default_name, default_server
        else:
            parser.error('Missing parameters')
    elif len(args) == 1:
        arg = args[0]
        if arg in servers.keys():
            if default_name:
                name, server = default_name, arg
            else:
                parser.error('No name specified')
        else:
            if default_server:
                name, server = arg, default_server
            else:
                parser.error('No server specified')
    elif len(args) == 2:
        name, server = args
    else:
        parser.error("Incorrect arguments")
    if server not in servers.keys():
        parser.error("Invalid server.")
    version = options.version
    if version not in versions:
        parser.error("Invalid crawl version.")
    url = servers[server].rcfile(name, version)
    if options.copy or options.url:
        if options.copy:
            clip(url)
        if options.url:
            sys.stdout.write(url + '\n')
        sys.exit()
    else:
        request = Request(url)
        request.add_header('User-agent', USER_AGENT)
        try:
            response = urlopen(request)
        except HTTPError, e:
            sys.exit('{0} {1}'.format(e.code, e.reason))
        # write the whole response body (iterating over read() would walk it
        # character by character)
        sys.stdout.write(response.read())
def draw(self, dt):
    # if self._mixer.is_onset():
    #     self.hue_inner = math.fmod(self.hue_inner + self.hue_step(), 1.0)
    #     self.luminance_offset += self.hue_step()
    self.hue_inner += dt * self.speed()
    self.wave1_offset += self.wave1_speed() * dt
    self.wave2_offset += self.wave2_speed() * dt
    self.luminance_offset += self.luminance_speed() * dt

    luminance_table = []
    luminance = 0.0
    for input in range(self.luminance_steps()):
        if input > self.blackout() * self.luminance_steps():
            luminance -= 0.01
            luminance = clip(0, luminance, 1.0)
        elif input < self.whiteout() * self.luminance_steps():
            luminance += 0.1
            luminance = clip(0, luminance, 1.0)
        else:
            luminance -= 0.01
            luminance = clip(0.5, luminance, 1.0)
        luminance_table.append(luminance)
    luminance_table = np.asarray(luminance_table)

    wave1_period = self.wave1_period()
    wave1_amplitude = self.wave1_amplitude()
    wave2_period = self.wave2_period()
    wave2_amplitude = self.wave2_amplitude()
    luminance_scale = self.luminance_scale()

    wave1 = np.abs(np.cos(self.wave1_offset + self.pixel_angles * wave1_period) * wave1_amplitude)
    wave2 = np.abs(np.cos(self.wave2_offset + self.pixel_angles * wave2_period) * wave2_amplitude)
    hues = self.pixel_distances + wave1 + wave2
    luminance_indices = np.mod(
        np.abs(np.int_((self.luminance_offset + hues * luminance_scale) * self.luminance_steps())),
        self.luminance_steps())
    luminances = luminance_table[luminance_indices]
    hues = np.fmod(self.hue_inner + hues * self.hue_width(), 1.0)
    self.setAllHLS(hues, luminances, 1.0)
def coord_descent_exp_loss(sum_1_1, sum_1_m1, sum_0_1, sum_0_m1, max_weight):
    m = 1e-10
    # if sum_0_1 + sum_0_m1 == 0 or sum_1_1 + sum_1_m1 == 0:
    #     return np.inf, np.inf
    # w_l = (sum_0_1 - sum_0_m1) / (sum_0_1 + sum_0_m1)
    # w_r = (sum_1_1 - sum_1_m1) / (sum_1_1 + sum_1_m1) - w_l

    # 1e-4 needs up to 20-50 iters; 1e-6 needs up to 100-200 iters, which leads
    # to a significant slowdown in practice
    eps_precision = 1e-4

    # We have to properly handle the cases when the optimal leaf value is +-inf.
    if sum_1_m1 < m and sum_0_1 < m:
        w_l, w_r = -max_weight, 2 * max_weight
    elif sum_1_1 < m and sum_0_m1 < m:
        w_l, w_r = max_weight, -2 * max_weight
    elif sum_1_m1 < m:
        w_r = max_weight
        w_l = 0.5 * math.log((math.exp(-w_r) * sum_1_1 + sum_0_1) /
                             (math.exp(w_r) * sum_1_m1 + sum_0_m1))
    elif sum_1_1 < m:
        w_r = -max_weight
        w_l = 0.5 * math.log((math.exp(-w_r) * sum_1_1 + sum_0_1) /
                             (math.exp(w_r) * sum_1_m1 + sum_0_m1))
    elif sum_0_1 < m:
        w_l = -max_weight
        w_r = 0.5 * math.log(sum_1_1 / sum_1_m1) - w_l
    elif sum_0_m1 < m:
        w_l = max_weight
        w_r = 0.5 * math.log(sum_1_1 / sum_1_m1) - w_l
    else:  # main case
        w_r, w_l = 0.0, 0.0
        w_r_prev, w_l_prev = np.inf, np.inf
        i = 0
        # Note: ideally one has to calculate the loss, but the O(n) factor would
        # slow everything down here
        while (np.abs(w_r - w_r_prev) > eps_precision) or (np.abs(w_l - w_l_prev) > eps_precision):
            i += 1
            w_r_prev, w_l_prev = w_r, w_l
            w_r = 0.5 * math.log(sum_1_1 / sum_1_m1) - w_l
            w_l = 0.5 * math.log((math.exp(-w_r) * sum_1_1 + sum_0_1) /
                                 (math.exp(w_r) * sum_1_m1 + sum_0_m1))
            if i == 50:
                break

    left_leaf = clip(w_l, -max_weight, max_weight)
    right_leaf = clip(left_leaf + w_r, -max_weight, max_weight)
    w_l, w_r = left_leaf, right_leaf - left_leaf
    return w_l, w_r
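# A hypothetical smoke test for coord_descent_exp_loss (the values are
# illustrative, not from the original code): after the final clipping step,
# both leaves must satisfy the box constraints |w_l| <= max_weight and
# |w_l + w_r| <= max_weight by construction.
w_l, w_r = coord_descent_exp_loss(3.0, 1.0, 2.0, 4.0, max_weight=1.0)
assert abs(w_l) <= 1.0 and abs(w_l + w_r) <= 1.0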
def fuzzify(self, val):
    val = abs(val) if self.y_symmetry else val
    y = self.a * val + self.b
    if y > 1 and self.sink_beyond_1:
        return 0
    else:
        return clip(y, 0, 1)
def __call__(self, x, deterministic, train_clip=False, thresh=3):
    # Alpha is the dropout rate
    log_alpha = clip(self.log_sigma2 - tf.log(self.W**2 + eps))
    # Values of log_alpha that are above the threshold
    clip_mask = tf.greater_equal(log_alpha, thresh)

    def true_path():  # For inference
        # If log_alpha >= thresh, return 0
        # If log_alpha < thresh, return tf.matmul(x, self.W)
        return tf.matmul(x, tf.where(clip_mask, tf.zeros_like(self.W), self.W))

    def false_path():  # For training
        # Sample from a normal distribution centred on tf.matmul(x, W)
        # and with variance roughly proportional to the size of
        # tf.matmul(x, W) * tf.exp(log_alpha)
        W = self.W
        if train_clip:
            raise NotImplementedError
        mu = tf.matmul(x, W)
        si = tf.matmul(x * x, tf.exp(log_alpha) * self.W * self.W)
        si = tf.sqrt(si + eps)
        return mu + tf.random_normal(tf.shape(mu), mean=0.0, stddev=1.0) * si

    h = tf.cond(deterministic, true_path, false_path)
    return self.nonlinearity(h + self.b)
def basic_case_two_intervals(y, gamma, guaranteed_right, uncertain, sum_1, sum_m1, max_weight):
    loss_best, w_r_best, w_l_best = np.inf, np.inf, np.inf
    for sign_w_r in (-1, 1):
        # Calculate the indicator function based on the known `sign_w_r`
        ind = guaranteed_right + (y * sign_w_r < 0) * uncertain

        # Calculate all partial sums
        sum_1_1 = np.sum(ind * (y == 1) * gamma)
        sum_1_m1 = np.sum(ind * (y == -1) * gamma)
        sum_0_1, sum_0_m1 = sum_1 - sum_1_1, sum_m1 - sum_1_m1

        # Minimizer of w_l, w_r on the current interval
        w_l, w_r = coord_descent_exp_loss(sum_1_1, sum_1_m1, sum_0_1, sum_0_m1, max_weight)

        # If w_r lies on the wrong side of 0 (i.e. sign_w_r * w_r < 0), set w_r := 0
        w_r = sign_w_r * max(sign_w_r * w_r, 0)

        # If w_r has become 0, we need to readjust w_l
        if sum_1_m1 != 0 and sum_0_m1 != 0:
            w_l = 0.5 * math.log((math.exp(-w_r) * sum_1_1 + sum_0_1) /
                                 (math.exp(w_r) * sum_1_m1 + sum_0_m1))
            w_l = clip(w_l, -max_weight, max_weight)
        else:  # to prevent a division by zero
            w_l = max_weight * math.copysign(
                1, 0.5 * math.log(math.exp(-w_r) * sum_1_1 + sum_0_1))

        preds_adv = w_l + w_r * ind
        loss = np.mean(gamma * np.exp(-y * preds_adv))  # also O(n)
        if loss < loss_best:
            loss_best, w_l_best, w_r_best = loss, w_l, w_r
    return loss_best, w_l_best, w_r_best
def __call__(self, x, deterministic, train_clip=False, thresh=3):
    # Alpha is the dropout rate
    log_alpha = clip(self.log_sigma2 - tf.log(self.W**2 + eps))
    # Values of log_alpha that are above the threshold
    clip_mask = tf.greater_equal(log_alpha, thresh)

    def true_path():  # For inference
        return tf.nn.conv2d(x,
                            tf.where(clip_mask, tf.zeros_like(self.W), self.W),
                            strides=self.strides,
                            padding=self.padding)

    def false_path():  # For training
        W = self.W
        if train_clip:
            raise NotImplementedError
        mu = tf.nn.conv2d(x, W, strides=self.strides, padding=self.padding)
        si = tf.nn.conv2d(x * x,
                          tf.exp(log_alpha) * W * W,
                          strides=self.strides,
                          padding=self.padding)
        si = tf.sqrt(si + eps)
        return mu + tf.random_normal(tf.shape(mu), mean=0.0, stddev=1.0) * si

    h = tf.cond(deterministic, true_path, false_path)
    return self.nonlinearity(h + self.b)
def train_step(self, image):
    with tf.GradientTape() as tape:
        outputs = self.extractor(image)
        loss = self.style_content_loss(outputs)
    grad = tape.gradient(loss, image)
    self.opt.apply_gradients([(grad, image)])
    image.assign(clip(image))
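# The clip() called above is not defined in this snippet. In the TensorFlow
# style-transfer tutorial this training step resembles, the helper keeps the
# optimized image inside the valid pixel range; a sketch under that assumption:
def clip(image):
    return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)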
def eval_reg(log_sigma2, W):
    # Approximates the negative of the KL-divergence according to eqn 14.
    # This is a key part of the loss function (see eqn 3).
    k1, k2, k3 = 0.63576, 1.8732, 1.48695
    C = -k1
    log_alpha = clip(log_sigma2 - tf.log(W**2))
    mdkl = (k1 * tf.nn.sigmoid(k2 + k3 * log_alpha)
            - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C)
    return -tf.reduce_sum(mdkl)
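# log_alpha above is log(sigma^2 / W^2), the per-weight noise-to-signal ratio
# of variational dropout. A NumPy re-statement of the same approximation,
# useful for sanity-checking values (a sketch, not part of the original code):
import numpy as np

def neg_kl_approx(log_alpha):
    k1, k2, k3 = 0.63576, 1.8732, 1.48695
    sigmoid = 1.0 / (1.0 + np.exp(-(k2 + k3 * log_alpha)))
    return k1 * sigmoid - 0.5 * np.log1p(np.exp(-log_alpha)) - k1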
def _len2NS(py, ns, getCount):
    '''(INTERNAL) Check the initial Python C{len} vs the final
       C{NS...} instance C{count}.
    '''
    n, m = len(py), getCount(ns)
    if m != n:
        t = (ns.objc_classname, m, clip(repr(py)), n)
        raise RuntimeError('%s[%s] vs %s[%s]' % t)
    return ns
def changeColor(r, g, b, address=0xF):
    global COLOR
    global PWM
    global GPIOMapping_BCM
    for i in range(0, len(config.LED_PINS)):
        if ((i + 1) & address) != 0:
            COLOR[i].R = r
            COLOR[i].G = g
            COLOR[i].B = b
            # if lower than min value, turn LEDs off
            r = 0.0 if r <= 0 else utils.clip(r, config.MIN_VALUE)
            g = 0.0 if g <= 0 else utils.clip(g, config.MIN_VALUE)
            b = 0.0 if b <= 0 else utils.clip(b, config.MIN_VALUE)
            setDutyCycle(PWM[config.LED_PINS[i][0]], r)
            setDutyCycle(PWM[config.LED_PINS[i][1]], g)
            setDutyCycle(PWM[config.LED_PINS[i][2]], b)
def estimate_noise(diff_img):
    """
    Estimate background noise in the given image.

    :param diff_img: given image
    :return: background noise
    """
    arr = diff_img[~np.isnan(diff_img)]
    arr = arr[np.nonzero(arr)]
    _, noise_level = clip(arr, nsigma=5)
    return noise_level
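# The clip(arr, nsigma=5) call above appears to be an iterative sigma-clipping
# routine returning (clipped_mean, clipped_std); that signature is inferred
# from the call site. A minimal NumPy sketch of such a routine:
import numpy as np

def sigma_clip(arr, nsigma=5, iters=3):
    arr = np.asarray(arr, dtype=float)
    for _ in range(iters):
        mean, std = arr.mean(), arr.std()
        keep = np.abs(arr - mean) < nsigma * std
        if keep.all():
            break
        arr = arr[keep]
    return arr.mean(), arr.std()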
def fixup(self, input):
    """Restricts the value to the valid range defined by setTop() and
    setBottom(). Limits the precision as well."""
    # restrict the value to the valid range
    try:
        input = self.parent().fmt.format(
            clip(float(input), self.bottom(), self.top()))
    except ValueError:
        pass  # do nothing if float conversion fails
    return str(input)
def changeColor(r, g, b, address=0xF):
    global COLOR
    global PIGPIO
    global GPIOMapping_BCM
    for i in range(0, len(config.LED_PINS)):
        if ((i + 1) & address) != 0:
            COLOR[i].R = r
            COLOR[i].G = g
            COLOR[i].B = b
            # if lower than min value, turn LEDs off
            r = 0.0 if r <= 0.0 else utils.clip(r, config.MIN_VALUE)
            g = 0.0 if g <= 0.0 else utils.clip(g, config.MIN_VALUE)
            b = 0.0 if b <= 0.0 else utils.clip(b, config.MIN_VALUE)
            # pigpio works with values between 0-255
            PIGPIO.set_PWM_dutycycle(GPIOMapping_BCM[config.LED_PINS[i][0]], r * 255)
            PIGPIO.set_PWM_dutycycle(GPIOMapping_BCM[config.LED_PINS[i][1]], g * 255)
            PIGPIO.set_PWM_dutycycle(GPIOMapping_BCM[config.LED_PINS[i][2]], b * 255)
def render(self):
    self.canvas.delete('all')
    (faces, colors) = self.map.render()
    faces, colors = list(faces), list(colors)
    for i in range(len(faces)):
        face = []
        for (x, y, z) in faces[i]:
            # Camera position
            x -= cam.x
            y -= cam.y
            z -= cam.z
            # Camera rotation
            (x, z) = utils.rotate2D(x, z, cam.yaw)
            (y, z) = utils.rotate2D(y, z, cam.pitch)
            face.append((x, y, z))
        faces[i] = face

    # Face clipping
    faces = [utils.clip(face) for face in faces]

    # Face sorting
    order = sorted(range(len(faces)), key=lambda i: utils.calculateDepth(faces[i]))

    # Face display
    for i in order:
        face = faces[i]
        if len(face) > 0:
            polygon = []
            for (x, y, z) in face:
                # Projection
                f = (self.width / 2) / z
                x = x * f + (self.width / 2)
                y = -y * f + (self.height / 2)
                polygon += [x, y]
            # de = ("%02x" % random.randint(0, 255))
            # re = ("%02x" % random.randint(0, 255))
            # we = ("%02x" % random.randint(0, 255))
            # color = "#" + de + re + we
            self.canvas.create_polygon(polygon, outline='white', fill=colors[i])
def progress(self):
    if self.type == constants.CONDITION_TIME:
        return utils.clip((time.time() - self.startTime) / self.time.seconds)
    if self.type == constants.CONDITION_ITERATE:
        return (self.startIterations - self.iterations) * 1.0 / self.iterations
    if self.type == constants.CONDITION_COLOR:
        return 0 if led.COLOR[0] != self.color else 1
    if self.type == constants.CONDITION_BOOL:
        return 1.0 if self.condition else 0.0
def updatePower(self, x, y):
    """
    Compute the power!!! Measured as the distance from the tip of the turret.
    """
    refDist = 250.0
    xTip, yTip = self.tip
    # Velocity is dependent on the distance the pointer is from the turret.
    _ = CrudeVec
    self.power = distance(_(x, y), _(xTip, yTip)) / refDist
    # clip(hi, lo) appears to return a clamping function here
    # (cf. the map(clip(1, 0), ...) usage elsewhere in this collection)
    self.power = clip(1.0, 0.05)(self.power)
def __mul__(self, other):
    if type(other) is Color:
        return Color(utils.clip(self.R * other.R),
                     utils.clip(self.G * other.G),
                     utils.clip(self.B * other.B))
    if type(other) is float or type(other) is int or type(other) is long:
        return Color(utils.clip(self.R * other),
                     utils.clip(self.G * other),
                     utils.clip(self.B * other))
    raise ValueError("unknown type for multiply operation")
def __add__(self, other):
    if type(other) is Color:
        return Color(utils.clip(self.R + other.R),
                     utils.clip(self.G + other.G),
                     utils.clip(self.B + other.B))
    if type(other) is float or type(other) is int or type(other) is long:
        return Color(utils.clip(self.R + other),
                     utils.clip(self.G + other),
                     utils.clip(self.B + other))
    # str(...) is needed: concatenating a str and a type object raises TypeError
    raise ValueError("unknown type for add operation " + str(type(other)))
def __sub__(self, other):
    if type(other) is Color:
        return Color(utils.clip(self.R - other.R),
                     utils.clip(self.G - other.G),
                     utils.clip(self.B - other.B))
    if type(other) is float or type(other) is int or type(other) is long:
        return Color(utils.clip(self.R - other),
                     utils.clip(self.G - other),
                     utils.clip(self.B - other))
    raise ValueError("unknown type for subtract operation " + str(type(other)))
def __div__(self, other):
    if type(other) is Color:
        return Color(utils.clip(self.R / other.R),
                     utils.clip(self.G / other.G),
                     utils.clip(self.B / other.B))
    if type(other) is float or type(other) is int or type(other) is long:
        return Color(utils.clip(self.R / other),
                     utils.clip(self.G / other),
                     utils.clip(self.B / other))
    raise ValueError("unknown type for division operation")
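# Usage sketch for the operators above (assuming utils.clip saturates a single
# channel to [0.0, 1.0]): channel arithmetic saturates rather than overflowing.
white = Color(0.8, 0.8, 0.8) + Color(0.5, 0.5, 0.5)  # R = G = B = 1.0
dim = Color(0.8, 0.8, 0.8) * 0.5                     # R = G = B = 0.4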
def dream_process(input_img_np, target_img_np, model, lr, mode, iteration, epoch, device):
    """Dreaming Iteration Process"""
    _, _, h, w = input_img_np.shape
    if h > 400 and w > 400:
        deep = 8
    elif h > 300 and w > 300:
        deep = 6
    elif h > 200 and w > 200:
        deep = 4
    else:
        deep = 2

    for _ in tqdm.tqdm(range(iteration), desc='Epoch ' + str(epoch) + ' :'):
        input_tensor = torch.from_numpy(input_img_np).type(dtype=torch.float32)
        input_tensor = input_tensor.to(device)
        input_tensor.requires_grad = True
        model.zero_grad()
        out_feature = model(input_tensor)

        dst_feature = None
        if target_img_np is not None:
            guide_tensor = torch.from_numpy(target_img_np).type(dtype=torch.float32)
            guide_tensor = guide_tensor.to(device)
            guide_tensor.requires_grad = False
            dst_feature = model(guide_tensor)

        matched_data = match_features_product_loss(out_feature, dst_feature, device)
        out_feature.backward(matched_data)

        # Update the input tensor according to the selected mode.
        if mode == "lapnorm":
            normed_grad = utils.normalize_grad(input_tensor.grad.data, deep=deep, device=device)
            input_tensor.data.add_(lr * normed_grad)
        else:
            avg_grad = np.abs(input_tensor.grad.data.cpu().numpy()).mean()
            norm_lr = lr / avg_grad
            input_tensor.data.add_(input_tensor.grad.data * norm_lr)
        input_tensor.grad.data.zero_()

        # Convert to NumPy for clipping; clipping is not differentiable.
        input_img_np = input_tensor.cpu().detach().numpy()
        input_img_np = utils.clip(input_img_np)
    return input_img_np
def dream(image, model, iterations, lr):
    """ Updates the image to maximize outputs for n iterations """
    for i in range(iterations):
        model.zero_grad()
        out = model(image)
        loss = out.norm()
        loss.backward()
        avg_grad = np.abs(image.grad.data.cpu().numpy()).mean()
        norm_lr = lr / avg_grad
        image.data += norm_lr * image.grad.data
        image.data = clip(image.data)
        image.grad.data.zero_()
    return image.cpu().data.numpy()
def dream(image, model, iterations, lr):
    """ Updates the image to maximize outputs for n iterations """
    image = Variable(image, requires_grad=True)
    for i in range(iterations):
        model.zero_grad()
        out = model(image)
        loss = out.norm()
        loss.backward()
        avg_grad = float(image.grad.data.abs().mean())
        norm_lr = lr / avg_grad
        image.data += norm_lr * image.grad.data
        image.data = clip(image.data)
        image.grad.data.zero_()
    return image
def forward(self, model, image, z, d_img=None, mask=None):
    """Perform one deep-dream gradient ascent step on `image`.

    Args:
        model: network whose activations are maximized
        image: input image tensor (must require grad)
        z: latent/target descriptor used when guided dreaming is enabled
        d_img (optional): depth image used to weight the gradient
        mask (optional): mask used to restrict the gradient

    Returns:
        The updated image tensor.
    """
    model.zero_grad()
    if self.config["fp16"]:
        with torch.cuda.amp.autocast():
            out = model(image)
    else:
        out = model(image)
    if self.config["guided"]:
        target = self.get_target(self.config, z, out)
        loss = -self.loss(out, target)
    else:
        loss = out.norm()
    loss.backward()

    avg_grad = np.abs(image.grad.data.cpu().numpy()).mean()
    norm_lr = self.lr / avg_grad
    grad = image.grad.data
    dream_grad = grad * (norm_lr * self.norm_str)
    if self.depth:
        d_img = torch.from_numpy(d_img)
        d_img = d_img[0, 0].to(self.device)
        dream_grad *= d_img * self.depth_w
    if mask is not None:
        mask = torch.from_numpy(mask)
        dream_grad *= mask.to(self.device)
    image.data += dream_grad
    image.data = clip(image.data)
    image.grad.data.zero_()
    return image
def dream(image, model, iterations, lr):
    """ Updates the image to maximize outputs for n iterations """
    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    image = Variable(Tensor(image), requires_grad=True)
    for i in range(iterations):
        model.zero_grad()
        out = model(image)
        loss = out.norm()
        loss.backward()
        avg_grad = np.abs(image.grad.data.cpu().numpy()).mean()
        norm_lr = lr / avg_grad
        image.data += norm_lr * image.grad.data
        image.data = clip(image.data)
        image.grad.data.zero_()
    return image.cpu().data.numpy()
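# The clip() shared by these dream loops is not shown. In common PyTorch deep
# dream implementations it clamps each channel of the normalized image back to
# the displayable range; a sketch under that assumption (ImageNet mean/std):
import torch

IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

def clip(image_tensor):
    # A normalized pixel p = (x - mean) / std lies in [-mean/std, (1 - mean)/std]
    # exactly when the raw pixel x lies in [0, 1].
    for c, (m, s) in enumerate(zip(IMAGENET_MEAN, IMAGENET_STD)):
        image_tensor[0, c] = torch.clamp(image_tensor[0, c], -m / s, (1 - m) / s)
    return image_tensor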
def update(self):
    # get points as percentage of maximum
    self.pointpc = float(self.points) / self.maxPoints
    # produce intermediate color
    pointpc, inverse = self.pointpc, 1 - self.pointpc
    self.color = (self.minColor[0] * inverse + self.maxColor[0] * pointpc,
                  self.minColor[1] * inverse + self.maxColor[1] * pointpc,
                  self.minColor[2] * inverse + self.maxColor[2] * pointpc)
    # not sure if necessary, but in case of rounding errors
    self.color = map(clip(1, 0), self.color)
def screen_distorsion(self, screen):
    # Screen shake
    shake = random.randint(-6, 6)
    width = 500 + shake
    height = 700 + shake
    screen.blit(pygame.transform.scale(screen, (width, height)), (-shake, -shake))
    # Screen distortion
    for _ in range(random.randint(5, 25)):
        x = random.randint(0, 500)
        y = random.randint(0, 700)
        width = random.randint(20, 200)
        height = random.randint(5, 25)
        surface = clip(screen, x - width, y - height, width, height)
        screen.blit(surface,
                    (x + random.randint(-20, 20), y + random.randint(-20, 20)))
def train(dataset):
    examples, target = dataset.examples, dataset.target
    N = len(examples)
    epsilon = 1. / (2 * N)
    w = [1. / N] * N
    h, z = [], []
    for k in range(K):
        h_k = L(dataset, w)
        h.append(h_k)
        error = sum(weight for example, weight in zip(examples, w)
                    if example[target] != h_k(example))
        # Avoid divide-by-0 from either 0% or 100% error rates:
        error = clip(error, epsilon, 1 - epsilon)
        for j, example in enumerate(examples):
            if example[target] == h_k(example):
                w[j] *= error / (1. - error)
        w = normalize(w)
        z.append(math.log((1. - error) / error))
    return WeightedMajority(h, z)
def calculate(self):
    if self.calculate_per_pixel() == False:
        e = self._calculate_full_energy()
        if not isinstance(e, ndarray):
            raise Exception("Return value of _calculate_full_energy should be of type numpy.ndarray")
        # ndarray.clip returns a clipped copy, so the result must be assigned back
        e = e.clip(utils.MIN_PIXEL_VALUE, utils.MAX_PIXEL_VALUE)
        self._energy = e
    else:
        h = len(self._image)
        if h < 1:
            raise Exception("Invalid image size")
        w = len(self._image[0])
        for y in range(0, h):
            for x in range(0, w):
                e = self._calculate_pixel_energy(x, y)
                if not isinstance(e, int):
                    raise Exception("Return value of _calculate_pixel_energy should be of type int")
                e = utils.clip(e)
                self._energy[y, x] = e
def ada_boost(dataset, L, K):
    """[Figure 18.34]"""
    examples, target = dataset.examples, dataset.target
    N = len(examples)
    epsilon = 1 / (2 * N)
    w = [1 / N] * N
    h, z = [], []
    for k in range(K):
        h_k = L(dataset, w)
        h.append(h_k)
        error = sum(weight for example, weight in zip(examples, w)
                    if example[target] != h_k(example))
        # avoid divide-by-0 from either 0% or 100% error rates
        error = clip(error, epsilon, 1 - epsilon)
        for j, example in enumerate(examples):
            if example[target] == h_k(example):
                w[j] *= error / (1 - error)
        w = normalize(w)
        z.append(math.log((1 - error) / error))
    return weighted_majority(h, z)
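# Worked example of the update above: with weighted error = 0.25, every
# correctly classified example has its weight multiplied by
# error / (1 - error) = 1/3 before renormalization, and the hypothesis gets
# voting weight log((1 - error) / error) = log 3.
import math

error = 0.25
assert abs(error / (1 - error) - 1 / 3) < 1e-12
assert abs(math.log((1 - error) / error) - math.log(3)) < 1e-12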
def dream(self, image, iterations, lr, neuron, offset):
    """ Updates the image to maximize outputs for n iterations """
    # is_available must be called, not just referenced (a bare function object
    # is always truthy)
    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    image = Variable(Tensor(image), requires_grad=True)
    for i in range(iterations):
        self.model.zero_grad()
        out = self.model(image)
        if neuron is None:
            loss = out.norm()
        else:
            loss = out[:, neuron, :, :].norm()
        loss.backward()
        avg_grad = max(np.abs(image.grad.data.cpu().numpy()).mean(), offset)
        norm_lr = lr / avg_grad
        image.data += norm_lr * image.grad.data
        image.data = clip(image.data)
        image.grad.data.zero_()
    return image.cpu().data.numpy()
def dream(image, model, iterations, lr, filter=-1):
    print(type(image), image.shape)
    # Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    image = torch.from_numpy(image).cuda()
    image.requires_grad_(True)
    # image = torch.from_numpy(image).requires_grad_(True).cuda()
    for i in range(iterations):
        model.zero_grad()
        if filter == -1:
            out = model(image)
        else:
            out = model(image)[0][filter]
        loss = out.norm()
        loss.backward()
        avg_grad = np.abs(image.grad.data.cpu().numpy()).mean()
        norm_lr = lr / avg_grad
        image.data += norm_lr * image.grad.data
        image.data = utils.clip(image.data)
        image.grad.data.zero_()
    return image.cpu().data.numpy()
def pgd(self, x_nat, x, y, network, loss):
    """
    Perform projected gradient descent from Madry et al. 2018

    :param x_nat: starting image
    :param x: starting point for optimization
    :param y: true label of image
    :param network: network
    :param loss: loss function
    :return: x, the maximum found
    """
    for i in range(self.gradSteps):
        # get jacobian
        jacobian, ell = utils.get_jacobian(network, copy.deepcopy(x), y, loss, cuda=self.cuda)
        # take gradient step
        x += self.alpha * torch.sign(jacobian)
        # project back into the eps-ball around x_nat, then onto the valid pixel range
        xT = x.detach()
        xT = utils.clip(xT, x_nat.detach() - self.eps, x_nat.detach() + self.eps)
        xT = torch.clamp(xT, 0, 1)
        x = xT
    ell = loss(x, y)
    return x, ell.item()
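# utils.clip(xT, lo, hi) above projects the iterate back into the L-infinity
# ball of radius eps around x_nat. An elementwise tensor clamp with the same
# effect would be (a sketch, not the original utils implementation):
import torch

def clip(x, lo, hi):
    return torch.max(torch.min(x, hi), lo)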
def dreamchapter(model, img, lr=0.01, iters=20, verbose=True, interval=5, jitter=32, layer_n=10):
    plt.ion()
    for i in range(iters):
        with torch.no_grad():
            rx, ry = torch.randint(-jitter, jitter + 1, (2,))
            img = torch.roll(img, (rx, ry), (-1, -2))
        img.requires_grad_(True)
        actvs = model(utils.norm(img), layer_n)
        lss = l2_norm(actvs)
        lss.backward()
        with torch.no_grad():
            img += lr / torch.abs(img.grad).mean() * img.grad
            img.grad.zero_()
            img = utils.clip(img)
            img = torch.roll(img, (-rx, -ry), (-1, -2))
        if verbose and (i % interval == 0):
            plt.imshow(utils.to_img(img))
            plt.title('Partial-Dream[iter#{:04d}]'.format(i))
            plt.pause(1e-3)
    plt.close('all')
    plt.ioff()
    return img
def setPin(pin, value):
    global PIGPIO
    PIGPIO.set_PWM_dutycycle(GPIOMapping_BCM[pin], utils.clip(value) * 255)
def DPVI(model, T, n_mc, N, batch_size, train_data, sigma, C, optimizer, use_cuda=False):
    input_dim = model.input_dim
    for i in range(T):
        ## Take minibatch
        minibatch = train_data.sample(batch_size, replace=False)
        ## Reset optimizer and ELBO
        optimizer.zero_grad()
        elbo = 0
        ## Draws for MC integration
        draws = torch.randn(n_mc, input_dim)
        ## MC integration for likelihood part of ELBO
        for j in range(n_mc):
            draw = model.forward(draws[j])
            log_likelihood_loss = -1. / n_mc * log_likelihood(
                minibatch.iloc[:, :-1],
                minibatch.iloc[:, -1].astype('double'), draw, use_cuda)
            elbo += log_likelihood_loss
            log_likelihood_loss.backward(retain_graph=True)

        ## Clip and add noise
        if sigma > 0:
            noise_w = sigma * C * torch.randn(input_dim)
            noise_b = sigma * C * torch.randn(input_dim)
            clip(model, C)
            g = torch.cat((model.reparam.weight.grad.data,
                           model.reparam.bias.grad.data), 1).clone()
            if not torch.all(g.norm(dim=1) < (C + 1e-9)):
                print(g.norm(dim=1).max())
                print(torch.any(torch.isnan(g)))
                return model
            model.reparam.weight.grad.add_(noise_w / batch_size)
            model.reparam.bias.grad.add_(noise_b / batch_size)

        ## MC integration for prior part of ELBO
        for j in range(n_mc):
            draw = model.forward(draws[j])
            log_prior_loss = -(batch_size / N) * log_prior(draw) / n_mc
            elbo += log_prior_loss
            log_prior_loss.backward(retain_graph=True)

        ## Add entropy to ELBO
        entropy = -(batch_size / N) * mvn_entropy(model.reparam)
        elbo += entropy
        entropy.backward(retain_graph=True)

        ## Take step
        optimizer.step()
        if i % 10 == 0:
            sys.stdout.write('\r{}% : ELBO = {}'.format(
                int(i * 100 / T), -1. * elbo.data.tolist()))
        if i == T - 1:
            sys.stdout.write('\rDone : ELBO = {}\n'.format(
                -1. * elbo.data.tolist()))
        sys.stdout.flush()
    return model
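# In this DP-SGD-style loop, clip(model, C) bounds the gradient norm to C so
# that the added Gaussian noise of scale sigma * C yields the intended privacy
# guarantee. A sketch using PyTorch's built-in utility (the original clip may
# instead clip per-example or per-row gradients, as the g.norm(dim=1) check
# above suggests):
import torch

def clip(model, C):
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=C)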
def setPin(pin, value):
    global PWM
    PWM[pin].ChangeDutyCycle(utils.clip(value) * 100)
def __init__(self, redFloat_Or_colorString, greenFloat=None, blueFloat=None, address=None):
    if greenFloat is None or blueFloat is None:
        self.colorString = string.strip(redFloat_Or_colorString)
        self.R = 0.0
        self.G = 0.0
        self.B = 0.0
        self.Address = 0
        if self.colorString[0] != "{" or self.colorString[len(self.colorString) - 1] != "}":
            raise ValueError("color must be defined within {} brackets: " + self.colorString)
        colorString = self.colorString[1:len(self.colorString) - 1]
        colorParts = string.split(colorString, ":")
        if not (colorParts[0] in ["x", "b", "f", "r", "hsv", "hsl"]):
            raise ValueError("unknown color type: " + colorParts[0])
        # extracting Address
        if len(colorParts) > 2:
            self.Address = int(colorParts[2], 16)
        else:
            self.Address = 0xF
        # extracting RGB
        if colorParts[0] == "x":
            rgbcomps = utils.getIntComponents(colorParts[1])
            self.R = rgbcomps[0] / 255.0
            self.G = rgbcomps[1] / 255.0
            self.B = rgbcomps[2] / 255.0
        if colorParts[0] == "b":
            rgbcomps = string.split(colorParts[1], ",")
            self.R = int(rgbcomps[0]) / 255.0
            self.G = int(rgbcomps[1]) / 255.0
            self.B = int(rgbcomps[2]) / 255.0
        if colorParts[0] == "f":
            rgbcomps = string.split(colorParts[1], ",")
            self.R = float(rgbcomps[0])
            self.G = float(rgbcomps[1])
            self.B = float(rgbcomps[2])
        if colorParts[0] == "r":
            rndValues = string.split(colorParts[1], ",")
            fromRed = float(string.split(rndValues[0], "-")[0])
            toRed = float(string.split(rndValues[0], "-")[1])
            fromGreen = float(string.split(rndValues[1], "-")[0])
            toGreen = float(string.split(rndValues[1], "-")[1])
            fromBlue = float(string.split(rndValues[2], "-")[0])
            toBlue = float(string.split(rndValues[2], "-")[1])
            self.R = utils.randfloat(fromRed, toRed)
            self.G = utils.randfloat(fromGreen, toGreen)
            self.B = utils.randfloat(fromBlue, toBlue)
        if colorParts[0] == "hsv":
            hsvcomps = string.split(colorParts[1], ",")
            h = float(hsvcomps[0])
            s = float(hsvcomps[1])
            v = float(hsvcomps[2])
            self.R, self.G, self.B = colorsys.hsv_to_rgb(h / 360.0, s / 100.0, v / 100.0)
        if colorParts[0] == "hsl":
            hslcomps = string.split(colorParts[1], ",")
            h = float(hslcomps[0])
            s = float(hslcomps[1])
            l = float(hslcomps[2])
            self.R, self.G, self.B = colorsys.hls_to_rgb(h / 360.0, l / 100.0, s / 100.0)
        self.R = utils.clip(self.R)
        self.G = utils.clip(self.G)
        self.B = utils.clip(self.B)
    else:
        self.R = float(redFloat_Or_colorString)
        self.G = float(greenFloat)
        self.B = float(blueFloat)
        if address is None:
            self.Address = 0xF
        else:
            self.Address = address
def move(self, walls):
    """
    Move the robot while keeping track of all walls.
    Can update the walls so it only keeps track of the walls present in a
    neighbouring grid, based on the distance the robot travels in one frame.
    :param walls:
    :return:
    """
    update = False
    collision = False
    j = 0
    new_position = self.position

    # clamp wheel velocities to the maximum speed
    if self.velocity_right > self.max_vel:
        self.velocity_right = round(self.max_vel, 1)
    if self.velocity_right < -self.max_vel:
        self.velocity_right = -round(self.max_vel, 1)
    if self.velocity_left > self.max_vel:
        self.velocity_left = round(self.max_vel, 1)
    if self.velocity_left < -self.max_vel:
        self.velocity_left = -round(self.max_vel, 1)

    # calculate force
    sum_vels = self.velocity_right + self.velocity_left
    sign = -1 if sum_vels < 0 else 1
    mag = math.sqrt(self.velocity_right ** 2 + self.velocity_left ** 2)
    self.force = mag * sign

    # if the wheels are not moving with equal velocity, resolve their movement differently
    if self.velocity_right != self.velocity_left:
        # motion model step
        new_x, new_y, theta = motion.Step(self.velocity_right, self.velocity_left,
                                          self.radius * 2,
                                          self.position[0], self.position[1],
                                          np.radians(self.orientation))

        # Discrete collision detection and resolution, looping over all walls as many
        # times as necessary until all collisions are resolved. Note that this does not
        # handle collisions with boundary walls - boundary wall collisions take
        # precedence and "break" the simulation. A trivial solution would be to place
        # secondary boundary walls on top (but slightly closer to the inside) of the
        # existing boundary walls and treat them as normal walls; I do not know why I
        # did not do that, but if it becomes necessary, I will add it.
        for i in range(len(walls)):
            if not update:
                j += 1
                if j >= len(walls):
                    break
            else:
                j = 0
                update = False
            wall = walls[j]
            is_intersection, new_P = physics.resolve_wall_collision(
                wall[0], wall[1], new_position, self.force, self.radius, self.orientation)
            if is_intersection:
                new_position = new_P
                collision = True
                # update = True

        # determine new position after accounting for the parallel velocity component
        self.frame += 1
        self.collisions += 1 if collision and self.frame % 2 == 0 else 0
        new_position = [new_position[0] + self.force * np.cos(np.radians(self.orientation)),
                        new_position[1] + self.force * np.sin(np.radians(self.orientation))]

        # if it moves too quickly, resolve collisions using continuous collision
        # detection - if there are no collisions, this just returns new_position itself
        new_position = physics.resolve_past_collision(walls, [], self.position, new_position,
                                                      self.radius, self.force, self.orientation)

        # set the robot position to the new position
        self.position = new_position
        # clip the robot's position to within the boundaries - could be done earlier
        utils.clip(self.position,
                   [self.radius + 1, self.radius + 1],
                   [config.WIDTH - int(config.HEIGHT / 3) - self.radius - 1,
                    config.HEIGHT - int(config.HEIGHT / 3) - self.radius - 1],
                   self)
        # set the orientation to the new orientation determined by the motion model
        self.orientation = np.degrees(theta)
        # update the sensors when done
        for sensor in self.sensors:
            sensor.update_sensor(self.position,
                                 np.radians(self.orientation - self.orientation_history[-1]),
                                 None)
    else:
        # Discrete collision detection and resolution, exactly as in the
        # unequal-velocity branch above (the same boundary-wall caveat applies).
        for i in range(len(walls)):
            if not update:
                j += 1
                if j >= len(walls):
                    break
            else:
                j = 0
                update = False
            wall = walls[j]
            is_intersection, new_P = physics.resolve_wall_collision(
                wall[0], wall[1], new_position, self.force, self.radius, self.orientation)
            if is_intersection:
                new_position = new_P
                collision = True
                # update = True
        collisions = []

        # determine new position after accounting for the parallel velocity component
        self.frame += 1
        self.collisions += 1 if collision and self.frame % 2 == 0 else 0
        new_position = [new_position[0] + self.force * np.cos(np.radians(self.orientation)),
                        new_position[1] + self.force * np.sin(np.radians(self.orientation))]

        # if it moves too quickly, resolve collisions using continuous collision
        # detection - if there are no collisions, this just returns new_position itself
        new_position = physics.resolve_past_collision(walls, [], self.position, new_position,
                                                      self.radius, self.force, self.orientation)

        # set the robot position to the new position
        self.position = new_position
        # clip the robot's position to within the boundaries
        utils.clip(self.position,
                   [self.radius + 1, self.radius + 1],
                   [config.WIDTH - int(config.HEIGHT / 3) - self.radius - 1,
                    config.HEIGHT - int(config.HEIGHT / 3) - self.radius - 1],
                   self)
        # update the sensors when done
        for sensor in self.sensors:
            sensor.update_sensor(self.position, 0, None)
        # update the robot's orientation accordingly
        self.rotate()

    # save position and orientation to their respective history lists
    self.save_position(self.position)
    self.save_orientation(self.orientation)