def draw_lab_b_slice(image, b):
    width, height = image.size
    pixels = image.load()
    for x in range(width):
        a = interpolate(-1.0, 1.0, x / width)
        for y in range(height):
            l = interpolate(99.9, 0.0, y / height)
            color = Color.NewFromLab(l, a, b)
            pixels[x,y] = color_to_ints(color)
def draw_lab_l_cylinder(image, l):
    width, height = image.size
    pixels = image.load()
    for x in range(width):
        hue = interpolate(0, 360, x / width)
        for y in range(height):
            chroma = interpolate(1.4, 0.0, y / height)
            a, b = chroma_hue_to_ab(chroma, hue)
            color = Color.NewFromLab(l, a, b)
            pixels[x,y] = color_to_ints(color)
def draw_lab_hue_spoke(image, hue):
    width, height = image.size
    pixels = image.load()
    for x in range(width):
        chroma = interpolate(-1.5, 1.5, x / width)
        for y in range(height):
            l = interpolate(99.9, 0.0, y / height)
            a, b = chroma_hue_to_ab(chroma, hue)
            color = Color.NewFromLab(l, a, b)
            pixels[x,y] = color_to_ints(color)
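These slice renderers lean on two helpers that are not shown: a three-argument linear interpolate(start, end, t) and chroma_hue_to_ab. A minimal sketch consistent with the call sites above, assuming a plain lerp and the usual polar-to-Cartesian conversion for CIELAB:

import math

def interpolate(start, end, t):
    # Linear blend from start to end at parameter t in [0, 1] (assumed form).
    return start + t * (end - start)

def chroma_hue_to_ab(chroma, hue):
    # Convert polar chroma/hue (hue in degrees) to Cartesian CIELAB a*, b*.
    theta = math.radians(hue)
    return chroma * math.cos(theta), chroma * math.sin(theta)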
Example #4
 def set(self, percentage):
   """
   Updates this servo's port on the board with the raw PPM signal value
   that corresponds to the given percentage value.
   @param percentage: a servo setting between -100 and 100
   """
   percentage = clamp(percentage, -100, 100)
   raw_ppm = 0
   if percentage < 0:
     raw_ppm = int(interpolate(percentage, -100, 0, self.min, self.center))
   else:
     raw_ppm = int(interpolate(percentage, 0, 100, self.center, self.max))
   self.board.servos[self.id] = raw_ppm
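The clamp and interpolate helpers that set() relies on are not shown. A minimal sketch, assuming interpolate(value, in_low, in_high, out_low, out_high) is a plain linear range mapping:

def clamp(value, low, high):
    # Limit value to the closed range [low, high].
    return max(low, min(high, value))

def interpolate(value, in_low, in_high, out_low, out_high):
    # Linearly map value from [in_low, in_high] onto [out_low, out_high].
    t = (value - in_low) / float(in_high - in_low)
    return out_low + t * (out_high - out_low)

With hypothetical PPM endpoints self.min=1000, self.center=1500, self.max=2000, set(-50) would write 1250 to the board and set(50) would write 1750.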
Example #5
def draw_cylinder_surface(image):
    from limits import max_chroma
    width, height = image.size
    pixels = image.load()
    for x in range(width):
        hue = interpolate(0, 360, x / width)
        for y in range(height):
            l = interpolate(99.9, 0.0, y / height)
            chroma = max_chroma(hue, l)
            if chroma:
                a, b = chroma_hue_to_ab(chroma, hue)
                color = Color.NewFromLab(l, a, b)
            else:
                color = gray
            pixels[x,y] = color_to_ints(color)
Example #7
def test_double_step():
    start, end = 1, 0
    steps = 2
    q = list(interpolate(start, end, steps))
    pprint(q)
    assert len(q) == steps
    assert q[0] == start and q[-1] == end
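The assertions pin down a generator-style signature: interpolate(start, end, steps) yields exactly steps evenly spaced values with both endpoints included. A sketch that satisfies the test, assuming nothing beyond what it checks:

def interpolate(start, end, steps):
    # Yield `steps` evenly spaced values from start to end, inclusive.
    for i in range(steps):
        t = i / (steps - 1) if steps > 1 else 0.0
        yield start + t * (end - start)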
Example #8
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = {
            'HG_FILE': fcd.path(),
            'HG_MY_NODE': short(mynode),
            'HG_OTHER_NODE': str(fco.changectx()),
            'HG_BASE_NODE': str(fca.changectx()),
            'HG_MY_ISLINK': 'l' in fcd.flags(),
            'HG_OTHER_ISLINK': 'l' in fco.flags(),
            'HG_BASE_ISLINK': 'l' in fca.flags(),
        }

        ui = repo.ui

        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back  # read input from backup, write to original
        replace = {'local': a, 'base': b, 'other': c, 'output': out}
        args = util.interpolate(r'\$', replace, args,
                                lambda s: util.shellquote(util.localpath(s)))
        r = ui.system(toolpath + ' ' + args, cwd=repo.root, environ=env)
        return True, r
    return False, 0
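In the Mercurial examples, util.interpolate does token substitution in a command string rather than numeric blending: each prefix+key token is replaced by the mapped value, optionally passed through a quoting function. A minimal sketch of the assumed behavior (not Mercurial's actual implementation; note that prefix arrives as a regex fragment such as r'\$'):

import re

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    # Replace each prefix+key token in s with fn(mapping[key]).
    fn = fn or (lambda v: v)
    if escape_prefix:
        # Assumes the prefix matches a literal '$', as in these examples,
        # so a doubled '$$' collapses to a single literal '$'.
        mapping = dict(mapping)
        mapping['$'] = '$'
    keys = '|'.join(re.escape(k) for k in mapping)
    pattern = re.compile('%s(%s)' % (prefix, keys))
    return pattern.sub(lambda m: fn(mapping[m.group(1)]), s)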
Example #9
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = {
            "HG_FILE": fcd.path(),
            "HG_MY_NODE": short(mynode),
            "HG_OTHER_NODE": str(fco.changectx()),
            "HG_BASE_NODE": str(fca.changectx()),
            "HG_MY_ISLINK": "l" in fcd.flags(),
            "HG_OTHER_ISLINK": "l" in fco.flags(),
            "HG_BASE_ISLINK": "l" in fca.flags(),
        }

        ui = repo.ui

        args = _toolstr(ui, tool, "args", "$local $base $other")
        if "$output" in args:
            out, a = a, back  # read input from backup, write to original
        replace = {"local": a, "base": b, "other": c, "output": out}
        args = util.interpolate(r"\$", replace, args, lambda s: util.shellquote(util.localpath(s)))
        cmd = toolpath + " " + args
        repo.ui.debug("launching merge tool: %s\n" % cmd)
        r = ui.system(cmd, cwd=repo.root, environ=env)
        repo.ui.debug("merge tool returned: %s\n" % r)
        return True, r
    return False, 0
Example #10
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = {'HG_FILE': fcd.path(),
               'HG_MY_NODE': short(mynode),
               'HG_OTHER_NODE': str(fco.changectx()),
               'HG_BASE_NODE': str(fca.changectx()),
               'HG_MY_ISLINK': 'l' in fcd.flags(),
               'HG_OTHER_ISLINK': 'l' in fco.flags(),
               'HG_BASE_ISLINK': 'l' in fca.flags(),
               }

        ui = repo.ui

        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = {'local': a, 'base': b, 'other': c, 'output': out}
        args = util.interpolate(r'\$', replace, args,
                                lambda s: util.shellquote(util.localpath(s)))
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
                        out=ui.fout)
        return True, r
    return False, 0
Example #11
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files):
    r = _premerge(repo, toolconf, files)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = dict(HG_FILE=fcd.path(),
                   HG_MY_NODE=short(mynode),
                   HG_OTHER_NODE=str(fco.changectx()),
                   HG_BASE_NODE=str(fca.changectx()),
                   HG_MY_ISLINK='l' in fcd.flags(),
                   HG_OTHER_ISLINK='l' in fco.flags(),
                   HG_BASE_ISLINK='l' in fca.flags())

        ui = repo.ui

        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = util.interpolate(r'\$', replace, args,
                                lambda s: '"%s"' % util.localpath(s))
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
                        out=ui.fout)
        return True, r
    return False, 0
Example #12
def effect(y, num_pixels, row_index):
    """Effect that maps the Mel filterbank frequencies onto the LED strip"""
    global prev_spectrums
    if row_index not in prev_spectrums:
        prev_spectrums[row_index] = np.tile(0.01, num_pixels // 2)

    if row_index not in r_filts:
        r_filts[row_index] = dsp.ExpFilter(np.tile(0.01, num_pixels // 2),
                                           alpha_decay=0.2,
                                           alpha_rise=0.99)
    if row_index not in b_filts:
        b_filts[row_index] = dsp.ExpFilter(np.tile(0.01, num_pixels // 2),
                                           alpha_decay=0.1,
                                           alpha_rise=0.5)
    if row_index not in common_modes:
        common_modes[row_index] = dsp.ExpFilter(np.tile(0.01, num_pixels // 2),
                                                alpha_decay=0.99,
                                                alpha_rise=0.01)

    y = np.copy(util.interpolate(y, num_pixels // 2))
    common_modes[row_index].update(y)
    diff = y - prev_spectrums[row_index]
    prev_spectrums[row_index] = np.copy(y)
    # Color channel mappings
    r = r_filts[row_index].update(y - common_modes[row_index].value)
    g = np.abs(diff)
    b = b_filts[row_index].update(np.copy(y))
    # Mirror the color channels for symmetric output
    r = np.concatenate((r[::-1], r))
    g = np.concatenate((g[::-1], g))
    b = np.concatenate((b[::-1], b))
    output = np.array([r, g, b]) * 255
    return output
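Here util.interpolate(y, n) resamples a spectrum array to n points rather than blending two scalars. A plausible sketch built on np.interp (an assumption; the real util module is not shown):

import numpy as np

def interpolate(y, new_length):
    # Resample a 1-D array to new_length points by linear interpolation.
    x_old = np.linspace(0.0, 1.0, len(y))
    x_new = np.linspace(0.0, 1.0, new_length)
    return np.interp(x_new, x_old, y)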
Example #14
    def visualize(self, board, y):

        y = np.copy(y)
        board.signalProcessor.gain.update(y)
        y /= board.signalProcessor.gain.value
        scale = config.settings["devices"][
            board.board]["effect_opts"]["Energy"]["scale"]
        # Scale by the width of the LED strip
        y *= float((config.settings["devices"][board.board]["configuration"]
                    ["N_PIXELS"] * scale) - 1)
        y = np.copy(
            util.interpolate(
                y, config.settings["devices"][board.board]["configuration"]
                ["N_PIXELS"] // 2))

        # spectrum = np.array([j for i in zip(spectrum, spectrum) for j in i])
        # Color channel mappings
        r = int(
            np.mean(y[:len(y) // 3]**scale) * config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["r_multiplier"])
        g = int(
            np.mean(y[len(y) // 3:2 * len(y) // 3]**scale) *
            config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["g_multiplier"])
        b = int(
            np.mean(y[2 * len(y) // 3:]**scale) * config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["b_multiplier"])
        # Assign color to different frequency regions
        board.visualizer.output[0, :r] = 255
        board.visualizer.output[0, r:] = 0
        board.visualizer.output[1, :g] = 255
        board.visualizer.output[1, g:] = 0
        board.visualizer.output[2, :b] = 255
        board.visualizer.output[2, b:] = 0
        # Apply blur to smooth the edges
        board.visualizer.output[0, :] = gaussian_filter1d(
            board.visualizer.output[0, :],
            sigma=config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["blur"])
        board.visualizer.output[1, :] = gaussian_filter1d(
            board.visualizer.output[1, :],
            sigma=config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["blur"])
        board.visualizer.output[2, :] = gaussian_filter1d(
            board.visualizer.output[2, :],
            sigma=config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["blur"])

        if config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["flip_lr"]:
            p = np.fliplr(board.visualizer.output)
        else:
            p = board.visualizer.output

        if config.settings["devices"][
                board.board]["effect_opts"]["Energy"]["mirror"]:
            p = np.concatenate((p[:, ::-2], p[:, ::2]), axis=1)

        return p
Example #15
	def position_at(self, time):
		dt = time - self.src_time

		if dt > 0.0 and self.total_time > 0.0:
			ratio = dt / self.total_time 
			return interpolate(self.src, self.dest, ratio)
		else:
			return self.src
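position_at treats interpolate as a component-wise lerp between two points. A sketch under that assumption, with src and dest taken to be plain coordinate sequences:

def interpolate(a, b, t):
    # Component-wise linear blend between points a and b, t in [0, 1].
    return tuple(ai + t * (bi - ai) for ai, bi in zip(a, b))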
Example #16
def process_arc(a: Arc):
    x, y = a.center
    if a.direction == "counterclockwise" and a.start_angle == a.end_angle:
        steps = interpolate(a.end_angle, -a.start_angle, 20)
        for step in steps:
            dx, dy = cos(step), sin(step)
            dx *= a.radius
            dy *= a.radius
            yield round(x + dx, 4), round(y + dy, 4)
Example #17
 def fn(ui, *args):
     env = {'HG_ARGS': ' '.join((self.name,) + args)}
     def _checkvar(m):
         if int(m.groups()[0]) <= len(args):
             return m.group()
         else:
             return ''
     cmd = re.sub(r'\$(\d+)', _checkvar, self.definition[1:])
     replace = dict((str(i + 1), arg) for i, arg in enumerate(args))
     replace['0'] = self.name
     replace['@'] = ' '.join(args)
     cmd = util.interpolate(r'\$', replace, cmd)
     return util.system(cmd, environ=env)
Example #19
    def n2SP(self, z, nProfile="functional", dataType=""):
        """set the n(z) profile for the south pole, using the functional form. Optional to use the SPICE density profile for depths shallower than 100m"""
        if nProfile == "functional":
            #This is a parameterization based on the SPICE data
            A = 1.78
            B = -.43
            C = -.0132

            func1 = np.full(self.halfZ, 1.0003)

            func2 = A + B * np.exp(C * z[self.halfZ:self.fullZ])

            func3 = np.append(func1, func2)

            n2Vec = func3 * func3

            self.n2Vec = n2Vec

            return n2Vec

        else:
            if (dataType == "phased"):
                return self.n2Phased()
            elif (dataType == "core2"):
                zz, n = np.loadtxt(self.path +
                                   '/share/spice2019_indOfRef_core2_5cm.dat',
                                   unpack=True)
            else:
                zz, n = np.loadtxt(self.path +
                                   '/share/spice2019_indOfRef_core1_5cm.dat',
                                   unpack=True)

            nInterp = n  #for 5cm data, which is default
            if self.nStepsPerMeter != 20:
                nInterp = util.interpolate(
                    n, int(self.nStepsPerMeter /
                           20))  #for anything coarser than 5cm data

            nFlip = np.flip(nInterp, axis=0)

            A = 1.78
            B = -.43
            C = -.0132

            func2 = A + B * np.exp(C * z[int(self.halfZ) + len(nInterp):])

            tmp0 = np.full(int(len(z) / 2), 1.0003)
            tmp1 = np.append(tmp0, nInterp)
            nOut = np.append(tmp1, func2)
            self.n2Vec = nOut * nOut
            return self.n2Vec
Example #20
 def visualize(self, board, y):
     y = np.copy(util.interpolate(y, board.config["N_PIXELS"] // 2))
     board.signalProcessor.common_mode.update(y)
     diff = y - board.visualizer.prev_spectrum
     board.visualizer.prev_spectrum = np.copy(y)
     # Color channel mappings
     r = board.signalProcessor.r_filt.update(
         y - board.signalProcessor.common_mode.value)
     g = np.abs(diff)
     b = board.signalProcessor.b_filt.update(np.copy(y))
     r = np.array([j for i in zip(r, r) for j in i])
      mode = board.effectConfig["Wavelength"]["color_mode"]
      reverse = board.effectConfig["Wavelength"]["reverse_grad"]
      gradient = board.visualizer.multicolor_modes[mode]
      span = (slice(board.config["N_PIXELS"], None) if reverse
              else slice(None, board.config["N_PIXELS"]))
      output = np.array([gradient[0][span] * r,
                         gradient[1][span] * r,
                         gradient[2][span] * r])
      #board.visualizer.prev_spectrum = y
      board.visualizer.multicolor_modes[mode] = np.roll(
          gradient,
          board.effectConfig["Wavelength"]["roll_speed"] *
          (-1 if board.effectConfig["Wavelength"]["reverse_roll"] else 1),
          axis=1)
     output[0] = gaussian_filter1d(
         output[0], sigma=board.effectConfig["Wavelength"]["blur"])
     output[1] = gaussian_filter1d(
         output[1], sigma=board.effectConfig["Wavelength"]["blur"])
     output[2] = gaussian_filter1d(
         output[2], sigma=board.effectConfig["Wavelength"]["blur"])
     if board.effectConfig["Wavelength"]["flip_lr"]:
         output = np.fliplr(output)
     if board.effectConfig["Wavelength"]["mirror"]:
         output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)
     return output
Example #21
def make_lab_slices():
    for size in (16, 128, 512, 1024):
        img = Image.new('RGB', (size, size), 'white')

        for l in range(5, 100, 5):
            draw_lab_l_slice(img, l)
            name = 'CIELAB_{}_L{:02d}.png'.format(size, int(l))
            print(name)
            img.save(name)

        for a_index in range(1, 19 + 1):
            a = interpolate(-1.0, 1.0, a_index / 20)
            draw_lab_a_slice(img, a)
            name = 'CIELAB_{}_a{:02d}.png'.format(size, a_index)
            print(name)
            img.save(name)

        for b_index in range(1, 19 + 1):
            b = interpolate(-1.0, 1.0, b_index / 20)
            draw_lab_b_slice(img, b)
            name = 'CIELAB_{}_b{:02d}.png'.format(size, b_index)
            print(name)
            img.save(name)
Example #22
 def draw_at(self, P, step):
     """Draw the field vector at the given point."""
     B = self[P]
     if self.avg_mag == 0:
         val = 0
     else:
         #get value in between smallest and largest
         val = 1-((B.mag-self.smallest_mag)\
                  /(self.largest_mag-self.smallest_mag))
     #set size of pointers, radius = step/rad
     rad = 15
     cone(pos = P, axis = B.norm(), radius=step/rad, length=step,
             display = self.scene, color = interpolate(self.color, val),
             opacity = val)
Example #23
def inner_product_test():
    print ">> running inner product test ",
    n = 10
    alphas = []
    cs = []
    for i in range(n + 1):
        a = 1.0 * i / n
        c = np.array(util.interpolate(c1, c2, a))
        alphas.append(a)
        cs.append(c)
    for i in range(len(alphas)):
        a = alphas[i]
        p1 = o.inner_product(C1, cs[i])
        p2 = o.inner_product(C2, cs[i])
        print "{0:8f} {1:10f} {2:10f}".format(a, p1, p2)
Example #25
 def scale_axis_value(self, value, axis):
     """
 Scales the given value for the given axis to a number from -1 to 1 for
 the stick, and 0 to 1 for the throttle.
 """
     range = None
     low, high = -1.0, 1.0
     if axis == X_AXIS:
         range = self.x_range
     elif axis == Y_AXIS:
         range = self.y_range
     elif axis == T_AXIS:
         range = self.t_range
         low = 0.0
     return interpolate(value, range.low, range.high, low, high)
Example #27
    def visualize(self, board, y):
        y = y**4.0
        # signal_processers[board.board].gain.update(y)
        # y /= signal_processers[board.board].gain.value
        # y *= 255.0

        n_pixels = config.settings["devices"][board.board]["configuration"]["N_PIXELS"]
        y = np.copy(util.interpolate(y, n_pixels // 2))
        board.signalProcessor.common_mode.update(y)
        diff = y - board.visualizer.prev_spectrum
        board.visualizer.prev_spectrum = np.copy(y)
        # split spectrum up
        # r = signal_processers[board.board].r_filt.update(y - signal_processers[board.board].common_mode.value)
        # g = np.abs(diff)
        # b = signal_processers[board.board].b_filt.update(np.copy(y))
        y = np.clip(y, 0, 1)
        lows = y[:len(y) // 6]
        mids = y[len(y) // 6: 2 * len(y) // 5]
        high = y[2 * len(y) // 5:]
        # max values
        lows_max = np.max(lows)#*config.settings["devices"][board.board]["effect_opts"]["Scroll"]["lows_multiplier"])
        mids_max = float(np.max(mids))#*config.settings["devices"][board.board]["effect_opts"]["Scroll"]["mids_multiplier"])
        high_max = float(np.max(high))#*config.settings["devices"][board.board]["effect_opts"]["Scroll"]["high_multiplier"])
        # indexes of max values
        # map to colour gradient
        lows_val = (np.array(config.settings["colors"][config.settings["devices"][board.board]["effect_opts"]["Scroll"]["lows_color"]]) * lows_max).astype(int)
        mids_val = (np.array(config.settings["colors"][config.settings["devices"][board.board]["effect_opts"]["Scroll"]["mids_color"]]) * mids_max).astype(int)
        high_val = (np.array(config.settings["colors"][config.settings["devices"][board.board]["effect_opts"]["Scroll"]["high_color"]]) * high_max).astype(int)
        # Scrolling effect window
        speed = config.settings["devices"][board.board]["effect_opts"]["Scroll"]["speed"]
        board.visualizer.output[:, speed:] = board.visualizer.output[:, :-speed]
        board.visualizer.output = (board.visualizer.output * config.settings["devices"][board.board]["effect_opts"]["Scroll"]["decay"]).astype(int)
        board.visualizer.output = gaussian_filter1d(board.visualizer.output, sigma=config.settings["devices"][board.board]["effect_opts"]["Scroll"]["blur"])
        # Create new color originating at the center
        board.visualizer.output[0, :speed] = lows_val[0] + mids_val[0] + high_val[0]
        board.visualizer.output[1, :speed] = lows_val[1] + mids_val[1] + high_val[1]
        board.visualizer.output[2, :speed] = lows_val[2] + mids_val[2] + high_val[2]
        # Update the LED strip
        #return np.concatenate((vis.prev_spectrum[:, ::-speed], vis.prev_spectrum), axis=1)
        if config.settings["devices"][board.board]["effect_opts"]["Scroll"]["mirror"]:
            p = np.concatenate((board.visualizer.output[:, ::-2], board.visualizer.output[:, ::2]), axis=1)
        else:
            p = board.visualizer.output
        return p
Example #28
def effect(y):
    """Effect that maps the Mel filterbank frequencies onto the LED strip"""
    global _prev_spectrum
    y = np.copy(util.interpolate(y, config.N_PIXELS // 2))
    common_mode.update(y)
    diff = y - _prev_spectrum
    _prev_spectrum = np.copy(y)
    # Color channel mappings
    r = r_filt.update(y - common_mode.value)
    g = np.abs(diff)
    b = b_filt.update(np.copy(y))
    # Mirror the color channels for symmetric output
    r = np.concatenate((r[::-1], r))
    g = np.concatenate((g[::-1], g))
    b = np.concatenate((b[::-1], b))
    output = np.array([r, g, b]) * 255
    return output
Example #30
 def fn(ui, *args):
     env = {'HG_ARGS': ' '.join((self.name,) + args)}
     def _checkvar(m):
         if m.groups()[0] == '$':
             return m.group()
         elif int(m.groups()[0]) <= len(args):
             return m.group()
         else:
             ui.debug("No argument found for substitution "
                      "of %i variable in alias '%s' definition."
                      % (int(m.groups()[0]), self.name))
             return ''
     cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
     replace = dict((str(i + 1), arg) for i, arg in enumerate(args))
     replace['0'] = self.name
     replace['@'] = ' '.join(args)
     cmd = util.interpolate(r'\$', replace, cmd, escape_prefix=True)
     return util.system(cmd, environ=env, out=ui.fout)
Example #32
    def solve_nonlinear(self, params, unknowns, resids):

        U0 = params['wind_speed']

        def power_table_LLT(U0):
            v = U0
            if v == 7: return 970.0
            if v == 8: return 1780.0
            if v == 9: return 2770.0
            if v == 10: return 3910.0
            if v == 11: return 5190.0

        if ceil(U0) == floor(U0):
            p = power_table_LLT(U0)
        else:
            p = interpolate(floor(U0), power_table_LLT(floor(U0)), ceil(U0), power_table_LLT(ceil(U0)), U0)

        unknowns['power'] = p
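This example, the interpolation method in Example #33, Cs in Example #38, and ct_LLT in Example #42 all assume a two-point form: interpolate(x1, y1, x2, y2, x) returns the value at x on the straight line through (x1, y1) and (x2, y2). A minimal sketch of that assumed helper:

def interpolate(x1, y1, x2, y2, x):
    # Value at x on the straight line through (x1, y1) and (x2, y2).
    return y1 + (y2 - y1) * (x - x1) / float(x2 - x1)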
Example #33
 def interpolation(self, value):
     ii = 0
     lower = []
     upper = []
     if value <= self.x[0]:
         result = self.y[0]
     elif value < self.x[-1]:
         for x in self.x:
             if x <= value:
                 lower = [x, self.y[ii]]
             else:
                 upper = [x, self.y[ii]]
                 break
             ii += 1
         result = interpolate(float(lower[0]), float(lower[1]), float(upper[0]), float(upper[1]), value)
     else:
         result = self.y[-1]
     return result
Example #34
    def visualize(self, board, y):
        y = np.copy(util.interpolate(y, board.config["N_PIXELS"] // 2))
        board.signalProcessor.common_mode.update(y)
        diff = y - board.visualizer.prev_spectrum
        board.visualizer.prev_spectrum = np.copy(y)
        # Color channel mappings
        r = board.signalProcessor.r_filt.update(
            y - board.signalProcessor.common_mode.value)
        g = np.abs(diff)
        b = board.signalProcessor.b_filt.update(np.copy(y))

        # Mirror the color channels for symmetric output
        r = np.concatenate((r[::-1], r))
        g = np.concatenate((g[::-1], g))
        b = np.concatenate((b[::-1], b))

        outputGradient = np.array([
            board.visualizer.multicolor_modes[board.effectConfig["Spectrum"][
                "color_mode"]][0][:board.config["N_PIXELS"]],
            board.visualizer.multicolor_modes[board.effectConfig["Spectrum"][
                "color_mode"]][1][:board.config["N_PIXELS"]],
            board.visualizer.multicolor_modes[board.effectConfig["Spectrum"][
                "color_mode"]][2][:board.config["N_PIXELS"]],
        ])

        r = np.multiply(r, outputGradient[0])
        g = np.multiply(g, outputGradient[1])
        b = np.multiply(b, outputGradient[2])

        r = gaussian_filter1d(r,
                              sigma=board.effectConfig["Spectrum"]["blur"] * 2)
        g = gaussian_filter1d(g,
                              sigma=board.effectConfig["Spectrum"]["blur"] * 2)
        b = gaussian_filter1d(b,
                              sigma=board.effectConfig["Spectrum"]["blur"] * 2)

        r = np.minimum(255, np.multiply(r, 2))
        g = np.minimum(255, np.multiply(g, 2))
        b = np.minimum(255, np.multiply(b, 2))

        output = np.array([r, g, b])

        board.visualizer.prev_spectrum = y
        return output
Example #36
 def visualize(self, board, y):
     y = np.copy(util.interpolate(y, board.config["N_PIXELS"] // 2))
     board.signalProcessor.common_mode.update(y)
     board.prev_spectrum = np.copy(y)
     # Color channel mappings
     r = board.signalProcessor.r_filt.update(
         y - board.signalProcessor.common_mode.value)
     r = np.array([j for i in zip(r, r) for j in i])
      # Split y into [resolution] chunks and take the maximum of each
     max_values = np.array([
         max(i) for i in np.array_split(
             r, board.effectConfig["Bars"]["resolution"])
     ])
     max_values = np.clip(max_values, 0, 1)
     color_sets = []
     for i in range(board.effectConfig["Bars"]["resolution"]):
          # [r,g,b] values from a multicolour gradient array at [resolution] equally spaced intervals
         color_sets.append([board.visualizer.multicolor_modes[board.effectConfig["Bars"]["color_mode"]]\
                           [j][i*(board.config["N_PIXELS"]//board.effectConfig["Bars"]["resolution"])] for j in range(3)])
     output = np.zeros((3, board.config["N_PIXELS"]))
     chunks = np.array_split(output[0],
                             board.effectConfig["Bars"]["resolution"])
     n = 0
     # Assign blocks with heights corresponding to max_values and colours from color_sets
     for i in range(len(chunks)):
         m = len(chunks[i])
         for j in range(3):
             output[j][n:n + m] = color_sets[i][j] * max_values[i]
         n += m
     board.visualizer.multicolor_modes[
         board.effectConfig["Bars"]["color_mode"]] = np.roll(
             board.visualizer.multicolor_modes[board.effectConfig["Bars"]
                                               ["color_mode"]],
             board.effectConfig["Bars"]["roll_speed"] *
             (-1 if board.effectConfig["Bars"]["reverse_roll"] else 1),
             axis=1)
     if board.effectConfig["Bars"]["flip_lr"]:
         output = np.fliplr(output)
     if board.effectConfig["Bars"]["mirror"]:
         output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)
     return output
Example #37
 def visualize(self, board, y):
     #board.effectConfig["Power"]["color_mode"]
     # Bit of fiddling with the y values
     y = np.copy(util.interpolate(y, board.config["N_PIXELS"] // 2))
     board.signalProcessor.common_mode.update(y)
     self.prev_spectrum = np.copy(y)
     # Color channel mappings
     r = board.signalProcessor.r_filt.update(y - board.signalProcessor.common_mode.value)
     r = np.array([j for i in zip(r,r) for j in i])
     output = np.array([board.visualizer.multicolor_modes[board.effectConfig["Power"]["color_mode"]][0, :board.config["N_PIXELS"]]*r,
                        board.visualizer.multicolor_modes[board.effectConfig["Power"]["color_mode"]][1, :board.config["N_PIXELS"]]*r,
                        board.visualizer.multicolor_modes[board.effectConfig["Power"]["color_mode"]][2, :board.config["N_PIXELS"]]*r])
     # if there's a high (eg clap):
     if board.visualizer.current_freq_detects["high"]:
         self.power_brightness = 1.0
         # Generate random indexes
         self.power_indexes = random.sample(range(board.config["N_PIXELS"]), board.config["N_PIXELS"]//6)
         #print("ye")
     # Assign colour to the random indexes
     for index in self.power_indexes:
         output[0, index] = int(config.settings["colors"][board.effectConfig["Power"]["s_color"]][0]*self.power_brightness)
         output[1, index] = int(config.settings["colors"][board.effectConfig["Power"]["s_color"]][1]*self.power_brightness)
         output[2, index] = int(config.settings["colors"][board.effectConfig["Power"]["s_color"]][2]*self.power_brightness)
     # Remove some of the indexes for next time
     self.power_indexes = [i for i in self.power_indexes if i not in random.sample(self.power_indexes, len(self.power_indexes)//4)]
     if len(self.power_indexes) <= 4:
         self.power_indexes = []
     # Fade the colour of the sparks out a bit for next time
     if self.power_brightness > 0:
         self.power_brightness -= 0.05
     # Calculate length of bass bar based on max bass frequency volume and length of strip
     strip_len = int((board.config["N_PIXELS"]//3)*max(y[:int(board.config["N_FFT_BINS"]*0.2)]))
     # Add the bass bars into the output. Colour proportional to length
     output[0][:strip_len] = board.visualizer.multicolor_modes[board.effectConfig["Power"]["color_mode"]][0][strip_len]
     output[1][:strip_len] = board.visualizer.multicolor_modes[board.effectConfig["Power"]["color_mode"]][1][strip_len]
     output[2][:strip_len] = board.visualizer.multicolor_modes[board.effectConfig["Power"]["color_mode"]][2][strip_len]
     if board.effectConfig["Power"]["flip_lr"]:
         output = np.fliplr(output)
     if board.effectConfig["Power"]["mirror"]:
         output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)
     return output
Example #38
def Cs(roof, Ct):
    if almost_equal(Ct, 1.0):
        if roof.slippery:
            x1, y1 = 5.0, 1.0
        else:
            x1, y1 = 30.0, 1.0
        x2, y2 = 70.0, 0.0
    elif almost_equal(Ct, 1.1):
        if roof.slippery:
            x1, y1 = 10.0, 1.0
        else:
            x1, y1 = 37.5, 1.0
        x2, y2 = 70.0, 0.0
    elif almost_equal(Ct, 1.2):
        if roof.slippery:
            x1, y1 = 15.0, 1.0
        else:
            x1, y1 = 45.0, 1.0
        x2, y2 = 70.0, 0.0
    else:
        raise ValueError("Ct must be 1.0, 1.1 or 1.2")
    return min(1.0, max(0.0, interpolate(x1, y1, x2, y2, roof.theta())))
Example #39
	def calculate_movements(self):
		self.movements = []

		t = self.arrival
		for i in range(len(self.path) - 1):
			m = Movement()
			m.time = t
			m.start = self.path[i]
			m.end = self.path[i+1]
			m.speed = self.speed

			duration = distance(m.start, m.end) / m.speed
			t += duration

			if t > 0:
				if m.time < 0:
					# Interpolate position at t = 0
					ratio   = -m.time / duration
					m.start = interpolate(m.start, m.end, ratio)
					self.arrival = m.time = 0.0

				self.movements.append(m)

		self.leave = t
Example #40
def main():
    np.seterr(all='ignore')
    ap = argparse.ArgumentParser()
    ap.add_argument('--data-path', dest='data_path', default="/caps2/tsupinie/")
    ap.add_argument('--exp-name', dest='exp_name', required=True)

    args = ap.parse_args()

    exp_base = args.data_path
    exp_name = args.exp_name
    n_ensemble_members = 40

#   base_time = datetime(2009, 6, 5, 18, 0, 0)
#   epoch = datetime(1970, 1, 1, 0, 0, 0)
#   base_epoch = (base_time - epoch).total_seconds()
#
#   sec_times = np.arange(14400, 18300, 300)
#   times = [ base_time + timedelta(seconds=int(t)) for t in sec_times ]
    temp = goshen_1km_temporal(start=14400)

    bounds = (slice(100, 180), slice(90, 170))
#   bounds = (slice(None), slice(None))

#   proj = setupMapProjection(goshen_1km_proj, goshen_1km_gs, bounds)
#   map = Basemap(**proj)
    grid = goshen_1km_grid(bounds=bounds)

    obs_file_names = ['psu_straka_mesonet.pkl', 'ttu_sticknet.pkl', 'asos.pkl']
    all_obs = loadObs(obs_file_names, temp.getDatetimes(aslist=True), grid, grid.getWidthHeight())

    obs_x, obs_y = grid(all_obs['longitude'], all_obs['latitude'])
    obs_z = all_obs['elevation']

    grdbas_file = "%s/1kmf-%s/ena001.hdfgrdbas" % (exp_base, exp_name)
    grdbas = nio.open_file(grdbas_file, mode='r', format='hdf')
    y_axis = decompressVariable(grdbas.variables['y'])[bounds[1]]
    x_axis = decompressVariable(grdbas.variables['x'])[bounds[0]]

    y_axis = y_axis - y_axis[0]
    x_axis = x_axis - x_axis[0]

#   fcst_files = glob.glob("%s/1km-control-%s/ena???.hdf014[47]00" % (exp_base, exp_name))
#   fcst_files.extend(glob.glob("%s/1km-control-%s/ena???.hdf01[5678]*" % (exp_base, exp_name)))

#   ens, ens_members, ens_times = loadAndInterpolateEnsemble(fcst_files, ['u', 'v', 'pt', 'p', 'qv', 'qr', 'qs', 'qh'], getTempDewpRefl, grdbas_file, 
#       {'z':10}, agl=True, wrap=True)

    ens = loadEnsemble("/caps2/tsupinie/1kmf-%s/" % exp_name, n_ensemble_members, temp.getTimes(), (['u', 'v', 'pt', 'p', 'qv', 'qr', 'qs', 'qh'], getTempDewpRefl), {'sigma':2}, agl=True, wrap=True) 
#   ens = ens[:, :, 2, :, :]

    ens_slice = [ slice(None), slice(None) ]
    ens_slice.extend(bounds[::-1])

    ens_mean = np.empty(ens.shape[1:], dtype=[('t', np.float32), ('td', np.float32), ('u', np.float32), ('v', np.float32)])
    for var in ens_mean.dtype.fields:
        ens_mean[var] = ens[var].mean(axis=0)

    pickle.dump(ens_mean, open("cold_pool_1kmf-%s.pkl" % exp_name, 'wb'), -1)

    ens = ens[tuple(ens_slice)]

    ens_refl = np.maximum(0, ens['refl'].mean(axis=0)) #probMatchMean(ens['refl'])

    ens_obs = np.empty(ens.shape[:1] + all_obs.shape, dtype=ens_mean.dtype)

    for lde in range(ens.shape[0]):
        for var in ens_obs.dtype.fields:
            for ob_idx, (ob_x, ob_y) in enumerate(zip(obs_x, obs_y)):
                wdt = temp.getEpochs(aslist=True).index(int(all_obs['nom_time'][ob_idx]))
                ens_obs[var][lde, ob_idx] = interpolate(ens[var][lde, wdt, np.newaxis], {'y':y_axis, 'x':x_axis}, {'y':ob_y, 'x':ob_x})

#   print ens_obs.shape
    ens_obs_std = np.empty(ens_obs.shape[1:], dtype=ens_obs.dtype)
    ens_obs_mean = np.empty(ens_obs.shape[1:], dtype=ens_obs.dtype)

    for var in ens_obs_std.dtype.fields:
        ens_obs_std[var] = ens_obs[var].std(ddof=1, axis=0)
        ens_obs_mean[var] = ens_obs[var].mean(axis=0)

    pickle.dump(ens_obs_mean, open("cold_pool_obs_1kmf-%s.pkl" % exp_name, 'wb'), -1)

#   print ens_obs_std.shape

#   for wdt, (time_sec, time_epoch) in enumerate(zip(temp, temp.getEpochs())):
#       time_ob_idxs = np.where(all_obs['time'] == time_epoch)[0]
#
#       ob_locations = (all_obs[time_ob_idxs]['longitude'], all_obs[time_ob_idxs]['latitude'])
#
#       temp_K = 5. / 9. * (all_obs[time_ob_idxs]['temp'] - 32) + 273.15
#       dewp_K = 5. / 9. * (all_obs[time_ob_idxs]['dewp'] - 32) + 273.15
#
#       wdir = all_obs[time_ob_idxs]['wind_dir']
#       wspd = all_obs[time_ob_idxs]['wind_spd']
#
#       u = -wspd * np.sin(np.radians(wdir))
#       v = -wspd * np.cos(np.radians(wdir))
#
#       print "Plotting temperature ..."
#       plotComparison(ens_mean['t'][wdt], ens_obs_mean['t'][time_ob_idxs], ens_obs_std['t'][time_ob_idxs], temp_K, ob_locations, ens_refl[wdt], grid, np.arange(289., 298., 1.), matplotlib.cm.get_cmap('Blues_r'),
#           "Ensemble Mean/Obs Comparison at Time %s" % time_sec, "cold_pool_t_%s.png" % time_sec)
#       print "Plotting dewpoint ..."
#       plotComparison(ens_mean['td'][wdt], ens_obs_mean['td'][time_ob_idxs], ens_obs_std['td'][time_ob_idxs], dewp_K, ob_locations, ens_refl[wdt], grid, np.arange(277., 290., 1.), matplotlib.cm.get_cmap('YlGn'),
#           "Ensemble Mean/Obs Comparison at Time %s" % time_sec, "cold_pool_td_%s.png" % time_sec)
#
#       print "Plotting u ..."
#       plotComparison(ens_mean['u'][wdt], ens_obs_mean['u'][time_ob_idxs], ens_obs_std['u'][time_ob_idxs], u, ob_locations, ens_refl[wdt], grid, np.arange(-20., 22., 2.), matplotlib.cm.get_cmap('RdBu_r'),
#           "Ensemble Mean/Obs Comparison at Time %s" % time_sec, "cold_pool_u_%s.png" % time_sec)
#       print "Plotting v ..."
#       plotComparison(ens_mean['v'][wdt], ens_obs_mean['v'][time_ob_idxs], ens_obs_std['v'][time_ob_idxs], v, ob_locations, ens_refl[wdt], grid, np.arange(-20., 22., 2.), matplotlib.cm.get_cmap('RdBu_r'),
#           "Ensemble Mean/Obs Comparison at Time %s" % time_sec, "cold_pool_v_%s.png" % time_sec)
    return
Example #41
 def _interpolate_axis(self, axis, v):
     i = self._find_interval(v)
     v = rinterpolate(self.intervals[i - 1], self.intervals[i], v)
     return interpolate(self.colors[i - 1][axis], self.colors[i][axis], v)
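_interpolate_axis pairs a forward lerp with rinterpolate, an inverse lerp that recovers the fractional position of v between two interval endpoints. A sketch of the assumed pair:

def rinterpolate(a, b, v):
    # Inverse lerp: fraction of the way v lies from a to b.
    return (v - a) / float(b - a) if b != a else 0.0

def interpolate(a, b, t):
    # Forward lerp: the value a fraction t of the way from a to b.
    return a + t * (b - a)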
Example #42
def ct_LLT(U0):
    if ceil(U0) == floor(U0):
        return ct_table_LLT(U0)
    else:
        return interpolate(floor(U0), ct_table_LLT(floor(U0)), ceil(U0), ct_table_LLT(ceil(U0)), U0)
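A quick usage check of this piecewise-linear table lookup, using the two-point interpolate sketched under Example #32 and a hypothetical ct_table_LLT:

from math import ceil, floor

def ct_table_LLT(v):
    # Hypothetical thrust coefficients at integer wind speeds.
    return {7: 0.80, 8: 0.75, 9: 0.69, 10: 0.62, 11: 0.54}[int(v)]

print(ct_LLT(8.5))  # halfway between ct(8) and ct(9) -> 0.72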
Example #43
c2p = [c2[1], c2[2], c2[3], c2[0]]
d12, R12 = d.distance(c1, c2, p)
d12p, R12p = d.distance(c1, c2, p)
print("{0:10f}".format(d12))
print(R12)
print("-- permutation --")
print("{0:10f}".format(d12p))
print(R12p)
print()

sigmas = [0.1, 0.5, 1.0]
print("# triangle test: alpha, d1 d2, ... for sigmas=", sigmas)
n = 10
for i in range(n + 1):
    a = 1.0 * i / n
    c = util.interpolate(c1, c2, a)
    print("{0:4g}".format(a), end=" ")
    for sig in sigmas:
        d1, R1 = d.distance(c1, c, p, sig)
        d2, R2 = d.distance(c2, c, p, sig)
        print("{0:10f}{1:10f}".format(d1, d2), end=" ")
    print()
print()

print("# d vs p: p d12 .. for sigmas=", sigmas)
for p in range(10):
    print("{0:2d}".format(p), end=" ")
    for sig in sigmas:
        #d11 = compute_distance(s1,s1,p)
        d12, R12 = d.distance(c1, c2, p, sig)
Example #45
    print('EVs Overnight', nevs_h)
    print('EVs Work', nevs_w)
    if (nevs_h == 0) or (nevs_w == 0):
        continue
    # Distributions of work and home distances
#    params_h = util.compute_lognorm_cdf(hhome.loc[ss], params=True)
#    params_w = util.compute_lognorm_cdf(hwork.loc[ss], params=True)

# compute base load for worst week, with a 7-day buffer before and a 1-day buffer after
    folder_profiles = r'c:\user\U546416\Documents\PhD\Data\Mobilité\Data_Traitee\Conso\SS_profiles\\'
    conso_profile = pd.read_csv(folder_profiles + ss + '.csv',
                                engine='python',
                                index_col=0)
    load = util.interpolate(util.get_max_load_week(conso_profile.squeeze(),
                                                   buffer_before=7,
                                                   buffer_after=1,
                                                   extra_t=1),
                            step=step)
    load = load.iloc[:-1]

    # Create grid
    grid = EVmodel.Grid(name=ss,
                        ndays=ndays,
                        step=step,
                        load=load,
                        ss_pmax=SS.Pmax[ss],
                        verbose=False)
    # Add EVs
    grid.add_evs('Overnight',
                 nevs_h,
                 ev_type=ev_type,
Example #46
def go(arg):

    tbw = SummaryWriter(log_dir=arg.tb_dir)

    transform = Compose([
        Lambda(lambda x: CenterCrop(min(x.size))(x)),
        Resize(size=(arg.img_size, arg.img_size)),
        ToTensor()
    ])

    imdir = arg.data_dir + os.sep + 'val2017'
    anfile = arg.data_dir + os.sep + 'annotations' + os.sep + 'captions_val2017.json'

    coco_data = coco.CocoCaptions(root=imdir,
                                  annFile=anfile,
                                  transform=transform)

    ## Make a dictionary

    util.ensure(arg.cache_dir)
    if os.path.isfile(arg.cache_dir + os.sep + 'i2w.pkl'):
        with open(arg.cache_dir + os.sep + 'i2w.pkl', 'rb') as file:
            i2w = pickle.load(file)
        with open(arg.cache_dir + os.sep + 'w2i.pkl', 'rb') as file:
            w2i = pickle.load(file)
        print('Word indices loaded.')
    else:
        print('Creating word indices')  # Why is this so slow?

        dist = Counter()
        for i in tqdm.trange(len(coco_data)):
            for caption in coco_data[i][1]:
                dist.update(util.tokenize(caption))

        vocab = dist.most_common(arg.max_vocab - len(EXTRA_SYMBOLS))

        i2w = EXTRA_SYMBOLS + [w[0] for w in vocab]
        w2i = {word: ix for ix, word in enumerate(i2w)}

        with open(arg.cache_dir + os.sep + 'i2w.pkl', 'wb') as file:
            pickle.dump(i2w, file)
        with open(arg.cache_dir + os.sep + 'w2i.pkl', 'wb') as file:
            pickle.dump(w2i, file)

    vocab_size = len(i2w)
    print('vocabulary size', vocab_size)
    print('top 100 words:', i2w[:100])

    def decode(indices):

        sentence = ''
        for id in indices:
            # if id == PAD:
            #     break
            sentence += i2w[id] + ' '

        return sentence

    ## Set up the models
    embedding = torch.nn.Embedding(num_embeddings=vocab_size,
                                   embedding_dim=arg.embedding_size)

    if arg.mode != Mode.style:
        img_enc = models.ImEncoder(in_size=(arg.img_size, arg.img_size),
                                   zsize=arg.latent_size)
        img_dec = models.ImDecoder(in_size=(arg.img_size, arg.img_size),
                                   zsize=arg.latent_size)

        seq_enc = models.SeqEncoder(vocab_size=vocab_size,
                                    embedding=embedding,
                                    zsize=arg.latent_size)
        seq_dec = models.SeqDecoder(vocab_size=vocab_size,
                                    embedding=embedding,
                                    zsize=arg.latent_size)

        mods = [img_enc, img_dec, seq_enc, seq_dec]
    else:
        img_enc = models.ImEncoder(in_size=(arg.img_size, arg.img_size),
                                   zsize=arg.latent_size)
        img_sty = models.ImEncoder(in_size=(arg.img_size, arg.img_size),
                                   zsize=arg.latent_size)
        img_dec = models.ImDecoder(in_size=(arg.img_size, arg.img_size),
                                   zsize=arg.latent_size * 2)

        seq_enc = models.SeqEncoder(vocab_size=vocab_size,
                                    embedding=embedding,
                                    zsize=arg.latent_size)
        seq_sty = models.SeqEncoder(vocab_size=vocab_size,
                                    embedding=embedding,
                                    zsize=arg.latent_size)
        seq_dec = models.SeqDecoder(vocab_size=vocab_size,
                                    embedding=embedding,
                                    zsize=arg.latent_size * 2)

        mods = [img_enc, img_dec, img_sty, seq_enc, seq_dec, seq_sty]

    if torch.cuda.is_available():
        for model in mods:
            model.cuda()

    #- The standard dataloader approach doesn't seem to work with the captions, so we'll do our own batching.
    #  It's a little slower, probably, but it won't be the bottleneck
    params = []
    for model in mods:
        params.extend(model.parameters())
    optimizer = Adam(params, lr=arg.lr)

    instances_seen = 0

    for e in range(arg.epochs):
        print('epoch', e)
        for fr in tqdm.trange(0, len(coco_data), arg.batch_size):
            if arg.instance_limit is not None and fr > arg.instance_limit:
                break

            to = min(len(coco_data), fr + arg.batch_size)

            images = []
            captions = []

            for i in range(fr, to):
                images.append(coco_data[i][0].unsqueeze(0))
                # choose one of the available captions at random
                captions.append(random.choice(coco_data[i][1]))

            imbatch = torch.cat(images, dim=0)
            b, c, w, h = imbatch.size()

            capbatch = []  # to integer sequence
            for caption in captions:
                capbatch.append(util.intseq(util.tokenize(caption), w2i))

            capbatch, lengths = util.pad(capbatch)

            # Created shifted versions
            b, s = capbatch.size()

            # Input for the decoder
            cap_teacher = torch.cat(
                [torch.ones(b, 1, dtype=torch.long), capbatch], dim=1)
            cap_out = torch.cat(
                [capbatch, torch.zeros(b, 1, dtype=torch.long)], dim=1)

            lengths = torch.LongTensor(lengths)

            if torch.cuda.is_available():
                imbatch = imbatch.cuda()

                capbatch = capbatch.cuda()
                cap_teacher = cap_teacher.cuda()
                cap_out = cap_out.cuda()

                lengths = lengths.cuda()

            imbatch = Variable(imbatch)
            capbatch = Variable(capbatch)
            cap_teacher = Variable(cap_teacher)
            cap_out = Variable(cap_out)
            lengths = Variable(lengths)

            zimg = img_enc(imbatch)
            zcap = seq_enc(capbatch, lengths)

            kl_img = util.kl_loss(*zimg)
            kl_cap = util.kl_loss(*zcap)

            zimg_sample = util.sample(*zimg)
            zcap_sample = util.sample(*zcap)

            if arg.mode == Mode.style:
                zimg_sty = img_sty(imbatch)
                zcap_sty = seq_sty(capbatch, lengths)

                kl_img_sty = util.kl_loss(*zimg_sty)
                kl_cap_sty = util.kl_loss(*zcap_sty)

                zimg_sample_sty = util.sample(*zimg_sty)
                zcap_sample_sty = util.sample(*zcap_sty)

                zimg_sample = torch.cat([zimg_sample, zimg_sample_sty], dim=1)
                zcap_sample = torch.cat([zcap_sample, zcap_sample_sty], dim=1)

            rec_imgimg = img_dec(zimg_sample)
            rl_imgimg = binary_cross_entropy(rec_imgimg, imbatch,
                                             reduce=False).view(b,
                                                                -1).sum(dim=1)

            rec_capcap = seq_dec(zcap_sample, cap_teacher,
                                 lengths + 1).transpose(1, 2)
            rl_capcap = nll_loss(rec_capcap, cap_out,
                                 reduce=False).view(b, -1).sum(dim=1)

            if arg.mode != Mode.independent:
                rec_capimg = img_dec(zcap_sample)
                rl_capimg = binary_cross_entropy(rec_capimg,
                                                 imbatch,
                                                 reduce=False).view(
                                                     b, -1).sum(dim=1)

                rec_imgcap = seq_dec(zimg_sample, cap_teacher,
                                     lengths + 1).transpose(1, 2)
                rl_imgcap = nll_loss(rec_imgcap, cap_out,
                                     reduce=False).view(b, -1).sum(dim=1)

            loss_img = rl_imgimg + kl_img
            loss_cap = rl_capcap + kl_cap

            if arg.mode == Mode.coupled:
                loss_img = loss_img + rl_capimg + kl_img
                loss_cap = loss_cap + rl_imgcap + kl_cap

            if arg.mode == Mode.style:
                loss_img = loss_img + kl_img_sty
                loss_cap = loss_cap + kl_cap_sty

            loss = loss_img.mean() + loss_cap.mean()

            #- backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            instances_seen += b

            tbw.add_scalar('score/img/kl', float(kl_img.mean()),
                           instances_seen)
            tbw.add_scalar('score/imgimg/rec', float(rl_imgimg.mean()),
                           instances_seen)
            tbw.add_scalar('score/cap/kl', float(kl_cap.mean()),
                           instances_seen)
            tbw.add_scalar('score/capcap/rec', float(rl_capcap.mean()),
                           instances_seen)
            tbw.add_scalar('score/loss', float(loss), instances_seen)

            if arg.mode != Mode.independent:
                tbw.add_scalar('score/capimg/rec', float(rl_capimg.mean()),
                               instances_seen)
                tbw.add_scalar('score/imgcap/rec', float(rl_imgcap.mean()),
                               instances_seen)

        # Interpolate
        zpairs = []
        for r in range(REP):

            print('Interpolation, repeat', r)

            l = arg.latent_size if arg.mode != Mode.style else arg.latent_size * 2
            z1, z2 = torch.randn(2, l)
            if torch.cuda.is_available():
                z1, z2 = z1.cuda(), z2.cuda()

            zpairs.append((z1, z2))

            zs = util.slerp(z1, z2, 10)

            print('== sentences (temp={}) =='.format(TEMPS[r]))
            sentences = seq_dec.sample(z=zs, temperature=TEMPS[r])

            for s in sentences:
                print('   ', decode(s))

        print('== images ==')

        util.interpolate(zpairs, img_dec, name='interpolate.{}'.format(e))
Example #47
idx_ini = int((av_w_ini-12) * 60 / step)
idx_end = int((av_w_end-12) * 60 / step)


#%% Base load params
t.append(time.time())
baseload = False
shift = 12
max_load = 5
if baseload:
    load = pd.read_csv(r'c:\user\U546416\Documents\PhD\Data\Mobilité\Data_Base\Conso\conso-inf36_profiles.csv',
                       engine='python', index_col=0)
    load = load['RES1 (+ RES1WE)'] / load['RES1 (+ RES1WE)'].max() * max_load
    load.index = pd.to_datetime(load.index)
    load = util.get_max_load_week(load)
    load = util.interpolate(load, step=step, method='polynomial', order=3)
    n = int(60/step)*24
    
    load = load[int(n*(3-shift/24)):int(n*(4-shift/24))]
else:
    load=0
t.append(time.time())                              
print('More params, t={:.2f} seg'.format(t[-1]-t[-2]))
#%% Evaluation params:

ch_company, dn_company = flex_payment_funcs.get_ev_profs(grid, nameset='Company')
ch_comm_h, dn_comm_h = flex_payment_funcs.get_ev_profs(grid, nameset='Commuter_HP')
ch_comm_l, dn_comm_l = flex_payment_funcs.get_ev_profs(grid, nameset='Commuter_LP')
(nevs, ndays, nsteps) = ch_comm_h.shape

# This is to include (or not) the baseload into the EV baseline computation
Example #48
    def visualize(self, board, y):
        # Shorthand handles for this effect's options and the device config.
        opts = config.settings["devices"][
            board.board]["effect_opts"]["FreqEnergy"]
        n_pixels = config.settings["devices"][
            board.board]["configuration"]["N_PIXELS"]

        # Colour the new column by the hue of the dominant mel bin.
        maxMel = np.argmax(y)
        r, g, b = colorsys.hsv_to_rgb(maxMel / len(y), 1, 1)

        # Scrolling effect window: shift the history, decay it, then blur it.
        speed = opts["speed"]
        board.visualizer.output[:, speed:] = board.visualizer.output[:, :-speed]
        board.visualizer.output = (
            board.visualizer.output * opts["decay"]).astype(int)
        board.visualizer.output = gaussian_filter1d(
            board.visualizer.output, sigma=opts["scrollBlur"])

        board.visualizer.output[0, :speed] = int(r * 255)
        board.visualizer.output[1, :speed] = int(g * 255)
        board.visualizer.output[2, :speed] = int(b * 255)

        # Normalise the spectrum by its running gain.
        y = np.copy(y)
        board.signalProcessor.gain.update(y)
        y /= board.signalProcessor.gain.value
        scale = opts["scale"]
        # Scale by the width of the LED strip
        y *= float((n_pixels * scale) - 1)
        y = np.copy(util.interpolate(y, n_pixels // 2))

        meanOrMax = np.mean if opts["mean"] else np.max

        if opts["splitRGB"]:
            # Drive each colour channel from one third of the spectrum.
            r = int(meanOrMax(y[:len(y) // 3]**scale) * opts["r_multiplier"])
            g = int(meanOrMax(y[len(y) // 3:2 * len(y) // 3]**scale)
                    * opts["g_multiplier"])
            b = int(meanOrMax(y[2 * len(y) // 3:]**scale) * opts["b_multiplier"])
        else:
            r = g = b = int(meanOrMax(y**scale))

        p = np.copy(board.visualizer.output)

        # Mask each channel beyond its energy level.
        p[0, r:] = 0
        p[1, g:] = 0
        p[2, b:] = 0

        # Apply blur to smooth the edges
        for channel in range(3):
            p[channel, :] = gaussian_filter1d(p[channel, :], sigma=opts["blur"])

        if opts["flip_lr"]:
            p = np.fliplr(p)

        if opts["mirror"]:
            p = np.concatenate((p[:, ::-2], p[:, ::2]), axis=1)

        return p
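In this effect, util.interpolate(y, n) is assumed to stretch or shrink the 1-D spectrum to n points so it matches half the LED strip; a plausible sketch with np.interp:

import numpy as np

def interpolate(y, new_length):
    # Resample a 1-D array to `new_length` points by linear interpolation.
    if len(y) == new_length:
        return y
    x_old = np.linspace(0.0, 1.0, len(y))
    x_new = np.linspace(0.0, 1.0, new_length)
    return np.interp(x_new, x_old, y)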
Example #49
    c2p = [c2[1], c2[2], c2[3], c2[0]]
    d12, R12 = d.distance(c1, c2, p)
    d12p, R12p = d.distance(c1, c2p, p)  # use the permuted copy
    print "{0:10f}".format(d12)
    print R12
    print "-- permutation --"
    print "{0:10f}".format(d12p)
    print R12p
    print

    sigmas = [0.1, 0.5, 1.0]
    print "# triangle test: alpha, d1 d2, ... for sigmas=", sigmas
    n = 10
    for i in range(n + 1):
        a = 1.0 * i / n
        c = util.interpolate(c1, c2, a)
        print "{0:4g}".format(a),
        for sig in sigmas:
            d1, R1 = d.distance(c1, c, p, sig)
            d2, R2 = d.distance(c2, c, p, sig)
            print "{0:10f}{1:10f}".format(d1, d2),
        print
    print

    print "# d vs p: p d12 .. for sigmas=", sigmas
    for p in range(10):
        print "{0:2d}".format(p),
        for sig in sigmas:
            #d11 = compute_distance(s1,s1,p)
            d12, R12 = d.distance(c1, c2, p, sig)
            #d22 = compute_distance(s2,s2,p)
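Both this snippet and the triangle test below blend two configurations with util.interpolate(c1, c2, a), where a sweeps from 0 to 1. The helper's body is not shown; a simple convex-combination sketch under that reading:

import numpy as np

def interpolate(c1, c2, a):
    # Convex combination of two coordinate sets: a=0 gives c1, a=1 gives c2.
    c1 = np.asarray(c1, dtype=float)
    c2 = np.asarray(c2, dtype=float)
    return (1.0 - a) * c1 + a * c2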
def experiment():
    model_config: HParams = configs.get_config(FLAGS.model_cfgset)().parse(
        FLAGS.model_cfgs)
    model = models.get_model(FLAGS.model)(FLAGS.dir,
                                          FLAGS.id,
                                          model_config,
                                          training=False,
                                          ckpt=FLAGS.ckpt)

    if FLAGS.natural:
        # If natural images, use miniImageNet.
        logging.info("#######################################################")
        logging.info("#########Natural Images, using mini-ImageNet###########")
        logging.info("#######################################################")
        if FLAGS.sample:
            logging.info(
                "=======================================================")
            logging.info(
                "============Sampling examples from datasets============")
            logging.info(
                "=======================================================")
            logging.info(
                "=====Sampling Decodings of mini-ImageNet Examples...=====")
            sample_dataset_config: HParams = configs.get_config(
                "miniimagenet/sachinravi_test")().parse("")
            gen_dataset_proto = datasets.get_dataset("miniimagenet")(
                FLAGS.data_dir, sample_dataset_config)
            sample_dataset = gen_dataset_proto.load(repeat=False)

            model.test(sample_dataset,
                       "full-eval-mii",
                       40,
                       generation_length=100)

            logging.info("=====Sampling Decodings of Sketchy Examples...=====")

            sample_dataset_config: HParams = configs.get_config("sketchy")(
            ).parse("split=msl100_84_noclash_noflip,shuffle=False")
            gen_dataset_proto = datasets.get_dataset("sketchy")(
                FLAGS.data_dir, sample_dataset_config)
            sample_dataset = gen_dataset_proto.load(repeat=False)

            model.test(sample_dataset,
                       "full-eval-sketchy",
                       20,
                       generation_length=100)

        if FLAGS.usfs:
            logging.info(
                "=======================================================")
            logging.info(
                "==========Unsupervised few-shot mini-ImageNet==========")
            logging.info(
                "=======================================================")

            for cmodel_type in ["lr_fs"]:
                cmodel_config = configs.get_config("lr_fs")().parse("")
                cmodel = models.get_model(cmodel_type)(FLAGS.dir, FLAGS.id,
                                                       cmodel_config)
                for split in ["sachinravi"]:
                    for setup in [
                            "5way1shot", "5way5shot", "5way20shot",
                            "5way50shot"
                    ]:
                        usfs_dataset_config = configs.get_config(
                            "miniimagenet/{}_test/{}".format(
                                split, setup))().parse("")
                        usfs_dataset = datasets.get_dataset("miniimagenet")(
                            FLAGS.data_dir, usfs_dataset_config)

                        logging.info(
                            "===== Running Unsupervised Few-shot | linear_head: %s | split: %s | %s  ======",
                            cmodel_type, split, setup)
                        cmodel.episode(model, usfs_dataset, 1000)

        if FLAGS.checkpoint:
            logging.info("================================================")
            logging.info("==========Performing checkpoint sweep.==========")
            logging.info("================================================")
            ckpts_dir = os.path.join(FLAGS.dir, FLAGS.id, "checkpoints")
            ckpts = os.listdir(
                os.path.join(ckpts_dir,
                             os.listdir(ckpts_dir)[0]))

            ckpts = list(filter(lambda x: x.endswith(".index"), ckpts))
            ckpt_ids = [
                str(y) for y in sorted(
                    list(
                        map(lambda x: int(x.split(".")[0].split("-")[-1]),
                            ckpts)))
            ]

            for ckpt_id in ckpt_ids:
                logging.info("=====Loading Model with ckpt %s=====", ckpt_id)
                model_config: HParams = configs.get_config(
                    FLAGS.model_cfgset)().parse(FLAGS.model_cfgs)
                model = models.get_model(FLAGS.model)(FLAGS.dir,
                                                      FLAGS.id,
                                                      model_config,
                                                      training=False,
                                                      ckpt=ckpt_id)

                for cmodel_type in ["lr_fs"]:
                    cmodel_config = HParams().parse("")
                    cmodel = models.get_model(cmodel_type)(FLAGS.dir, FLAGS.id,
                                                           cmodel_config)
                    for split in ["sachinravi"]:
                        for setup in ["5way1shot"]:
                            usfs_dataset_config = configs.get_config(
                                "miniimagenet/{}_test/{}".format(
                                    split, setup))().parse("")
                            usfs_dataset = datasets.get_dataset(
                                "miniimagenet")(FLAGS.data_dir,
                                                usfs_dataset_config)

                            logging.info(
                                "===== Running Unsupervised Few-shot | linear_head: %s | split: %s | %s  ======",
                                cmodel_type, split, setup)
                            cmodel.episode(model, usfs_dataset, 500)
    else:
        logging.info("#######################################################")
        logging.info("########## Sketches using Omniglot dataset ############")
        logging.info("#######################################################")
        ds = "fs_omniglot_28"
        if FLAGS.sample:
            logging.info("============================")
            logging.info("=====Sampling decodings=====")
            logging.info("============================")

            logging.info("=====Omniglot dataset=====")
            sample_dataset_config: HParams = configs.get_config(
                "fs_omniglot/vinyals_test_fake")().parse("")
            gen_dataset_proto = datasets.get_dataset(ds)(FLAGS.data_dir,
                                                         sample_dataset_config)
            sample_dataset = gen_dataset_proto.load(repeat=False)
            model.test(sample_dataset, "full-eval-sample", 40)

            logging.info("=====Seen quickdraw examples=====")
            sample1_dataset_config: HParams = configs.get_config(
                "quickdraw")().parse("split=T1_msl64_28,shuffle=False")
            gen1_dataset_proto = datasets.get_dataset('quickdraw')(
                FLAGS.data_dir, sample1_dataset_config)
            sample1_dataset = gen1_dataset_proto.load(repeat=False)
            model.test(sample1_dataset, "full-eval-qd-T1", 40)

            logging.info("=====Unseen quickdraw examples=====")
            sample2_dataset_config: HParams = configs.get_config(
                "quickdraw")().parse("split=T2_msl64_28,shuffle=False")
            gen2_dataset_proto = datasets.get_dataset('quickdraw')(
                FLAGS.data_dir, sample2_dataset_config)
            sample2_dataset = gen2_dataset_proto.load(repeat=False)
            model.test(sample2_dataset, "full-eval-qd-T2", 40)

            logging.info("=====Latent Space Interpolation=====")
            if isinstance(model, DrawerModel):
                interpolate(model,
                            sample1_dataset,
                            "interpolations",
                            interps=20)

        if FLAGS.gen:
            logging.info("=======================================")
            logging.info("==========Generation Testing===========")
            logging.info("=======================================")

            logging.info("===== Classifier Dataset: T1 =====")
            classifier1_configs = configs.get_config("classifier/T1")().parse(
                "")
            class_model1: ClassifierModel = models.get_model("classifier")(
                FLAGS.dir,
                "05-24_classifiers/classifier_T1",
                classifier1_configs,
                training=False)

            gen1_dataset_config: HParams = configs.get_config(
                "quickdraw")().parse("split=T1_msl64_28")
            gen1_dataset_proto = datasets.get_dataset("quickdraw")(
                FLAGS.data_dir, gen1_dataset_config)
            gen1_dataset = gen1_dataset_proto.load(repeat=False)

            logging.info("ST1 Classifier Test")
            class_model1.classify_predictions(gen1_dataset, model, steps=20)

            logging.info("===== Classifier Dataset: T2 =====")
            classifier2_configs = configs.get_config("classifier/T2")().parse(
                "")
            class_model2: ClassifierModel = models.get_model("classifier")(
                FLAGS.dir,
                "05-24_classifiers/classifier_T2",
                classifier2_configs,
                training=False)

            gen2_dataset_config: HParams = configs.get_config(
                "quickdraw")().parse("split=T2_msl64_28")
            gen2_dataset_proto = datasets.get_dataset("quickdraw")(
                FLAGS.data_dir, gen2_dataset_config)
            gen2_dataset = gen2_dataset_proto.load(repeat=False)

            logging.info("ST2 Classifier Test")
            class_model2.classify_predictions(gen2_dataset, model, steps=20)

        if FLAGS.usfs:
            logging.info("==================================================")
            logging.info("==========Unsupervised few-shot Omniglot==========")
            logging.info("==================================================")

            for cmodel_type in ["lr_fs"]:
                cmodel_config = HParams().parse("")
                cmodel = models.get_model(cmodel_type)(FLAGS.dir, FLAGS.id,
                                                       cmodel_config)
                for setup in [
                        "20way1shot", "20way5shot", "5way1shot", "5way5shot"
                ]:
                    usfs_dataset_config = configs.get_config(
                        "fs_omniglot/vinyals_test/{}".format(setup))().parse(
                            "")
                    usfs_dataset = datasets.get_dataset("fs_omniglot_vinyals")(
                        FLAGS.data_dir, usfs_dataset_config)

                    logging.info(
                        "===== Running Unsupervised Few-shot | linear_head: %s | split: %s | %s  ======",
                        cmodel_type, "vinyals", setup)
                    cmodel.episode(model, usfs_dataset, 2000)

                    for split in ["lake"]:
                        logging.info("===Getting usfs test dataset: %s/%s===",
                                     split, setup)
                        usfs_dataset_config = configs.get_config(
                            "fs_omniglot/{}_test/{}".format(split,
                                                            setup))().parse("")
                        usfs_dataset = datasets.get_dataset(ds)(
                            FLAGS.data_dir, usfs_dataset_config)

                        logging.info(
                            "===== Running Unsupervised Few-shot | linear_head: %s | split: %s | %s  ======",
                            cmodel_type, split, setup)
                        cmodel.episode(model, usfs_dataset, 2000)

        if FLAGS.checkpoint:
            logging.info("================================================")
            logging.info("==========Performing checkpoint sweep.==========")
            logging.info("================================================")
            ckpts_dir = os.path.join(FLAGS.dir, FLAGS.id, "checkpoints")
            ckpts = os.listdir(
                os.path.join(ckpts_dir,
                             os.listdir(ckpts_dir)[0]))

            ckpts = list(filter(lambda x: x.endswith(".index"), ckpts))
            ckpt_ids = sorted(
                list(map(lambda x: int(x.split(".")[0].split("-")[-1]),
                         ckpts)))

            for ckpt_id in ckpt_ids:
                ckpt_id = str(ckpt_id)
                logging.info("=====Loading Model with ckpt %s=====", ckpt_id)
                ckpt_model_config: HParams = configs.get_config(
                    FLAGS.model_cfgset)().parse(FLAGS.model_cfgs)
                ckpt_model = models.get_model(FLAGS.model)(FLAGS.dir,
                                                           FLAGS.id,
                                                           ckpt_model_config,
                                                           training=False,
                                                           ckpt=ckpt_id)

                for cmodel_type in ["lr_fs"]:
                    cmodel_config = HParams().parse("")
                    cmodel = models.get_model(cmodel_type)(FLAGS.dir, FLAGS.id,
                                                           cmodel_config)
                    for setup in ["20way1shot"]:
                        usfs_dataset_config = configs.get_config(
                            "fs_omniglot/vinyals_test/{}".format(
                                setup))().parse("")
                        usfs_dataset = datasets.get_dataset(
                            "fs_omniglot_vinyals")(FLAGS.data_dir,
                                                   usfs_dataset_config)

                        logging.info(
                            "===== Running Unsupervised Few-shot | linear_head: %s | split: %s | %s  ======",
                            cmodel_type, "Vinyals", setup)
                        cmodel.episode(ckpt_model, usfs_dataset, 500)

                if FLAGS.gen:
                    logging.info("==========Generation Testing===========")
                    logging.info("ST1")
                    class_model1.classify_predictions(gen1_dataset,
                                                      ckpt_model,
                                                      steps=5)
                    logging.info("ST2")
                    class_model2.classify_predictions(gen2_dataset,
                                                      ckpt_model,
                                                      steps=5)
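The checkpoint sweeps above recover numeric step ids from TensorFlow *.index filenames (e.g. ckpt-1000.index). The same parsing step in isolation, sketched with pathlib (not part of the original code):

from pathlib import Path

def sorted_ckpt_ids(ckpts_dir):
    # 'ckpt-1000.index' -> 1000; return the ids sorted ascending, as strings.
    ids = [int(p.stem.split('-')[-1]) for p in Path(ckpts_dir).glob('*.index')]
    return [str(i) for i in sorted(ids)]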
Example #52
def triangle_test():
   print ">> running triangle test ",
   sys.stdout.flush()
   f = open("triangle.dat","w")
   p = 4
   print >>f,"# triangle test "
   #print >>f,"# sph harmonic order ",p
   n = 10 # 40
   alphas = []
   cs = []
   for i in range(n+1):
     a = 1.0*i/n
     c = np.array(util.interpolate(c1,c2,a))
     alphas.append(a)
     cs.append(c)
   g0,R0 = g.distance(C1,C2)
   print >>f,"# RMSD ref =",g0
   print >>f,"# OGTO ref =",
   o0s = []
   for sig in sigmas: 
     d0,R0 = o.distance(c1,c2,sig)
     #d0,R0 = o.dist(c1,c2,sig)
     o0s.append(d0)
     print >>f,d0,
   print >>f
#  print >>f,"# SOAP ref =",
#  d0s = []
#  for sig in sigmas: 
#    d0,R0 = d.distance(c1,c2,p,sig)
#    d0s.append(d0)
#    print >>f,d0,
#  print >>f
   #print >>f,"# alpha (d1,d2)_rmsd, _ogto @, _soap @ sig=",
   print >>f,"# alpha (d1,d2)_rmsd, _ogto @ sig=",
   for sig in sigmas: print >>f, sig,
   print >>f
   for i in range(len(alphas)):
     sys.stdout.write("*")
     sys.stdout.flush()
     a = alphas[i]
     d1,R1 = g.distance(C1,cs[i]) 
     d2,R2 = g.distance(C2,cs[i]) 
     print "RMSD\n",R1,"\n",R2
     print >>f,"{0:8f} {1:10f} {2:10f}".format(a,d1/g0,d2/g0),
     for j in range(len(sigmas)):
       sig = sigmas[j]
       d0 = o0s[j]
       d1,R1 = o.distance(c1,cs[i],sig) 
       d2,R2 = o.distance(c2,cs[i],sig)
       #d1,R1 = o.dist(c1,cs[i],sig) 
       #d2,R2 = o.dist(c2,cs[i],sig)
       print "OGTO\n",R1,"\n",R2
       print >>f,"{0:10f} {1:10f}".format(d1/d0,d2/d0),
#    for j in range(len(sigmas)):
#      sig = sigmas[j]
#      d0 = d0s[j]
#      d1,R1 = d.distance(c1,cs[i],p,sig) 
#      d2,R2 = d.distance(c2,cs[i],p,sig)
#      print >>f,"{0:10f} {1:10f}".format(d1/d0,d2/d0),
     print >>f
   print "done"
Example #53
from math import ceil, floor

def power_LLT(U0):
    # Look up the LLT power table, interpolating linearly between the
    # integer wind speeds that bracket U0.
    if ceil(U0) == floor(U0):
        return power_table_LLT(U0)
    else:
        return interpolate(floor(U0), power_table_LLT(floor(U0)),
                           ceil(U0), power_table_LLT(ceil(U0)), U0)
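The interpolate used here takes two table points and a query value, interpolate(x0, y0, x1, y1, x); judging from the call it is ordinary linear interpolation (a sketch under that assumption):

def interpolate(x0, y0, x1, y1, x):
    # Value at x on the straight line through (x0, y0) and (x1, y1).
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0)

power_LLT already handles the case where ceil(U0) == floor(U0), so the division by x1 - x0 is safe.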
Example #54
def filemerge(repo, mynode, orig, fcd, fco, fca):
    """perform a 3-way merge in the working directory

    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file
    """

    def temp(prefix, ctx):
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, "wb")
        f.write(data)
        f.close()
        return name

    def isbin(ctx):
        try:
            return util.binary(ctx.data())
        except IOError:
            return False

    if not fco.cmp(fcd): # files identical?
        return None

    ui = repo.ui
    fd = fcd.path()
    binary = isbin(fcd) or isbin(fco) or isbin(fca)
    symlink = 'l' in fcd.flags() + fco.flags()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
    ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
               (tool, fd, binary, symlink))

    if not tool or tool == 'internal:prompt':
        tool = "internal:local"
        if ui.promptchoice(_(" no tool found to merge %s\n"
                             "keep (l)ocal or take (o)ther?") % fd,
                           (_("&Local"), _("&Other")), 0):
            tool = "internal:other"
    if tool == "internal:local":
        return 0
    if tool == "internal:other":
        repo.wwrite(fd, fco.data(), fco.flags())
        return 0
    if tool == "internal:fail":
        return 1

    # do the actual merge
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    out = ""
    back = a + ".orig"
    util.copyfile(a, back)

    if orig != fco.path():
        ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
    else:
        ui.status(_("merging %s\n") % fd)

    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, "premerge", not (binary or symlink))
    except error.ConfigError:
        premerge = _toolstr(ui, tool, "premerge").lower()
        valid = 'keep'.split()
        if premerge not in valid:
            _valid = ', '.join(["'" + v + "'" for v in valid])
            raise error.ConfigError(_("%s.premerge not valid "
                                      "('%s' is neither boolean nor %s)") %
                                    (tool, premerge, _valid))

    if premerge:
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
        if not r:
            ui.debug(" premerge successful\n")
            os.unlink(back)
            os.unlink(b)
            os.unlink(c)
            return 0
        if premerge != 'keep':
            util.copyfile(back, a) # restore from backup and try again

    env = dict(HG_FILE=fd,
               HG_MY_NODE=short(mynode),
               HG_OTHER_NODE=str(fco.changectx()),
               HG_BASE_NODE=str(fca.changectx()),
               HG_MY_ISLINK='l' in fcd.flags(),
               HG_OTHER_ISLINK='l' in fco.flags(),
               HG_BASE_ISLINK='l' in fca.flags())

    if tool == "internal:merge":
        r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
    elif tool == 'internal:dump':
        a = repo.wjoin(fd)
        util.copyfile(a, a + ".local")
        repo.wwrite(fd + ".other", fco.data(), fco.flags())
        repo.wwrite(fd + ".base", fca.data(), fca.flags())
        return 1 # unresolved
    else:
        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = util.interpolate(r'\$', replace, args,
                                lambda s: '"%s"' % util.localpath(s))
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
                        out=ui.fout)

    if not r and (_toolbool(ui, tool, "checkconflicts") or
                  'conflicts' in _toollist(ui, tool, "check")):
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1

    checked = False
    if 'prompt' in _toollist(ui, tool, "check"):
        checked = True
        if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd,
                           (_("&Yes"), _("&No")), 1):
            r = 1

    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
                                  'changed' in _toollist(ui, tool, "check")):
        if filecmp.cmp(repo.wjoin(fd), back):
            if ui.promptchoice(_(" output file %s appears unchanged\n"
                                 "was merge successful (yn)?") % fd,
                               (_("&Yes"), _("&No")), 1):
                r = 1

    if _toolbool(ui, tool, "fixeol"):
        _matcheol(repo.wjoin(fd), back)

    if r:
        ui.warn(_("merging %s failed!\n") % fd)
    else:
        os.unlink(back)

    os.unlink(b)
    os.unlink(c)
    return r
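The util.interpolate call in the external-tool branch substitutes $local, $base, $other and $output into the tool's argument string, quoting each path via the supplied callback. Mercurial's real helper lives in mercurial/util.py; a rough illustrative sketch of the idea:

import re

def interpolate(prefix, mapping, s, fn=None):
    # Substitute prefix-marked keys in s, e.g. '$local' -> mapping['local'],
    # optionally passing each value through fn (used above to shell-quote paths).
    fn = fn or (lambda v: v)
    pattern = re.compile(prefix + '(' + '|'.join(map(re.escape, mapping)) + ')')
    return pattern.sub(lambda m: fn(mapping[m.group(1)]), s)

# e.g. interpolate(r'\$', dict(local='a', base='b', other='c'),
#                  '$local $base $other')  ->  'a b c'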