Code example #1
def pm_expand(constr_list):
    """
    Description
    -----------
    Expands functions which are implemented
    using partial minimization descriptions.
    constr_list: cvxpy_list of constraints.

    Arguments
    ---------
    constr_list: cvxpy_list of constraints.
    """

    new_list = cvxpy_list([])
    for c in constr_list:
        if(c.left.type == TREE and
           c.left.item.type == FUNCTION and
           c.left.item.expansion_type == PM):
            new_constr = transform(expand(c.left.item._pm_expand(c)))
            new_constr = pm_expand(new_constr._get_convex())
            new_list += new_constr
        elif(c.left.type == EXPRESSION and
             c.right.type == SET and
             c.right.expansion_type == PM):
            new_constr = transform(expand(c.right._pm_expand(c)))
            new_constr = pm_expand(new_constr._get_convex())
            new_list += new_constr
        else:
            new_list += cvxpy_list([c])

    # Return new list
    return new_list
Code example #2
def pm_expand(constr_list):
    """
    Description
    -----------
    Expands functions which are implemented
    using partial minimization descriptions.
    constr_list: cvxpy_list of constraints.

    Arguments
    ---------
    constr_list: cvxpy_list of constraints.
    """

    new_list = cvxpy_list([])
    for c in constr_list:
        if (c.left.type == TREE and c.left.item.type == FUNCTION
                and c.left.item.expansion_type == PM):
            new_constr = transform(expand(c.left.item._pm_expand(c)))
            new_constr = pm_expand(new_constr._get_convex())
            new_list += new_constr
        elif (c.left.type == EXPRESSION and c.right.type == SET
              and c.right.expansion_type == PM):
            new_constr = transform(expand(c.right._pm_expand(c)))
            new_constr = pm_expand(new_constr._get_convex())
            new_list += new_constr
        else:
            new_list += cvxpy_list([c])

    # Return new list
    return new_list
Code example #3
def keygen(file):
    SDLLinesForKeygen = []

    if ((type(file) is not str) or (len(file) == 0)):
        sys.exit("First argument passed to keygen.py is invalid.")

    parseFile2(file, False)
    (varsToBlindList, rccaData) = (transform(False))
    rcca(rccaData)
    varNamesForListDecls = []

    updateCodeAndStructs()

    if (keygenBlindingExponent in assignInfo[keygenFuncName]):
        sys.exit(
            "keygen.py:  the variable used for keygenBlindingExponent in config.py already exists in the keygen function of the scheme being analyzed."
        )

    if ((keygenFuncName not in assignInfo)
            or (outputKeyword not in assignInfo[keygenFuncName])):
        sys.exit(
            "assignInfo structure obtained in keygen function of keygen.py did not have the right keygen function name or output keywords."
        )

    keygenOutput = assignInfo[keygenFuncName][outputKeyword].getVarDeps()
    if (len(keygenOutput) == 0):
        sys.exit(
            "Variable dependencies obtained for output of keygen in keygen.py was of length zero."
        )

    SDLLinesForKeygen.append(keygenBlindingExponent + " := random(ZR)\n")
    lineNoAfterThisAddition = writeLinesToFuncAfterVarLastAssign(
        keygenFuncName, SDLLinesForKeygen, None)

    for keygenOutput_ind in keygenOutput:
        secretKeyName = blindKeygenOutputElement(keygenOutput_ind,
                                                 varsToBlindList,
                                                 varNamesForListDecls)

    if (len(varsToBlindList) != 0):
        sys.exit(
            "keygen.py completed without blinding all of the variables passed to it by transform.py."
        )

    SDLLinesForKeygen = []
    SDLLinesForKeygen.append("output := list{" + keygenBlindingExponent +
                             ", " + secretKeyName + blindingSuffix + "}\n")

    lineNoKeygenOutput = getLineNoOfOutputStatement(keygenFuncName)
    removeFromLinesOfCode([lineNoKeygenOutput])
    appendToLinesOfCode(SDLLinesForKeygen, lineNoKeygenOutput)
    updateCodeAndStructs()

    for index_listVars in range(0, len(varNamesForListDecls)):
        varNamesForListDecls[index_listVars] = varNamesForListDecls[
            index_listVars] + blindingSuffix + " := list\n"

    lineNoEndTypesSection = getEndLineNoOfFunc(TYPES_HEADER)
    appendToLinesOfCode(varNamesForListDecls, lineNoEndTypesSection)
    updateCodeAndStructs()
Code example #4
File: align.py Project: maduser64/WHORL
def align_fingerprints(template_fp, query_fp, k4=6):
	"""
	Align query_fp to template_fp: compute focal vectors for both
	fingerprints, derive the rigid transform (shift and rotation) that
	maps the query vector onto the template vector, and return the
	transformed query fingerprint.
	"""
	# ===== 1. calculate focal vectors of both images =====

	template_vec = get_focal_vector(template_fp,k4)
	query_vec = get_focal_vector(query_fp,k4)

	# ===== 2. get rigid transformation mapping q.v. to t.v. =====

	if (template_fp == query_fp):
		transformation = {
			"shift" : (0,0),
			"rotation" : 0
		}
	else:
		shift, rot = compute_transform(template_vec,query_vec)
		transformation = {
			"shift" : shift,
			"rotation" : rot
		}

	# ===== 3. return shifted and rotated query template =====

	return transform(query_fp,transformation)
Code example #5
def dofile(path):
    import os
    cps = transform(path, trampoline)
    base, ext = os.path.splitext(path)
    fout = open(base + '.cps.py', 'wb')
    fout.write(b'\nfrom scheduler import schedule, run\n\n')
    w = writer(fout)
    cps.emit_all(w)
    fout.write(b'\nrun()\n')
    fout.close()
Code example #6
File: trampoline.py Project: samrushing/cps-python
def dofile(path):
    import os

    cps = transform(path, trampoline)
    base, ext = os.path.splitext(path)
    fout = open(base + ".cps.py", "wb")
    fout.write(b"\nfrom scheduler import schedule, run\n\n")
    w = writer(fout)
    cps.emit_all(w)
    fout.write(b"\nrun()\n")
    fout.close()
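
A brief usage note for dofile above: calling it on a Python source file writes a CPS-converted sibling file that imports the project's scheduler module and ends with a run() call. A minimal sketch, assuming transform, trampoline, and writer come from this project's modules (the input filename is hypothetical):

# Converts foo.py into foo.cps.py, which imports schedule/run from the
# project's scheduler module and finishes by calling run().
dofile('foo.py')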
Code example #7
File: test_spin.py Project: zoginni/helit
def rotate_noop(image, steps=32, transform=transform):
    rot = translate([-0.5 * shape[1], -0.5 * shape[0]])
    rot = rotate(numpy.pi * 2.0 / steps).dot(rot)
    rot = translate([0.5 * shape[1], 0.5 * shape[0]]).dot(rot)

    for _ in xrange(steps):
        image = transform(rot, image)

    fillmasked(image)

    return image
Code example #8
def TTA(images):
    images_TTA_list = []

    for transform in TTA_list:
        cur_images = []
        for image in images:
            cur_images.append(transform(image))

        images_TTA_list.append(torch.stack(cur_images))

    return images_TTA_list
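
A minimal sketch of driving TTA above, assuming TTA_list is a module-level list of callables that map an image tensor to an augmented tensor; the list below is a hypothetical example built from torchvision's functional flips, not the original project's definition:

import torch
import torchvision.transforms.functional as TF

# Hypothetical TTA_list: identity plus horizontal and vertical flips.
TTA_list = [
    lambda img: img,
    TF.hflip,
    TF.vflip,
]

images = [torch.rand(3, 32, 32) for _ in range(4)]
batches = TTA(images)  # three tensors, each of shape (4, 3, 32, 32)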
Code example #9
File: test_spin.py Project: PeterZhouSZ/helit
def rotate_noop(image, steps = 32, transform = transform):
  rot = translate([-0.5*shape[1], -0.5*shape[0]])
  rot = rotate(numpy.pi*2.0 / steps).dot(rot)
  rot = translate([0.5*shape[1], 0.5*shape[0]]).dot(rot)
  
  for _ in xrange(steps):
    image = transform(rot, image)
  
  fillmasked(image)
  
  return image
Code example #10
File: dataset.py Project: JaeHeee/Paper_Review
def __init__(self, train):
    self.transform = transform()
    if train:
        self.imgs = torchvision.datasets.CIFAR100(root='../data',
                                                  train=True,
                                                  transform=self.transform,
                                                  download=True)
    else:
        self.imgs = torchvision.datasets.CIFAR100(root='../data',
                                                  train=False,
                                                  transform=self.transform)
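
The transform() factory called in __init__ is project-specific; a plausible stand-in (an assumption, not the repository's actual definition) would return a torchvision pipeline:

import torchvision.transforms as T

def transform():
    # Hypothetical stand-in for the project's transform() factory.
    return T.Compose([
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])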
Code example #11
File: control.py Project: juuhnpark/MOTEDecoder
  def update(self):
    if self.decoding:
      print(self.runNum)
      self.runNum += 1
      self.decodedPlot.clear()
      self.decodedData.clear()
      
      dataPath = saveData(self.buffer)

      if self.transform:
        transform(dataPath)
      # dataPath = '20190723_MOTE_V4_C1_Released_50mVpp_500Hz_9A_A=0p5_3.mat'

      timestamp, fin_delta_t, timeData, threshold_amp, tr_window, ts_peak_ind, chosen_ts_list, dist_figure, prevThreshold, time_in_range = decode(dataPath, self.threshold)

      if not self.manualThreshold:
        self.threshold = prevThreshold
        self.thresholdSpinBox.setValue(self.threshold)

      self.decodedPlot.plot(x_data=timestamp, y_data=fin_delta_t, color='r')
      self.decodedPlot.plot(x_data=timestamp, y_data=fin_delta_t, color='.')

      self.decodedData.plot(x_data=timeData, y_data=threshold_amp, color='y')
      self.decodedData.plot(x_data=timeData, y_data=tr_window, color='b')
      self.decodedData.plot(x_data=timeData[ts_peak_ind], y_data=threshold_amp[ts_peak_ind], color='p')
      self.decodedData.plot(x_data=timeData[chosen_ts_list], y_data=threshold_amp[chosen_ts_list], color='g')
      self.decodedData.plot(x_data=timeData, y_data=dist_figure, color='r')
      self.decodedData.plot(x_data=timeData, y_data=time_in_range, color='c')

      self.decodedPlot.setAxesTitle('Decoded Plot', 'Time (ms)', 'Δt (μs)')
      self.decodedData.setAxesTitle('Decoded Data', 'Time (ms)', 'Voltage (mV)')

      if self.saving:
        outputName, finalPath = createFolder()
        self.decodedPlot.saveFig(finalPath + '/Decoded Plot.png')
        self.decodedData.saveFig(finalPath + '/Decoded Data.png')
        copy(dataPath, finalPath + '/' + outputName + '.mat')
      
      self.buffer = (self.buffer + 1)%self.maxBuffer
      runPico()
Code example #12
def get_image(self, name, transform, mode='train'):
    image = cv2.imread('./input/{}/{}'.format(mode, name))
    try:
        # Prefer the run-length-encoded mask if one exists for this image.
        mask = do_length_decode(
            self.rle_masks[name.split('.')[0]]['rle_mask'])
        mask = cv2.resize(mask, image.shape[:2][::-1])
    except Exception:
        # Fall back to a mask image stored on disk.
        mask = cv2.imread('./input/masks/' + name, cv2.IMREAD_GRAYSCALE)
    if mask is None:
        mask = np.zeros_like(image[:, :, 0])
    # Crop both image and mask to the stored bounding box, then augment.
    x0, y0, x1, y1 = self.bbox_dict[name]
    image = image[int(y0):int(y1), int(x0):int(x1)]
    mask = mask[int(y0):int(y1), int(x0):int(x1)]
    image = transform(image, mask)
    return image
Code example #13
def image_pipeline(img, mtx, dist):
    #Undistort the image
    undist = undistort(img, mtx, dist)
    undist = cv2.GaussianBlur(undist, (5, 5), 0)
    undist = normalize(undist)
    #Warp perspective
    warped, dst, src, Minv = transform(undist)
    #Threshold to generate binary image where lane lines are clear
    threshold_img = thresh_pipeline(warped)[0]
    #Use morphology to clean up the binary image
    kernel = np.ones((5, 5), np.uint8)
    result = cv2.morphologyEx(threshold_img, cv2.MORPH_OPEN, kernel)
    kernel = np.ones((10, 10), np.uint8)
    result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel)
    return result, threshold_img, warped, undist, Minv
Code example #14
File: shapes2.py Project: shrutig/sage
    def jmol_repr(self, render_params):
        r"""
        Returns representation of the object suitable for plotting
        using Jmol.

        TESTS::

            sage: P = point3d((1,2,3),size=3,color='purple')
            sage: P.jmol_repr(P.default_render_params())
            ['draw point_1 DIAMETER 3 {1.0 2.0 3.0}\ncolor $point_1  [128,0,128]']
        """
        name = render_params.unique_name('point')
        transform = render_params.transform
        cen = self.loc if transform is None else transform(self.loc)
        return ["draw %s DIAMETER %s {%s %s %s}\n%s" % (name, int(self.size), cen[0], cen[1], cen[2], self.texture.jmol_str('$' + name))]
Code example #15
File: keygen.py Project: JHUISI/auto-tools
def keygen(file):
    SDLLinesForKeygen = []

    if ( (type(file) is not str) or (len(file) == 0) ):
        sys.exit("First argument passed to keygen.py is invalid.")

    parseFile2(file, False)
    (varsToBlindList, rccaData) = (transform(False))
    rcca(rccaData)
    varNamesForListDecls = []

    updateCodeAndStructs()

    if (keygenBlindingExponent in assignInfo[keygenFuncName]):
        sys.exit("keygen.py:  the variable used for keygenBlindingExponent in config.py already exists in the keygen function of the scheme being analyzed.")

    if ( (keygenFuncName not in assignInfo) or (outputKeyword not in assignInfo[keygenFuncName]) ):
        sys.exit("assignInfo structure obtained in keygen function of keygen.py did not have the right keygen function name or output keywords.")

    keygenOutput = assignInfo[keygenFuncName][outputKeyword].getVarDeps()
    if (len(keygenOutput) == 0):
        sys.exit("Variable dependencies obtained for output of keygen in keygen.py was of length zero.")

    SDLLinesForKeygen.append(keygenBlindingExponent + " := random(ZR)\n")
    lineNoAfterThisAddition = writeLinesToFuncAfterVarLastAssign(keygenFuncName, SDLLinesForKeygen, None)

    for keygenOutput_ind in keygenOutput:
        secretKeyName = blindKeygenOutputElement(keygenOutput_ind, varsToBlindList, varNamesForListDecls)

    if (len(varsToBlindList) != 0):
        sys.exit("keygen.py completed without blinding all of the variables passed to it by transform.py.")

    SDLLinesForKeygen = []
    SDLLinesForKeygen.append("output := list{" + keygenBlindingExponent + ", " + secretKeyName + blindingSuffix + "}\n")

    lineNoKeygenOutput = getLineNoOfOutputStatement(keygenFuncName)
    removeFromLinesOfCode([lineNoKeygenOutput])
    appendToLinesOfCode(SDLLinesForKeygen, lineNoKeygenOutput)
    updateCodeAndStructs()

    for index_listVars in range(0, len(varNamesForListDecls)):
        varNamesForListDecls[index_listVars] = varNamesForListDecls[index_listVars] + blindingSuffix + " := list\n"

    lineNoEndTypesSection = getEndLineNoOfFunc(TYPES_HEADER)
    appendToLinesOfCode(varNamesForListDecls, lineNoEndTypesSection)
    updateCodeAndStructs()
Code example #16
File: pystats.py Project: estebanhurtado/cutedots
def fitPca(data, project=False, transform=None):
    "Variables in different rows"

    if not transform is None:
        data = transform(data)

    cdata = data - data.mean(axis=0)
    R = np.cov(cdata, rowvar=0)

    eval, evec = la.eigh(R)
    idx = np.argsort(eval)[::-1]
    evec = evec[:,idx]
    eval = eval[idx]
    if project:
        projected = np.dot(evec.T, cdata.T).T
        return evec, eval, projected
    else:
        return evec, eval
Code example #17
File: pystats.py Project: N7Daniel/WIP-Cutedots
def fitPca(data, project=False, transform=None):
    "Variables in different rows"

    if not transform is None:
        data = transform(data)

    cdata = data - data.mean(axis=0)
    R = np.cov(cdata, rowvar=0)

    eval, evec = la.eigh(R)
    idx = np.argsort(eval)[::-1]
    evec = evec[:, idx]
    eval = eval[idx]
    if project:
        projected = np.dot(evec.T, cdata.T).T
        return evec, eval, projected
    else:
        return evec, eval
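
A toy run of fitPca above, assuming the module imports numpy as np and numpy.linalg as la, as the calls suggest:

import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(0)
data = rng.normal(size=(200, 3))   # 200 observations of 3 variables
data[:, 2] += 2.0 * data[:, 0]     # make the third variable correlated

evec, evals, projected = fitPca(data, project=True)
print(evals)            # eigenvalues, largest first
print(projected.shape)  # (200, 3): data in principal-component coordinates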
Code example #18
def read_image(dir):
    transform = transforms.Compose(
        [Rescale(224), RandomCrop(223),
         Normalize(), ToTensor()])

    image = cv2.imread(dir)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    sample = {'image': image, 'keypoints': None}
    transformed_image = transform(sample)

    images = transformed_image['image']
    print(images.shape)
    images = images.type(torch.FloatTensor)
    images = images.unsqueeze(0)
    out = model(images)
    out = out.view(-1, 2)
    out = out.data * 50.0 + 100
    images = np.transpose(images, (0, 2, 3, 1)).numpy().squeeze(3)
    display(images.squeeze(0), out)
Code example #19
def action():
    choice = input('login? : ')
    if (choice == 'y'):
        login()
    game = input('enter game format : ')
    create_game(game)
    time.sleep(5)

    while (1):
        # raw_move = input('next move : ')
        #speak('what is your next move?')
        print('next move : ')
        raw_move = my_word()
        if raw_move == 'stop':
            exit()
        move = transform(raw_move)
        type = validate(move)
        if type != 'invalid':
            print('move is : ', move)
            make_move(move)
        else:
            print('invalid move : ', move)
Code example #20
File: main.py Project: liangwj45/Computer-Vision
from segmentation import *
from transform import *
from utils import *

if __name__ == "__main__":
    # rename('../DataSet')
    imgs = transform()
    identify(imgs)
Code example #21
File: chimera-ss.py Project: ShariqM/soundcoding
Fs, x = gen_signal(opt)
Fs, y = gen_signal(opt)
wavfile.write("test/ss/x.wav", Fs, x.astype(np.int16))
wavfile.write("test/ss/y.wav", Fs, y.astype(np.int16))

for (sig,name) in ((x, "X"), (y, "Y")):
    plot("Signal (%s)" % name, sig)

opt.time_length = len(x)


for f in (1, 6, 18):
    opt.nfilters_per_octave = f
    print 'Using nfilters_per_octave=%d' % f

    wc_x = transform(Fs, x, opt)
    wc_y = transform(Fs, y, opt)

    wc_xy = np.absolute(wc_x) * np.exp(1j * np.angle(wc_y))
    wc_yx = np.absolute(wc_y) * np.exp(1j * np.angle(wc_x))

    names = ['MAG_X_PHASE_Y', 'MAG_Y_PHASE_X']
    t = 0
    for wc_curr in (wc_xy, wc_yx):
        print 'Analyzing %s' % names[t]
        recon = itransform(wc_curr, Fs, opt)
        plot("Reconstruction %d" % t, recon)
        args = (opt.nfilters_per_octave, names[t])
        wavfile.write("test/ss/recon_f=%d_%s.wav" % args, Fs, recon.astype(np.int16))
        t = t + 1
Code example #22
t.connect("osc", "/set", ["selected_loop_num", 5], "127.0.0.1", 9951,
          t.get("pgmchange",
                6))  # after pressing first switch on fcb1010 select sixth loop

t.connect("mod", "cha1", t.get("j0_button", 3))  # setting up modifiers
t.connect("mod", "cha2", t.get("j0_button", 0))  #
t.connect("mod", "cha3", t.get("j0_button", 2))  #
t.connect("mod", "cha4", t.get("j0_button", 1))  #

t.withModifiers(
    on=["cha1"]
)  # while pressed first button move right axis for wet volume of the first loop
t.connect(
    "osc", "/sl/0/set", ["wet"], "127.0.0.1", 9951, True,
    primitive(
        repeat(t.get("tick"), transform(t.get("j0_axis", 1), ampMoveDiscon)),
        0, 0, 1))
t.connect("osc", "/sl/0/hit", ["record"], "127.0.0.1", 9951,
          t.get("j0_button", 4))
t.connect("osc", "/sl/0/hit", ["overdub"], "127.0.0.1", 9951,
          t.get("j0_button", 5))
t.connect("osc", "/sl/0/hit", ["substitute"], "127.0.0.1", 9951,
          t.get("j0_button", 6))
t.connect("osc", "/sl/0/hit", ["multiply"], "127.0.0.1", 9951,
          t.get("j0_button", 7))
t.connect("osc", "/sl/0/hit", ["record"], "127.0.0.1", 9951, False, True,
          t.get("pgmchange", 3))
t.clearModifiers()

t.withModifiers(
    on=["cha2"]
Code example #23
      print prefix, input.value
    return OutputSignal(prnt, input)

t.add(Printer())
t.add(Ticker(20))
t.add(Joystick("/dev/input/js0"), "j0_")
t.add(MidiInput())
t.add(MidiOutput())

t.connect("note", 48, t.get("j0_button",0))
t.connect("print", "note 48", t.get("note", 48))
t.connect("print", "escape:", t.get("key", 9))
t.connect("click", 1, t.get("j0_button", 4))
t.connect("click", 2, t.get("j0_button", 5))
t.connect("click", 3, t.get("j0_button", 6))
t.connect("mousemove", pair(transform(t.get("j0_axis", 2), ampMove),
                            transform(t.get("j0_axis", 3), ampMove)))
t.connect("key", 98, t.get("j0_axis", 4))
t.connect("print", primitive(repeat(t.get("tick"), transform(t.get("j0_axis", 1), ampMoveDiscon)), 0, 0, 127))
t.connect("note", 50, primitive(repeat(t.get("tick"), transform(t.get("j0_axis", 1), ampMoveDiscon)), 0, 0, 127))

"""
t.connect("print", "escape:", t.get("key", 9))
t.connect("mod", "special", sticky(t.get("key", 66)))
t.withModifiers(on=["special"])
t.connect("print", "special-a:", t.get("key", 38))
t.clearModifiers()
t.withModifiers(off=["special"])
t.connect("print", "non-special-a:", t.get("key", 38))
t.clearModifiers()
Code example #24
File: example.py Project: kikyou123/STN
    t_img2 = np.array(img2 * 255.0, dtype=np.uint8)
    cv2.imshow('img1', t_img1)
    cv2.waitKey()
    cv2.imshow('img2', t_img2)
    cv2.waitKey()

    img1 = np.reshape(img1, (1, 400, 400, 3))
    img2 = np.reshape(img2, (1, 400, 400, 3))
    img = np.concatenate((img1, img2), axis=0)
    return img


x = tf.placeholder(tf.float32, [None, 400, 400, 3])
n_fc = 6

initial = np.array([[0.5, 0, 0], [0, 0.5, 0]]).astype('float32')
initial = initial.flatten()

w_fc1 = tf.get_variable('W_fc1', [400 * 400 * 3, n_fc],
                        initializer=tf.constant_initializer(0.0))
b = tf.get_variable('b', initializer=initial)
x_ = tf.reshape(x, [-1, 400 * 400 * 3])
theta = tf.matmul(x_, w_fc1) + b
x_tran = transform(theta, x, [2, 200, 200, 3])

sess = tf.Session()
sess.run(tf.global_variables_initializer())
img = get_batch()
output = sess.run(x_tran, feed_dict={x: img})
imshow(output)
Code example #25
def job():
    extract()
    transform()
    load()
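
job() simply chains the three ETL stages. With placeholder stubs (the real extract/transform/load are project-specific) it runs end-to-end:

# Hypothetical stubs standing in for the project's ETL steps.
def extract():
    print("extracting...")

def transform():
    print("transforming...")

def load():
    print("loading...")

job()  # prints the three stage messages in order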
Code example #26
File: main.py Project: mes5991/Udacity-SDC-P4
        r'C:\Users\mes59\Documents\Udacity\SDC\Term 1\Project 4\CarND-Advanced-Lane-Lines\test_images\\',
        mtx, dist)
    print("Transforms tested!")

if 'test_thresh' in options:
    '''Displays binary thresholding steps used in image processing'''

    test_path = r"C:\Users\mes59\Documents\Udacity\SDC\Term 1\Project 4\CarND-Advanced-Lane-Lines\test_images"
    test_imgs = os.listdir(test_path)
    for i in test_imgs:
        img_path = test_path + '/' + i
        img = mpimg.imread(img_path)
        undist = undistort(img, mtx, dist)
        undist = cv2.GaussianBlur(undist, (5, 5), 0)
        undist = normalize(undist)
        warped, dst, src, Minv = transform(undist)

        combined_binary, gradx, sbinary, gray_binary, morphed, masked = thresh_pipeline(
            warped)

        plt.subplot(2, 3, 1)
        plt.imshow(warped)
        plt.title("warped")
        plt.subplot(2, 3, 2)
        plt.imshow(gradx, cmap='gray')
        plt.title("gradx")
        plt.subplot(2, 3, 3)
        plt.imshow(sbinary, cmap='gray')
        plt.title("sbinary")
        plt.subplot(2, 3, 4)
        plt.imshow(gray_binary, cmap='gray')
Code example #27
ds((((-bb_ - m) - 4 * m * aa_).expand() / d**4).expand())
ds((((-bb_ - m) - 4 * m * aa_).expand() / d**4).expand() + 7 * aa__ / 2)
ds(((((-bb_ - m) - 4 * m * aa_).expand() / d**4).expand() +
    7 * aa__ / 2).factor())

# In[35]:

(-((-bb_ - m - 4 * aa_ * m).expand() * -64 / (d**4) + 14 *
   ((4 * aa_ / d**2).expand() * 4).expand()).expand()).factor()

# In[28]:

((4 * aa_ / d**2).expand() * 4).expand()

# ## Curve for $\kappa$

# In[3]:

C, xtrans, ytrans = transform(4, 0, 8, 0, -2, 0)
C, xtrans, ytrans

# In[15]:


def wat(C):
    for x in C:
        yield int(x), 1


eclib.mwrank(wat(C))
Code example #28
    def load(self,
             file,
             move,
             rotation,
             axis,
             rescale,
             camara,
             where_to_look,
             texture=None):

        global the_vp
        model = Obj(file)
        light = V3(0, 0, 1)
        for face in model.vfaces:
            vcount = len(face)
            if vcount == 3:
                f1 = face[0][0] - 1
                f2 = face[1][0] - 1
                f3 = face[2][0] - 1
                a = model.vertices[f1]
                b = model.vertices[f2]
                c = model.vertices[f3]
                A = transform(a, the_vp, move, rotation, axis, rescale, camara,
                              where_to_look)
                B = transform(b, the_vp, move, rotation, axis, rescale, camara,
                              where_to_look)
                C = transform(c, the_vp, move, rotation, axis, rescale, camara,
                              where_to_look)
                # print('new_abc',A,B,C)
                normal = norm(cross(sub(B, A), sub(C, A)))
                intensity = dot(light, normal)
                if not texture:
                    grey = int(255 * intensity)
                    if grey < 0:
                        continue
                #     should call triangle here
                    self.triangle(A, B, C, color(grey, grey, grey))

                else:
                    t1 = face[0][1] - 1
                    t2 = face[1][1] - 1
                    t3 = face[2][1] - 1
                    if len(model.tvertices[t1]) == 3:
                        tA = V3(*model.tvertices[t1])
                        tB = V3(*model.tvertices[t2])
                        tC = V3(*model.tvertices[t3])
                    else:
                        tA = V2(*model.tvertices[t1])
                        tB = V2(*model.tvertices[t2])
                        tC = V2(*model.tvertices[t3])
                    n1 = face[0][2] - 1
                    n2 = face[1][2] - 1
                    n3 = face[2][2] - 1
                    nA = V3(*model.vnomals[n1])
                    nB = V3(*model.vnomals[n2])
                    nC = V3(*model.vnomals[n3])
                    self.triangle(A,
                                  B,
                                  C,
                                  texture=texture,
                                  texture_coords=(tA, tB, tC),
                                  normal_cord=(nA, nB, nC),
                                  intensity=intensity)
Code example #29
File: build.py Project: fivejjs/mypy
        for f in files:
            if not is_stub(f.path):
                out_path = os.path.join(self.output_dir, basename(f.path))
                # TODO log translation of f.path to out_path
                # TODO report compile error if failed
                ver = 3
                if PYTHON2 in self.flags:
                    ver = 2
                v = pythongen.PythonGenerator(ver)
                f.accept(v)
                self.log('translate %s to %s' % (f.path, out_path))
                outfile = open(out_path, 'w')
                outfile.write(v.output())
                outfile.close()

    void transform(self, MypyFile[] files):
        for f in files:
            # Transform parse tree and produce pretty-printed output.
            v = transform.DyncheckTransformVisitor(
                self.type_checker.type_map,
                self.semantic_analyzer.modules,
                is_pretty=True)
            f.accept(v)

    void generate_icode(self, MypyFile[] files):
        builder = icode.IcodeBuilder()
        for f in files:
            # TODO remove ugly builtins hack
            if not f.path.endswith('/builtins.py'):
                f.accept(builder)
        self.icode = builder.generated
Code example #30
File: test.py Project: sunstarchan/DPANet
            line = line.rstrip()
            keys.append(line)

    for i in range(1, 10 + 1):
        img = cv2.imread("../data/RGBD_sal/train/NJUD/rgb/%s.jpg" %
                         keys[i]).astype(np.float32)

        mask = cv2.imread("../data/RGBD_sal/train/NJUD/gt/%s.png" %
                          keys[i]).astype(np.float32)
        depth = cv2.imread("../data/RGBD_sal/train/NJUD/depth/%s.jpg" %
                           keys[i]).astype(np.float32)

        img, depth, mask = func1(img, depth, mask)
        out, depth_1, mask_1 = func(img, depth, mask)

        imgs, depths, masks = transform(img, depth, mask)

        import pdb
        pdb.set_trace()

        plt.subplot(231)
        plt.title("image")
        plt.imshow(img[:, :, ::-1])
        plt.subplot(232)
        plt.title("depth")
        plt.imshow(depth, cmap='gray')
        plt.subplot(233)
        plt.title("gt")
        plt.imshow(mask, cmap='gray')

        plt.subplot(234)
Code example #31
"Speech-Approximate Chimera"
from gen_signal import *
from transform import *
from common import *
from math import pi
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

opt = options()

Fs, x = gen_signal(opt)
opt.time_length = len(x)
wc = transform(Fs, x, opt)
wavfile.write("test/sa/orig.wav", Fs, x.astype(np.int16))

plot("Signal", x)

mag, angle = mag_angle(wc)
angle_diff, angle_total = angle_data(angle)

imshow("Wavegram", mag[:, :8000], hsv=False)

#plot("Total Angle", angle_total[24,:])

for k in range(wc.shape[0]):
    avg_diff = np.mean(angle_diff[k, :])
    for t in range(1, wc.shape[1]):
        new_angle = angle[k, t - 1] + avg_diff
        new_angle = new_angle - 2 * pi if new_angle >= pi else new_angle
        angle[k, t] = new_angle
Code example #32
File: chimera-sn.py Project: ShariqM/soundcoding
from common import *

opt = options()
direc = 'test/sn'

Fs, x = gen_signal(opt)
wavfile.write("%s/x.wav" % direc, Fs, x.astype(np.int16))

plot("Signal", x)

opt.time_length = len(x)
for f in (1,6,18):
    opt.nfilters_per_octave = f
    print 'Using nfilters_per_octave=%d' % f

    wc = transform(Fs, x, opt)

    wc_pc = np.copy(wc) # phase corrupt
    wc_pc = np.sign(np.real(wc)) * np.absolute(wc)
    wc_pc = np.absolute(wc)

    wc_ac = np.copy(wc) # amplitude corrupt
    wc_ac = np.mean(np.absolute(wc)) * np.exp(1j * np.angle(wc))

    names = ['nothing', 'phase', 'amplitude']
    t = 0
    for wc_curr in (wc, wc_pc, wc_ac):
        print 'Analyzing %s' % names[t]
        x_recon = itransform(wc_curr, Fs, opt)
        plot("Reconstruciton %d" % t, x_recon)
        args = (direc, opt.nfilters_per_octave, t, names[t])
Code example #33
File: test_spin.py Project: zoginni/helit
    rot = translate([-0.5 * shape[1], -0.5 * shape[0]])
    rot = rotate(numpy.pi * 2.0 / steps).dot(rot)
    rot = translate([0.5 * shape[1], 0.5 * shape[0]]).dot(rot)

    for _ in xrange(steps):
        image = transform(rot, image)

    fillmasked(image)

    return image


# Create dictionary of algorithms to try...
algs = OrderedDict()

algs['B-Spline 0 (nearest neighbour)'] = lambda hg, image: transform(
    hg, image, -1, -1, 0)
algs['B-Spline 1 (linear)'] = lambda hg, image: transform(hg, image, -1, -1, 1)
algs['B-Spline 2 (quadratic)'] = lambda hg, image: transform(
    hg, image, -1, -1, 2)
algs['B-Spline 3 (cubic)'] = lambda hg, image: transform(hg, image, -1, -1, 3)
algs['B-Spline 4'] = lambda hg, image: transform(hg, image, -1, -1, 4)
algs['B-Spline 5'] = lambda hg, image: transform(hg, image, -1, -1, 5)

for name, alg in algs.iteritems():
    print(name)

    rot_image = rotate_noop(image, transform=alg)
    rot_image = numpy.concatenate(
        (rot_image['r'][:, :, numpy.newaxis],
         rot_image['g'][:, :, numpy.newaxis], rot_image['b'][:, :,
                                                             numpy.newaxis]),
Code example #34
File: test_rotate.py Project: PeterZhouSZ/helit
image = imread(fn).astype(numpy.float32)
shape = image.shape
image = {'r' : image[:,:,0], 'g' : image[:,:,1], 'b' : image[:,:,2]}

angle = float(sys.argv[2]) * numpy.pi / 180.0



# Calculate homography...
hg = translate([-0.5*shape[1], -0.5*shape[0]])
hg = rotate(angle).dot(hg)
hg = translate([0.5*shape[1], 0.5*shape[0]]).dot(hg)

hg, out_shape = fit(hg, shape)



# Apply...
image = transform(numpy.linalg.inv(hg), image, out_shape[0], out_shape[1])



# Save resulting file...
image = numpy.concatenate((image['r'][:,:,numpy.newaxis], image['g'][:,:,numpy.newaxis], image['b'][:,:,numpy.newaxis]), axis=2)
image = (image+0.5).astype(numpy.uint8)

image[0,0,:] = (255, 0, 0)

out_fn = os.path.splitext(fn)[0] + '_' + sys.argv[2] + '.png'
imsave(out_fn, image)
Code example #35
Fs, x = gen_signal(opt)
Fs, y = gen_signal(opt)
wavfile.write("test/ss/x.wav", Fs, x.astype(np.int16))
wavfile.write("test/ss/y.wav", Fs, y.astype(np.int16))

for (sig, name) in ((x, "X"), (y, "Y")):
    plot("Signal (%s)" % name, sig)

opt.time_length = len(x)

for f in (1, 6, 18):
    opt.nfilters_per_octave = f
    print 'Using nfilters_per_octave=%d' % f

    wc_x = transform(Fs, x, opt)
    wc_y = transform(Fs, y, opt)

    wc_xy = np.absolute(wc_x) * np.exp(1j * np.angle(wc_y))
    wc_yx = np.absolute(wc_y) * np.exp(1j * np.angle(wc_x))

    names = ['MAG_X_PHASE_Y', 'MAG_Y_PHASE_X']
    t = 0
    for wc_curr in (wc_xy, wc_yx):
        print 'Analyzing %s' % names[t]
        recon = itransform(wc_curr, Fs, opt)
        plot("Reconstruction %d" % t, recon)
        args = (opt.nfilters_per_octave, names[t])
        wavfile.write("test/ss/recon_f=%d_%s.wav" % args, Fs,
                      recon.astype(np.int16))
        t = t + 1
Code example #36
File: test_rotate.py Project: zoginni/helit
fn = sys.argv[1]

image = imread(fn).astype(numpy.float32)
shape = image.shape
image = {'r': image[:, :, 0], 'g': image[:, :, 1], 'b': image[:, :, 2]}

angle = float(sys.argv[2]) * numpy.pi / 180.0

# Calculate homography...
hg = translate([-0.5 * shape[1], -0.5 * shape[0]])
hg = rotate(angle).dot(hg)
hg = translate([0.5 * shape[1], 0.5 * shape[0]]).dot(hg)

hg, out_shape = fit(hg, shape)

# Apply...
image = transform(numpy.linalg.inv(hg), image, out_shape[0], out_shape[1])

# Save resulting file...
image = numpy.concatenate(
    (image['r'][:, :, numpy.newaxis], image['g'][:, :, numpy.newaxis],
     image['b'][:, :, numpy.newaxis]),
    axis=2)
image = (image + 0.5).astype(numpy.uint8)

image[0, 0, :] = (255, 0, 0)

out_fn = os.path.splitext(fn)[0] + '_' + sys.argv[2] + '.png'
imsave(out_fn, image)
Code example #37
from datetime import date
from typing import cast, Union

from dateutil.parser import isoparse
{% from "property_templates/date_property.py.jinja" import transform, construct %}
some_source = date(2020, 10, 12)
{{ transform(property, "some_source", "some_destination") }}
{{ construct(property, "some_destination") }}
Code example #38
File: test_spin.py Project: PeterZhouSZ/helit
  rot = rotate(numpy.pi*2.0 / steps).dot(rot)
  rot = translate([0.5*shape[1], 0.5*shape[0]]).dot(rot)
  
  for _ in xrange(steps):
    image = transform(rot, image)
  
  fillmasked(image)
  
  return image



# Create dictionary of algorithms to try...
algs = OrderedDict()

algs['B-Spline 0 (nearest neighbour)'] = lambda hg, image: transform(hg, image, -1, -1, 0)
algs['B-Spline 1 (linear)'] = lambda hg, image: transform(hg, image, -1, -1, 1)
algs['B-Spline 2 (quadratic)'] = lambda hg, image: transform(hg, image, -1, -1, 2)
algs['B-Spline 3 (cubic)'] = lambda hg, image: transform(hg, image, -1, -1, 3)
algs['B-Spline 4'] = lambda hg, image: transform(hg, image, -1, -1, 4)
algs['B-Spline 5'] = lambda hg, image: transform(hg, image, -1, -1, 5)




for name, alg in algs.iteritems():
  print(name)
  
  rot_image = rotate_noop(image, transform = alg)
  rot_image = numpy.concatenate((rot_image['r'][:,:,numpy.newaxis], rot_image['g'][:,:,numpy.newaxis], rot_image['b'][:,:,numpy.newaxis]), axis=2)