Example #1
    def test_forward_merge_one_to_active(self, relPath, baseFmt, block):
        """
        Forward Merge One to Active Layer

        Create image chain: BASE---S1---S2
        Start VM
        Merge S1 >> S2
        Final image chain:  BASE---S2
        """
        base_file = utils.create_image('BASE', fmt=baseFmt, block=block)
        utils.write_image(base_file, 0, 3072, 1)
        s1_file = utils.create_image('S1', 'BASE', relative=relPath,
                                     backingFmt=baseFmt, block=block)
        utils.write_image(s1_file, 1024, 2048, 2)
        s2_file = utils.create_image('S2', 'S1', relative=relPath,
                                     block=block)
        utils.write_image(s2_file, 2048, 1024, 3)

        dom = utils.create_vm('livemerge-test', 'S2', block=block)
        try:
            dom.blockRebase(s2_file, base_file, 0, 0)
            flags = libvirt.VIR_DOMAIN_BLOCK_JOB_TYPE_PULL
            self.assertTrue(utils.wait_block_job(dom, s2_file, flags))
        finally:
            dom.destroy()

        self.assertTrue(utils.verify_image(s2_file, 0, 1024, 1))
        self.assertTrue(utils.verify_image(s2_file, 1024, 1024, 2))
        self.assertTrue(utils.verify_image(s2_file, 2048, 1024, 3))
        self.assertTrue(utils.verify_backing_file(base_file, None))
        self.assertTrue(utils.verify_backing_file(s2_file, 'BASE',
                                                  relative=relPath,
                                                  block=block))
        self.assertTrue(utils.verify_image_format(s1_file, 'qcow2'))
Example #2
    def test_backward_merge_from_active(self):
        """
        Backward Merge One from Active Layer

        Create image chain: BASE---S1---S2
        Start VM
        Merge S1 << S2
        Final image chain:  BASE---S1
        """
        base_file = utils.create_image('BASE')
        s1_file = utils.create_image('S1', 'BASE')
        s2_file = utils.create_image('S2', 'S1')

        dom = utils.create_vm('livemerge-test', 'S2', block=False)
        try:
            dom.blockCommit(s2_file, s1_file, s2_file, 0, 0)
            flags = libvirt.VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT
            self.assertTrue(utils.wait_block_job(dom, s2_file, flags))
        finally:
            dom.destroy()

        self.assertTrue(utils.verify_backing_file(base_file, None))
        self.assertTrue(utils.verify_backing_file(s1_file, 'BASE',
                                                  relative=False,
                                                  block=False))
Example #3
    def test_backward_merge_from_inactive(self, relPath, baseFmt, block):
        """
        Backward Merge One from Inactive Layer

        Create image chain: BASE---S1---S2
        Start VM
        Merge BASE << S1
        Final image chain:  BASE---S2
        """
        base_file = utils.create_image('BASE', fmt=baseFmt, block=block)
        s1_file = utils.create_image('S1', 'BASE', relative=relPath,
                                     backingFmt=baseFmt, block=block)
        s2_file = utils.create_image('S2', 'S1', relative=relPath,
                                     block=block)
        self.assertTrue(utils.verify_backing_file(base_file, None))
        self.assertTrue(utils.verify_backing_file(s1_file, 'BASE',
                                                  relative=relPath,
                                                  block=block))
        self.assertTrue(utils.verify_backing_file(s2_file, 'S1',
                                                  relative=relPath,
                                                  block=block))

        dom = utils.create_vm('livemerge-test', 'S2', block=block)
        try:
            dom.blockCommit('vda', base_file, s1_file, 0, 0)
            flags = libvirt.VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT
            self.assertTrue(utils.wait_block_job(dom, s2_file, flags))
        finally:
            dom.destroy()

        self.assertTrue(utils.verify_backing_file(base_file, None))
        self.assertTrue(utils.verify_backing_file(s2_file, 'BASE',
                                                  relative=relPath,
                                                  block=block))
        self.assertTrue(utils.verify_image_format(base_file, baseFmt))
Example #4
def get_decomposition(img, net):
    net_in = ['rgb']
    net_out = ['albedo', 'shading', 'segmentation']
    normalize = True
    cuda = Cuda(0)

    copy_im = copy.deepcopy(img)
    pil_im = Image.fromarray(copy_im)
    resized_im = pil_im.resize((480, 352), Image.ANTIALIAS)  # IntrinSeg network only accepts input of this size
    resized_im = np.array(resized_im, dtype=np.int64)
    resized_im = resized_im.astype(np.float32)
    resized_im[np.isnan(resized_im)] = 0
    in_ = resized_im
    in_ = in_.transpose((2, 0, 1))

    if normalize:
        in_ = (in_ * 255 / np.max(in_)).astype('uint8')
        in_ = (in_ / 255.0).astype(np.float32)

    in_ = np.expand_dims(in_, axis=0)
    rgb = torch.from_numpy(in_)
    rgb = Variable(rgb).cuda(device=cuda.device)
    albedo_out, shading_out, segmentation_out = net(rgb)
    albedo = albedo_out.cpu().detach().numpy()
    shading = shading_out.cpu().detach().numpy()
    albedo_out = create_image(albedo)
    shading_out = create_image(shading)

    albedo_out = cv2.resize(np.asarray(albedo_out), (img.shape[1], img.shape[0]))
    shading_out = cv2.resize(np.asarray(shading_out), (img.shape[1], img.shape[0]))
    shading_out = cv2.cvtColor(shading_out, cv2.COLOR_BGR2GRAY)

    return albedo_out, shading_out
Example #5
def share():
    session['token_info'], authorized = get_token(session)
    session.modified = True
    if not authorized:
        flash("Please Login with your Spotify Account")
        return redirect('/')
    return serve_image(create_image(songs=session['songs_5'],
                                    artists=session['artists_5'],
                                    username=session['username']))
Example #6
    def get_image(self):
        comm = ut.commect(self.param_dic)

        image_name = None
        if comm is not None:
            image_name = ut.create_image(comm)
            comm.close()

        return image_name
Example #7
    def test_forward_merge_all_to_active(self):
        """
        Forward Merge All to Active Layer

        Create image chain: BASE---S1---S2
        Start VM
        Merge (BASE + S1) >> S2
        Final image chain:  S2
        """
        utils.create_image('BASE')
        utils.create_image('S1', 'BASE')
        s2_file = utils.create_image('S2', 'S1')

        dom = utils.create_vm('livemerge-test', 'S2', block=False)
        try:
            dom.blockRebase(s2_file, None, 0, 0)
            flags = libvirt.VIR_DOMAIN_BLOCK_JOB_TYPE_PULL
            self.assertTrue(utils.wait_block_job(dom, s2_file, flags))
        finally:
            dom.destroy()

        self.assertTrue(utils.verify_backing_file(s2_file, None))
Example #8
def function_create():
    with utils.AtomicRequest() as atomic:

        function_id = uuid.uuid4().hex

        atomic.driver_endpoint = driver_endpoint

        user, tenant = utils.get_headers(request)

        zip_file = utils.get_zip(request)
        zip_url = utils.upload_zip(function_id, zip_file)

        if not zip_url:
            atomic.errors = True
            return critical_error('Not able to store zip.')

        atomic.zip_url = zip_url

        metadata = utils.get_metadata(request)

        if not utils.validate_json(utils.build_schema, metadata):
            atomic.errors = True
            return bad_request("Error validating json.")

        tag = "{0}_{1}_{2}".format(tenant, user, metadata.get('name'))
        payload = {
            "memory": metadata.get('memory'),
            "tags": [tag],
            "runtime": metadata.get('runtime'),
            "zip_location": zip_url,
            "name": metadata.get('name')
        }

        image_id = utils.create_image(driver_endpoint, payload)
        atomic.image_id = image_id

        function = utils.create_function(tenant, user, function_id, image_id,
                                         zip_url, tag, metadata)

        if not function:
            atomic.errors = True
            return critical_error('Error building the function.')

        return Response(function_id, status=201)
Example #9
    def test_commit(self):
        print "Creating VM image"
        base_file = utils.get_image_path('BASE', False, False)
        utils.build_vm('BASE', touch_script, '10G')
        s1_file = utils.create_image('S1', 'BASE', size='10G')

        # Monitor the image sizes
        stats = {'BASE': [], 'S1': []}
        stopEvent = threading.Event()
        base_watcher = ImageWatcher(base_file, stats['BASE'], stopEvent)
        base_watcher.start()
        s1_watcher = ImageWatcher(s1_file, stats['S1'], stopEvent)
        s1_watcher.start()

        # Run the test
        print "Starting VM"
        dom = utils.create_vm('livemerge-test', 'S1')
        # TODO: Start a livemerge

        try:
            dom.blockCommit(s1_file, base_file, s1_file, 0, 0)
            flags = libvirt.VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT
            self.assertTrue(utils.wait_block_job(dom, s1_file, flags))
        finally:
            dom.destroy()


        ## Wait until image has grown enough
        #print "Sampling"
        #while True:
            #end = utils.get_image_end_offset(s1_file) / 1024 / 1024
            #print "S1 is using %i MB" % end
            #if end >= 5 * 1024:
                #break
            #time.sleep(5)

        # Stop the test
        print "Cleaning up"
        stopEvent.set()
        base_watcher.join()
        s1_watcher.join()

        # Print results
        self._print_results(stats)
Example #10
    def __init__(self, width, height, caption):
        GameEngine.states = []
        GameEngine.run = True

        GameEngine.display_width = width
        GameEngine.display_height = height

        GameEngine.font = pygame.font.Font("resources/vgaoem.fon", 15)
        GameEngine.clock = pygame.time.Clock()
        GameEngine.sprite_dict = {
            filename[:-4]: utils.create_image(filename[:-4])
            for filename in os.listdir('resources') if filename[-4:] == '.png'
        }

        pygame.display.set_mode(
            (GameEngine.display_width, GameEngine.display_height))
        pygame.display.set_caption(caption)

        GameEngine.game_display = pygame.display.get_surface()
Example #11
    end_y = 2.5
    width = 1000  # image width
    step = (end_x - start_x) / width
    Y, X = np.mgrid[start_y:end_y:step, start_x:end_x:step]
    Z = X + 1j * Y
    Z = Z.astype(np.complex64)

    import time
    t1 = time.time()
    seqs = np.zeros([n] + list(Z.shape) + [3])
    for i, phase in enumerate(tqdm(np.linspace(0, 2 * np.pi, n))):
        phase = tf.Variable(1j * phase)
        ns, zs = julia_set(Z, phase)
        final_step = ns.numpy()
        final_z = zs.numpy()
        img = create_image(final_z, final_step, R)
        seqs[i, :, :] = np.array(img)
    t2 = time.time()
    #gif('julia', seqs, 8)
    mp4('img/juliatf', seqs, 8)

    t3 = time.time()
    seqs = np.zeros([n] + list(Z.shape) + [3])
    for i, phase in enumerate(tqdm(np.linspace(0, 2 * np.pi, n))):
        ns, zs = julia_set_np(Z, 1j * phase)
        final_step = ns
        final_z = zs
        img = create_image(final_z, final_step, R)
        seqs[i, :, :] = np.array(img)
    t4 = time.time()
    mp4('img/julianp', seqs, 8)
Example #12
import matplotlib.pyplot as plt
from utils import create_image, create_random_circles, Timer
import numpy as np

from affine import Affine
from rasterstats import zonal_stats

# Define geo extent
llx = -75.0
lly = -35.50
urx = -34.0
ury = 5.54
extent = [llx, lly, urx, ury]

# Define resolution
resolution = 2.0 # km

# Create image
image = create_image(extent, resolution, driver='MEM')

# Get Affine object in order to run zonal_stats
aff = Affine.from_gdal(*image.GetGeoTransform())

# Extract values
values = image.ReadAsArray()

# Get no-data value
nodata = image.GetRasterBand(1).GetNoDataValue()

# User-defined statistics example
def model(x):
    value = np.max(x) - np.min(x)
    if np.ma.is_masked(value):
        value = None
    return value
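
# The listing breaks off before the statistics are actually computed. A minimal
# sketch of how the pieces above could feed into zonal_stats, assuming a
# hypothetical vector layer 'zones.geojson', might look like this:
stats = zonal_stats('zones.geojson',             # zone polygons (assumed path)
                    values,                      # raster array extracted above
                    affine=aff,                  # geotransform as an Affine object
                    nodata=nodata,               # no-data value read from the band
                    add_stats={'range': model})  # register the user-defined statistic
print(stats)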
Example #13
# -*- coding: utf-8 -*-

__author__ = 'Douglas Uba'
__email__ = '*****@*****.**'

import matplotlib.pyplot as plt
from utils import create_image

# Define geo extent
llx = -75.0
lly = -35.50
urx = -34.0
ury = 5.54
extent = [llx, lly, urx, ury]

# Define resolution
resolution = 2.0  # km

# Create image
image = create_image(extent,
                     resolution,
                     path='./data/grid.tif',
                     driver='GTiff')

# Show result
plt.imshow(image.ReadAsArray())
plt.show()
Example #14
def interpolate(templist, descs, model, audiofile):

    video_temp_list = []

    # interpolate elements between each pair of images

    for idx1, pt in enumerate(descs):

        # get the next index of the descs list;
        # if z1_idx is out of range, break the loop
        z1_idx = idx1 + 1
        if z1_idx >= len(descs):
            break

        current_lyric = pt[1]

        # get the interval between 2 lines/elements in seconds `ttime`
        d1 = pt[0]
        d2 = descs[z1_idx][0]
        ttime = d2 - d1

        # if it is the very first index, load the first pt temp file;
        # if not, assign the previous pt file (z1) to the zs variable
        if idx1 == 0:
            zs = torch.load(templist[idx1])
        else:
            zs = z1

        # compute the number of elements to be inserted between the two elements
        N = round(ttime * interpol)
        print(z1_idx)
        # the code below determines whether the output is a list (for biggan);
        # if not, wrap it in a list
        if not isinstance(zs, list):
            z0 = [zs]
            z1 = [torch.load(templist[z1_idx])]
        else:
            z0 = zs
            z1 = torch.load(templist[z1_idx])

        # loop over the range of elements and generate the images
        image_temp_list = []
        for t in range(N):

            azs = []
            for r in zip(z0, z1):
                z_diff = r[1] - r[0]
                inter_zs = r[0] + sigmoid(t / (N - 1)) * z_diff
                azs.append(inter_zs)

            # Generate image
            with torch.no_grad():
                if generator == 'biggan':
                    img = model(azs[0], azs[1], 1).cpu().numpy()
                    img = img[0]
                elif generator == 'dall-e':
                    img = unmap_pixels(
                        torch.sigmoid(model(
                            azs[0])[:, :3]).cpu().float()).numpy()
                    img = img[0]
                elif generator == 'stylegan':
                    img = model(azs[0])
                image_temp = create_image(img, t, current_lyric, generator)
            image_temp_list.append(image_temp)

        video_temp = create_video.createvid(f'{current_lyric}',
                                            image_temp_list,
                                            duration=ttime / N)
        video_temp_list.append(video_temp)
    # Finally, create the final output and save it to the output folder
    create_video.concatvids(descs, video_temp_list, audiofile, lyrics=lyrics)