Ejemplo n.º 1
0
  def Execute(self, opt, args):
    """Abandon branch <name> in the given projects (all if none listed).

    Exits non-zero when the branch name is invalid, when any project
    fails to abandon the branch, or when no project had the branch.
    """
    if not args:
      self.Usage()

    nb = args[0]
    if not git.check_ref_format('heads/%s' % nb):
      print >>sys.stderr, "error: '%s' is not a valid name" % nb
      sys.exit(1)

    # (removed a duplicate 'nb = args[0]' assignment here)
    err = []
    success = []
    all_projects = self.GetProjects(args[1:])

    pm = Progress('Abandon %s' % nb, len(all_projects))
    for project in all_projects:
      pm.update()

      # AbandonBranch: None when the project never had the branch,
      # truthy on success, falsy on failure (per the checks below).
      status = project.AbandonBranch(nb)
      if status is not None:
        if status:
          success.append(project)
        else:
          err.append(project)
    pm.end()

    if err:
      for p in err:
        print >>sys.stderr,\
          "error: %s/: cannot abandon %s" \
          % (p.relpath, nb)
      sys.exit(1)
    elif not success:
      print >>sys.stderr, 'error: no project has branch %s' % nb
      sys.exit(1)
    else:
      print >>sys.stderr, 'Abandoned in %d project(s):\n  %s' % (
            len(success), '\n  '.join(p.relpath for p in success))
Ejemplo n.º 2
0
 def start(self):
     """Run the build once: launch the workers, wait for them all, and
     always deliver the final progress marker to the callback."""
     if self.finished:
         raise NotImplementedError(
             "Builder instance can be used only once.")
     self.rc = 0
     # Empty work list: nothing to build, report and return early.
     if not self.sortedList:
         cinfo(self.printInfo, "All targets are up-to-date. Nothing to do.")
         self.finished = True
         return
     self.workers = [Worker(self, idx) for idx in range(self.numWorkers)]
     try:
         for w in self.workers:
             w.start()
         for w in self.workers:
             logger.debugf("Joining worker {}", w.id)
             w.join()
     finally:
         # Even if a worker raised, the progress callback must still
         # receive the end marker.
         if self.progressFn:
             endMark = Progress(self.numWorkers)
             endMark.finish(self.rc)
             self.progressFn(endMark)
Ejemplo n.º 3
0
def writeAnimation(human, linebuffer, animTrack, config):
    """Append an Ogre XML <animation> element for animTrack to linebuffer.

    One <track> is written per skeleton bone, each holding a <keyframe>
    per animation frame with the bone's translation and an axis/angle
    rotation.  The progress meter is stepped once per bone.
    """
    import numpy as np
    # One progress step per bone (stepped at the bottom of the bone loop).
    progress = Progress(len(human.getSkeleton().getBones()))
    log.message("Exporting animation %s.", animTrack.name)
    linebuffer.append('        <animation name="%s" length="%s">' %
                      (animTrack.name, animTrack.getPlaytime()))
    linebuffer.append('            <tracks>')
    for bIdx, bone in enumerate(human.getSkeleton().getBones()):
        # Note: OgreXMLConverter will optimize out unused (not moving) animation tracks
        linebuffer.append('                <track bone="%s">' % bone.name)
        linebuffer.append('                    <keyframes>')
        # Keyframe timestamps are spaced 1/frameRate seconds apart.
        frameTime = 1.0 / float(animTrack.frameRate)
        for frameIdx in xrange(animTrack.nFrames):
            # Pose matrix of this bone at this frame; [:3, 3] below reads
            # its translation column.
            poseMat = animTrack.getAtFramePos(frameIdx)[bIdx]
            translation = poseMat[:3, 3]
            angle, axis, _ = transformations.rotation_from_matrix(poseMat)
            # Rotation axis is multiplied into the bone's rest matrix --
            # presumably to express it in bone-local space for Ogre;
            # TODO confirm against the Ogre skeleton format.
            axis = np.asarray(
                axis *
                np.matrix(bone.getRestMatrix(offsetVect=config.offset)))[0]
            linebuffer.append('                        <keyframe time="%s">' %
                              (float(frameIdx) * frameTime))
            linebuffer.append(
                '                            <translate x="%s" y="%s" z="%s" />'
                % (translation[0], translation[1], translation[2]))
            # TODO account for scale
            linebuffer.append(
                '                            <rotate angle="%s">' % angle)
            linebuffer.append(
                '                                <axis x="%s" y="%s" z="%s" />'
                % (axis[0], axis[1], axis[2]))
            linebuffer.append('                            </rotate>')
            linebuffer.append('                        </keyframe>')
        linebuffer.append('                    </keyframes>')
        linebuffer.append('                </track>')
        progress.step()
    linebuffer.append('            </tracks>')
    linebuffer.append('        </animation>')
Ejemplo n.º 4
0
def writePolylist(fp, mesh, config):
    """Write the COLLADA <polylist> element for a quad mesh to fp.

    Emits the input declarations (vertex, optional normal, UV), a
    <vcount> entry of "4" per face, and the interleaved index list in
    <p>.  The progress meter is stepped twice.
    """
    progress = Progress(2)

    nFaces = len(mesh.fvert)

    header = [
        '        <polylist count="%d">\n' % nFaces,
        '          <input offset="0" semantic="VERTEX" source="#%s-Vertex"/>\n' % mesh.name,
    ]
    if config.useNormals:
        header.append('          <input offset="1" semantic="NORMAL" source="#%s-Normals"/>\n' % mesh.name)
        header.append('          <input offset="2" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name)
    else:
        header.append('          <input offset="1" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name)
    header.append('          <vcount>')
    fp.write(''.join(header))

    # Every face is a quad.
    fp.write('4 ' * nFaces)

    fp.write('\n          </vcount>\n          <p>')
    progress.step()

    for faceIdx, verts in enumerate(mesh.fvert):
        uvs = mesh.fuvs[faceIdx]
        if config.useNormals:
            # The vertex index doubles as the normal index.
            fp.write(''.join('%d %d %d ' % (verts[n], verts[n], uvs[n]) for n in range(4)))
        else:
            fp.write(''.join('%d %d ' % (verts[n], uvs[n]) for n in range(4)))

    fp.write('          </p>\n        </polylist>\n')
    progress.step()
Ejemplo n.º 5
0
    def Execute(self, opt, args):
        """Check out branch <name> in the listed projects (all if none).

        Exits non-zero when at least one project cannot check out the
        branch; reports "no project has branch" when every project failed.
        """
        if not args:
            self.Usage()

        nb = args[0]
        err = []
        # Renamed from 'all' to avoid shadowing the builtin.
        all_projects = self.GetProjects(args[1:])

        pm = Progress('Checkout %s' % nb, len(all_projects))
        for project in all_projects:
            pm.update()
            if not project.CheckoutBranch(nb):
                err.append(project)
        pm.end()

        if err:
            # Every project failing means the branch exists nowhere.
            if len(err) == len(all_projects):
                print >> sys.stderr, 'error: no project has branch %s' % nb
            else:
                for p in err:
                    print >>sys.stderr,\
                      "error: %s/: cannot checkout %s" \
                      % (p.relpath, nb)
            sys.exit(1)
Ejemplo n.º 6
0
def mapMaskSoft(dimensions=(1024, 1024), mesh=None):
    """
    Create a texture mask for the selected human (software renderer).

    Rasterizes the mesh's UV-mapped faces with MaskShader onto an opaque
    black RGBA image of the requested size and returns the image.
    """
    # NOTE(review): Progress()(0) relies on Progress.__call__ returning the
    # progress object itself (it is reused below) -- confirm in the
    # progress module.
    progress = Progress()(0)
    if mesh is None:
        mesh = G.app.selectedHuman.mesh

    W = dimensions[0]
    H = dimensions[1]

    components = 4
    # Start from an opaque black RGBA canvas.
    dstImg = mh.Image(width=W, height=H, components=components)
    dstImg.data[...] = np.tile([0, 0, 0, 255], (H, W)).reshape(
        (H, W, components))

    faces = getFaces(mesh)

    # Map UV coordinates to image pixels: x scaled by W, y flipped
    # (offset by H, scaled by -H).
    coords = np.asarray(
        [0, H])[None, None, :] + mesh.texco[mesh.fuvs[faces]] * np.asarray(
            [W, -H])[None, None, :]
    shape = mesh.fvert[faces].shape
    shape = tuple(list(shape) + [components])
    # NOTE(review): 'colors' is computed but never used below -- possibly
    # leftover from a shaded variant of this routine.
    colors = np.repeat(1, np.prod(shape)).reshape(shape)

    log.debug("mapMask: begin render")

    # Each quad face is rendered as two triangles: (0,1,2) and (2,3,0).
    progress(0.1, 0.55)
    RasterizeTriangles(dstImg, coords[:, [0, 1, 2], :], MaskShader())
    progress(0.55, 0.99)
    RasterizeTriangles(dstImg, coords[:, [2, 3, 0], :], MaskShader())

    log.debug("mapMask: end render")

    progress.finish()
    return dstImg
Ejemplo n.º 7
0
  def _Fetch(self, projects, opt):
    """Fetch the network half of each project.

    Returns the set of gitdirs that fetched successfully.  With
    self.jobs == 1 the fetch is sequential; a failure aborts unless
    --force-broken is set.  With more jobs, one thread per project is
    spawned, throttled to self.jobs concurrent fetches by a semaphore.
    """
    fetched = set()
    pm = Progress('Fetching projects', len(projects))

    if self.jobs == 1:
      for project in projects:
        pm.update()
        if project.Sync_NetworkHalf(quiet=opt.quiet):
          fetched.add(project.gitdir)
        else:
          print >>sys.stderr, 'error: Cannot fetch %s' % project.name
          if opt.force_broken:
            print >>sys.stderr, 'warn: --force-broken, continuing to sync'
          else:
            sys.exit(1)
    else:
      threads = set()
      # Lock presumably guards 'fetched' and 'pm' inside _FetchHelper --
      # confirm in that helper (not visible here).
      lock = _threading.Lock()
      # At most self.jobs fetches run concurrently.
      sem = _threading.Semaphore(self.jobs)
      for project in projects:
        # Acquired here; presumably released by _FetchHelper when done.
        sem.acquire()
        t = _threading.Thread(target = self._FetchHelper,
                              args = (opt,
                                      project,
                                      lock,
                                      fetched,
                                      pm,
                                      sem))
        threads.add(t)
        t.start()

      for t in threads:
        t.join()

    pm.end()
    return fetched
Ejemplo n.º 8
0
def mapImageGL(srcImg, mesh, leftTop, rightBottom):
    """Project srcImg onto the selected human's texture via renderSkin.

    leftTop/rightBottom delimit the source region in screen space;
    returns the image produced by mh.renderSkin.
    """
    # NOTE(review): Progress() (0) relies on Progress.__call__ returning
    # the progress object itself -- confirm in the progress module.
    progress = Progress() (0)
    log.debug("mapImageGL: 1")

    dstImg = G.app.selectedHuman.meshData.object3d.textureTex

    # NOTE(review): dstW/dstH are never used below.
    dstW = dstImg.width
    dstH = dstImg.height

    left, top = leftTop
    right, bottom = rightBottom

    camera = getCamera(mesh)

    coords = mesh.r_texco

    # Screen-space matrix rescaled/translated so the selected region maps
    # onto the unit square.
    texmat = G.app.modelCamera.getConvertToScreenMatrix(mesh)
    texmat = matrix.scale((1/(right - left), 1/(top - bottom), 1)) * matrix.translate((-left, -bottom, 0)) * texmat
    texmat = np.asarray(texmat)

    texco = mesh.r_coord

    # Alpha from the dot product of vertex normals with the camera
    # direction, clamped to non-negative.
    alpha = np.sum(mesh.r_vnorm * camera[None,:], axis=-1)
    alpha = np.maximum(alpha, 0)
    color = (np.array([0, 0, 0, 0])[None,...] + alpha[...,None]) * 255

    # renderSkin presumably expects contiguous uint8/float32 buffers --
    # convert once here.
    color = np.ascontiguousarray(color, dtype=np.uint8)
    texco = np.ascontiguousarray(texco, dtype=np.float32)

    progress(0.5, 0.99)
    result = mh.renderSkin(dstImg, mesh.vertsPerPrimitive, coords, index = mesh.index,
                           texture = srcImg, UVs = texco, textureMatrix = texmat,
                           color = color, clearColor = None)

    progress(1)
    return result
Ejemplo n.º 9
0
def add_locations(request, pk=None):
    """Render/process the add-locations step for an upload session.

    Looks up the UploadProgress row identified by ``pk``, determines
    which location names are still unresolved (pk == -1), and delegates
    to add_model with the location form configuration.

    Raises Http404 when the row does not exist or ``pk`` is malformed.
    """
    try:
        in_progress = models.UploadProgress.objects.get(pk=pk)
    except (models.UploadProgress.DoesNotExist, ValueError, TypeError):
        # Narrowed from a bare except: a missing row or a malformed pk
        # becomes a 404; unrelated errors now propagate instead of being
        # silently converted.
        raise Http404
    progress = Progress(in_progress)
    # The session stores either several locations ('locations') or a
    # single one ('location'); .get replaces the try/except KeyError
    # pairs with the same None fallback.
    locations_map = progress.progress.get('locations')
    location_map = progress.progress.get('location')
    if location_map:
        _locations = {None: location_map}
    elif locations_map:
        _locations = locations_map
    else:
        # Neither form present: synthesize one unknown entry.
        _locations = {None: {'pk': -1, 'planting_methods': -1}}
    unknown_names = [
        name for name in _locations if _locations[name]['pk'] == -1
    ]
    return add_model(
        request,
        progress,
        unknown_names=unknown_names,
        template='add-locations.html',
        Formcls=forms.AddLocation,
        form_fieldname='location',
        input_fieldname='name',
        map_name=progress.map_planting_methods,
        is_verified=progress.locations_verified,
        next_url='planting-methods/',
        model_cache=utils.create_location_model_cache(),
    )
Ejemplo n.º 10
0
    def Execute(self, opt, args):
        """Start branch <name> in the given projects (or all with --all).

        Validates the branch name, substitutes the manifest default
        revision for projects pinned to a SHA1, and exits non-zero when
        any project fails to start the branch.
        """
        if not args:
            self.Usage()

        nb = args[0]
        if not git.check_ref_format('heads/%s' % nb):
            print >> sys.stderr, "error: '%s' is not a valid name" % nb
            sys.exit(1)

        err = []
        projects = []
        if not opt.all:
            projects = args[1:]
            if len(projects) < 1:
                print >> sys.stderr, "error: at least one project must be specified"
                sys.exit(1)

        # Renamed from 'all' to avoid shadowing the builtin.
        all_projects = self.GetProjects(projects)

        pm = Progress('Starting %s' % nb, len(all_projects))
        for project in all_projects:
            pm.update()
            # If the current revision is a specific SHA1 then we can't push back
            # to it so substitute the manifest default revision instead.
            if IsId(project.revisionExpr):
                project.revisionExpr = self.manifest.default.revisionExpr
            if not project.StartBranch(nb):
                err.append(project)
        pm.end()

        if err:
            for p in err:
                print >>sys.stderr,\
                  "error: %s/: cannot start %s" \
                  % (p.relpath, nb)
            sys.exit(1)
Ejemplo n.º 11
0
    def resetFacialCodes(self, erase_all=True):
        """Reset all facial-expression sliders to the neutral pose.

        Clears every slider's value and label, optionally erases loaded
        animation data (``erase_all``), resets the test-animation and
        intensity controls, then re-applies all targets.  Subdivision is
        switched off for the duration of the reset and restored after.

        Note: the default used to be the *string* ``'True'``, so any
        string argument (even ``'False'``) behaved as true.  The default
        is now a real boolean; truthiness of the default call is
        unchanged, so existing callers behave identically.
        """
        progress_reset_button = Progress(len(self.sliders))
        # Per-slider updates are expensive on a subdivided mesh; disable
        # subdivision during the reset and restore it at the end.
        was_subdivided = False
        if self.facs_human.isSubdivided():
            was_subdivided = True
            self.facs_human.setSubdivided(False)
        for aSlider in self.sliders.keys():
            self.sliders[aSlider].resetValue()
            self.sliders[aSlider].update()
            self.slidersValues[aSlider] = 0
            self.labelSlider[aSlider].setTextFormat('Intensity: 0%%')
            gui3d.app.statusPersist('Reseting : ' + aSlider)
            progress_reset_button.step()

        if erase_all:
            # Drop any loaded animation state as well.
            self.au_timeline_values = {}
            self.au_facs_loaded_file_values = {}
            self.txt_animatiom_file_loaded.setText('No animation file loaded')

        self.animation_test.onChange(0)
        self.animation_test.update()
        self.animation_test.setValue(0)

        self.general_intensity.onChange(100)
        self.general_intensity.update()
        self.general_intensity.setValue(100)

        self.au_coding.setText('Neutral')
        self.txt_file_loaded.setText('- New facial code -')
        self.facs_human.applyAllTargets()
        if was_subdivided:
            self.facs_human.setSubdivided(True)
        gui3d.app.statusPersist(
            'Reset is done, now in neutral facial expression setting')
Ejemplo n.º 12
0
def writeShapeKey(fp, name, shape, mesh, config):
    """Write one COLLADA morph-target <geometry> element for a shapekey.

    The geometry contains the fully morphed vertex positions (base mesh
    coordinates plus config.offset plus the shapekey's per-vertex
    deltas, scaled and rotated per config) followed by a <polylist> of
    the mesh's quad faces.  Shapekeys with no affected verts are skipped.
    """
    if len(shape.verts) == 0:
        log.debug("Shapekey %s has zero verts. Ignored" % name)
        return

    progress = Progress()

    # Verts

    progress(0)
    # Build the morphed coordinates: base mesh, export offset, shapekey
    # deltas applied only to the affected vertex indices, then export
    # scale and rotation.
    target = mesh.coord.copy()
    target[:] += config.offset
    target[shape.verts] += shape.data[np.s_[...]]
    target = rotateCoord(config.scale*target, config)
    nVerts = len(target)

    fp.write(
        '    <geometry id="%sMeshMorph_%s" name="%s">\n' % (mesh.name, name, name) +
        '      <mesh>\n' +
        '        <source id="%sMeshMorph_%s-positions">\n' % (mesh.name, name) +
        '          <float_array id="%sMeshMorph_%s-positions-array" count="%d">\n' % (mesh.name, name, 3*nVerts) +
        '           ')

    fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in target]) )

    fp.write('\n' +
        '          </float_array>\n' +
        '          <technique_common>\n' +
        '            <accessor source="#%sMeshMorph_%s-positions-array" count="%d" stride="3">\n' % (mesh.name, name, nVerts) +
        '              <param name="X" type="float"/>\n' +
        '              <param name="Y" type="float"/>\n' +
        '              <param name="Z" type="float"/>\n' +
        '            </accessor>\n' +
        '          </technique_common>\n' +
        '        </source>\n')
    progress(0.3)

    # Polylist

    nFaces = len(mesh.fvert)

    fp.write(
        '        <vertices id="%sMeshMorph_%s-vertices">\n' % (mesh.name, name) +
        '          <input semantic="POSITION" source="#%sMeshMorph_%s-positions"/>\n' % (mesh.name, name) +
        '        </vertices>\n' +
        '        <polylist count="%d">\n' % nFaces +
        '          <input semantic="VERTEX" source="#%sMeshMorph_%s-vertices" offset="0"/>\n' % (mesh.name, name) +
        #'          <input semantic="NORMAL" source="#%sMeshMorph_%s-normals" offset="1"/>\n' % (mesh.name, name) +
        '          <vcount>')

    # Every face is a quad, hence the constant "4" vcount entries.
    fp.write( ''.join(["4 " for fv in mesh.fvert]) )

    fp.write('\n' +
        '          </vcount>\n' +
        '          <p>')

    fp.write( ''.join([("%d %d %d %d " % tuple(fv)) for fv in mesh.fvert]) )

    fp.write('\n' +
        '          </p>\n' +
        '        </polylist>\n' +
        '      </mesh>\n' +
        '    </geometry>\n')
    progress(1)
Ejemplo n.º 13
0
if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')

from progress import Progress

# answers.txt holds the expected answer for each problem, 1-indexed
# (the 'dummy' entry pads index 0).
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt'
          ) as answers:
    answers_list.extend(int(line) for line in answers)
progress_ = Progress("Problem 001: Multiples of 3 and 5", 0, answers_list[1])

# Accumulate every natural number below 1000 divisible by 3 or 5,
# updating the progress display after each addition.
for n in range(1000):
    if n % 3 == 0 or n % 5 == 0:
        progress_.count += n
        progress_.progress()

if __name__ == '__main__':
    input()
Ejemplo n.º 14
0
if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')

from progress import Progress

# answers.txt holds the expected answer for each problem, 1-indexed.
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt'
          ) as answers:
    for line in answers:
        answers_list.append(int(line))
progress_ = Progress("Problem 029: Distinct powers", 0, answers_list[29])

# Collect a**b in a set so duplicates are discarded as we go.  The
# original appended to a list and recomputed len(list(set(result))) each
# outer pass, which was quadratic in time and memory; the reported
# progress counts are unchanged.
distinct_powers = set()
for a in range(2, 101):
    progress_.count = len(distinct_powers)
    progress_.progress()
    for b in range(2, 101):
        distinct_powers.add(a**b)

progress_.count = len(distinct_powers)
progress_.progress()

if __name__ == '__main__':
    input()
Ejemplo n.º 15
0
    def Execute(self, opt, args):
        """Synchronize the tree with the manifest and remote repositories.

        Steps: validate option combinations, handle GITC client views,
        optionally obtain a manifest from the smart-sync server, reload
        the manifest, fetch project networks (repeating for newly
        discovered submodules), and finally sync each project's working
        tree.
        """
        if opt.jobs:
            self.jobs = opt.jobs
        if self.jobs > 1:
            soft_limit, _ = _rlimit_nofile()
            # Stay well under the fd soft limit.  Use integer division so
            # self.jobs stays an int; Python 3 '/' would make it a float.
            self.jobs = min(self.jobs, (soft_limit - 5) // 3)

        # Reject mutually exclusive / incomplete option combinations.
        if opt.network_only and opt.detach_head:
            print('error: cannot combine -n and -d', file=sys.stderr)
            sys.exit(1)
        if opt.network_only and opt.local_only:
            print('error: cannot combine -n and -l', file=sys.stderr)
            sys.exit(1)
        if opt.manifest_name and opt.smart_sync:
            print('error: cannot combine -m and -s', file=sys.stderr)
            sys.exit(1)
        if opt.manifest_name and opt.smart_tag:
            print('error: cannot combine -m and -t', file=sys.stderr)
            sys.exit(1)
        if opt.manifest_server_username or opt.manifest_server_password:
            if not (opt.smart_sync or opt.smart_tag):
                print('error: -u and -p may only be combined with -s or -t',
                      file=sys.stderr)
                sys.exit(1)
            if None in [
                    opt.manifest_server_username, opt.manifest_server_password
            ]:
                print('error: both -u and -p must be given', file=sys.stderr)
                sys.exit(1)

        cwd = os.getcwd()
        if cwd.startswith(gitc_utils.GITC_MANIFEST_DIR) and not opt.force_gitc:
            print(
                'WARNING this will pull all the sources like a normal repo sync.\n'
                '\nIf you want to update your GITC Client View please rerun this '
                'command in \n%s%s.\nOr if you actually want to pull the sources, '
                'rerun with --force-gitc.' %
                (gitc_utils.GITC_FS_ROOT_DIR,
                 cwd.split(gitc_utils.GITC_MANIFEST_DIR)[1]))
            sys.exit(1)

        # Detect whether we are running inside a GITC client view.
        self._gitc_sync = False
        if cwd.startswith(gitc_utils.GITC_FS_ROOT_DIR):
            self._gitc_sync = True
            self._client_name = cwd.split(
                gitc_utils.GITC_FS_ROOT_DIR)[1].split('/')[0]
            self._client_dir = os.path.join(gitc_utils.GITC_MANIFEST_DIR,
                                            self._client_name)
            print('Updating GITC client: %s' % self._client_name)

        if opt.manifest_name:
            self.manifest.Override(opt.manifest_name)

        manifest_name = opt.manifest_name
        smart_sync_manifest_name = "smart_sync_override.xml"
        smart_sync_manifest_path = os.path.join(
            self.manifest.manifestProject.worktree, smart_sync_manifest_name)

        if opt.smart_sync or opt.smart_tag:
            if not self.manifest.manifest_server:
                print(
                    'error: cannot smart sync: no manifest server defined in '
                    'manifest',
                    file=sys.stderr)
                sys.exit(1)

            manifest_server = self.manifest.manifest_server
            if not opt.quiet:
                print('Using manifest server %s' % manifest_server)

            # If the URL carries no credentials, take them from the
            # options or from ~/.netrc.
            if '@' not in manifest_server:
                username = None
                password = None
                if opt.manifest_server_username and opt.manifest_server_password:
                    username = opt.manifest_server_username
                    password = opt.manifest_server_password
                else:
                    try:
                        info = netrc.netrc()
                    except IOError:
                        # .netrc file does not exist or could not be opened
                        pass
                    else:
                        try:
                            parse_result = urllib.parse.urlparse(
                                manifest_server)
                            if parse_result.hostname:
                                auth = info.authenticators(
                                    parse_result.hostname)
                                if auth:
                                    username, _account, password = auth
                                else:
                                    print(
                                        'No credentials found for %s in .netrc'
                                        % parse_result.hostname,
                                        file=sys.stderr)
                        except netrc.NetrcParseError as e:
                            print('Error parsing .netrc file: %s' % e,
                                  file=sys.stderr)

                if (username and password):
                    manifest_server = manifest_server.replace(
                        '://', '://%s:%s@' % (username, password), 1)

            transport = PersistentTransport(manifest_server)
            if manifest_server.startswith('persistent-'):
                manifest_server = manifest_server[len('persistent-'):]

            try:
                server = xmlrpc.client.Server(manifest_server,
                                              transport=transport)
                if opt.smart_sync:
                    # Resolve the branch the manifest project merges with.
                    p = self.manifest.manifestProject
                    b = p.GetBranch(p.CurrentBranch)
                    branch = b.merge
                    if branch.startswith(R_HEADS):
                        branch = branch[len(R_HEADS):]

                    env = os.environ.copy()
                    if 'SYNC_TARGET' in env:
                        target = env['SYNC_TARGET']
                        [success, manifest_str
                         ] = server.GetApprovedManifest(branch, target)
                    elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
                        target = '%s-%s' % (env['TARGET_PRODUCT'],
                                            env['TARGET_BUILD_VARIANT'])
                        [success, manifest_str
                         ] = server.GetApprovedManifest(branch, target)
                    else:
                        [success,
                         manifest_str] = server.GetApprovedManifest(branch)
                else:
                    assert (opt.smart_tag)
                    [success, manifest_str] = server.GetManifest(opt.smart_tag)

                if success:
                    manifest_name = smart_sync_manifest_name
                    try:
                        # 'with' guarantees the file is closed even if the
                        # write raises.
                        with open(smart_sync_manifest_path, 'w') as f:
                            f.write(manifest_str)
                    except IOError as e:
                        print('error: cannot write manifest to %s:\n%s' %
                              (smart_sync_manifest_path, e),
                              file=sys.stderr)
                        sys.exit(1)
                    self._ReloadManifest(manifest_name)
                else:
                    print('error: manifest server RPC call failed: %s' %
                          manifest_str,
                          file=sys.stderr)
                    sys.exit(1)
            except (socket.error, IOError, xmlrpc.client.Fault) as e:
                print('error: cannot connect to manifest server %s:\n%s' %
                      (self.manifest.manifest_server, e),
                      file=sys.stderr)
                sys.exit(1)
            except xmlrpc.client.ProtocolError as e:
                print('error: cannot connect to manifest server %s:\n%d %s' %
                      (self.manifest.manifest_server, e.errcode, e.errmsg),
                      file=sys.stderr)
                sys.exit(1)
        else:  # Not smart sync or smart tag mode
            if os.path.isfile(smart_sync_manifest_path):
                try:
                    os.remove(smart_sync_manifest_path)
                except OSError as e:
                    print(
                        'error: failed to remove existing smart sync override manifest: %s'
                        % e,
                        file=sys.stderr)

        rp = self.manifest.repoProject
        rp.PreSync()

        mp = self.manifest.manifestProject
        mp.PreSync()

        if opt.repo_upgraded:
            _PostRepoUpgrade(self.manifest, quiet=opt.quiet)

        if self._gitc_sync:
            gitc_utils.generate_gitc_manifest(self._client_dir, self.manifest)
            print('GITC client successfully synced.')
            return

        if not opt.local_only:
            mp.Sync_NetworkHalf(quiet=opt.quiet,
                                current_branch_only=opt.current_branch_only,
                                no_tags=opt.no_tags,
                                optimized_fetch=opt.optimized_fetch)

        if mp.HasChanges:
            # The manifest itself changed: sync it locally and reload
            # before selecting projects.
            syncbuf = SyncBuffer(mp.config)
            mp.Sync_LocalHalf(syncbuf)
            if not syncbuf.Finish():
                sys.exit(1)
            self._ReloadManifest(manifest_name)
            if opt.jobs is None:
                self.jobs = self.manifest.default.sync_j
        all_projects = self.GetProjects(args,
                                        missing_ok=True,
                                        submodules_ok=opt.fetch_submodules)

        self._fetch_times = _FetchTimes(self.manifest)
        if not opt.local_only:
            to_fetch = []
            now = time.time()
            if _ONE_DAY_S <= (now - rp.LastFetch):
                to_fetch.append(rp)
            to_fetch.extend(all_projects)
            # Longest-fetching projects first (descending by recorded
            # fetch time).
            to_fetch.sort(key=self._fetch_times.Get, reverse=True)

            fetched = self._Fetch(to_fetch, opt)
            _PostRepoFetch(rp, opt.no_repo_verify)
            if opt.network_only:
                # bail out now; the rest touches the working tree
                return

            # Iteratively fetch missing and/or nested unregistered submodules
            previously_missing_set = set()
            while True:
                self._ReloadManifest(manifest_name)
                all_projects = self.GetProjects(
                    args, missing_ok=True, submodules_ok=opt.fetch_submodules)
                missing = []
                for project in all_projects:
                    if project.gitdir not in fetched:
                        missing.append(project)
                if not missing:
                    break
                # Stop us from non-stopped fetching actually-missing repos: If set of
                # missing repos has not been changed from last fetch, we break.
                missing_set = set(p.name for p in missing)
                if previously_missing_set == missing_set:
                    break
                previously_missing_set = missing_set
                fetched.update(self._Fetch(missing, opt))

        if self.manifest.IsMirror or self.manifest.IsArchive:
            # bail out now, we have no working tree
            return

        if self.UpdateProjectList():
            sys.exit(1)

        syncbuf = SyncBuffer(mp.config, detach_head=opt.detach_head)
        pm = Progress('Syncing work tree', len(all_projects))
        for project in all_projects:
            pm.update()
            if project.worktree:
                project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
        pm.end()
        print(file=sys.stderr)
        if not syncbuf.Finish():
            sys.exit(1)

        # If there's a notice that's supposed to print at the end of the sync, print
        # it now...
        if self.manifest.notice:
            print(self.manifest.notice)
Ejemplo n.º 16
0
    def Execute(self, opt, args):
        """Start branch <name> across the selected projects, in parallel.

        When a GITC manifest is active: record old revisions, save the
        manifest, then sequentially sync any projects not yet opened.
        Afterwards the branch is started in every project via
        ExecuteInParallel; exits non-zero when any project fails.
        """
        nb = args[0]
        err = []
        projects = []
        if not opt.all:
            projects = args[1:]
            if len(projects) < 1:
                projects = ['.']  # start it in the local project by default

        all_projects = self.GetProjects(projects,
                                        missing_ok=bool(self.gitc_manifest))

        # This must happen after we find all_projects, since GetProjects may need
        # the local directory, which will disappear once we save the GITC manifest.
        if self.gitc_manifest:
            gitc_projects = self.GetProjects(projects,
                                             manifest=self.gitc_manifest,
                                             missing_ok=True)
            for project in gitc_projects:
                # old_revision set means the project was synced before.
                if project.old_revision:
                    project.already_synced = True
                else:
                    project.already_synced = False
                    project.old_revision = project.revisionExpr
                # Clear the pinned revision in the saved manifest entry.
                project.revisionExpr = None
            # Save the GITC manifest.
            gitc_utils.save_manifest(self.gitc_manifest)

            # Make sure we have a valid CWD
            if not os.path.exists(os.getcwd()):
                os.chdir(self.manifest.topdir)

            pm = Progress('Syncing %s' % nb,
                          len(all_projects),
                          quiet=opt.quiet)
            for project in all_projects:
                gitc_project = self.gitc_manifest.paths[project.relpath]
                # Sync projects that have not been opened.
                if not gitc_project.already_synced:
                    proj_localdir = os.path.join(
                        self.gitc_manifest.gitc_client_dir, project.relpath)
                    project.worktree = proj_localdir
                    if not os.path.exists(proj_localdir):
                        os.makedirs(proj_localdir)
                    project.Sync_NetworkHalf()
                    sync_buf = SyncBuffer(self.manifest.manifestProject.config)
                    project.Sync_LocalHalf(sync_buf)
                    project.revisionId = gitc_project.old_revision
                pm.update()
            pm.end()

        # Callback: collect failed projects from the parallel workers.
        def _ProcessResults(_pool, pm, results):
            for (result, project) in results:
                if not result:
                    err.append(project)
                pm.update()

        self.ExecuteInParallel(opt.jobs,
                               functools.partial(self._ExecuteOne,
                                                 opt.revision, nb),
                               all_projects,
                               callback=_ProcessResults,
                               output=Progress('Starting %s' % (nb, ),
                                               len(all_projects),
                                               quiet=opt.quiet))

        if err:
            for p in err:
                print("error: %s/: cannot start %s" % (p.relpath, nb),
                      file=sys.stderr)
            sys.exit(1)
Ejemplo n.º 17
0
def main():
	"""Train a WGAN on MNIST.

	Relies on module-level objects defined elsewhere in this script:
	`gan` (model + optimizers), `dataset` (MNIST loading/sampling),
	`args` (CLI options), `plot`, and chainer/numpy (`F`, `np`, `cuda`).
	Saves the model every epoch and plots samples every `plot_interval`
	epochs.
	"""
	# load MNIST images
	images, labels = dataset.load_train_images()

	# config
	discriminator_config = gan.config_discriminator
	generator_config = gan.config_generator

	# settings
	max_epoch = 1000
	num_updates_per_epoch = 500
	plot_interval = 5
	batchsize_true = 100
	batchsize_fake = batchsize_true

	# seed
	np.random.seed(args.seed)
	if args.gpu_device != -1:
		cuda.cupy.random.seed(args.seed)

	# training
	progress = Progress()
	for epoch in xrange(1, max_epoch + 1):
		progress.start_epoch(epoch, max_epoch)
		sum_loss_critic = 0
		sum_loss_generator = 0

		for t in xrange(num_updates_per_epoch):

			# WGAN: train the critic num_critic times per generator update.
			for k in xrange(discriminator_config.num_critic):
				# clamp parameters to a cube
				gan.clip_discriminator_weights()
				# gan.decay_discriminator_weights()

				# sample true data from data distribution
				images_true = dataset.sample_data(images, batchsize_true, binarize=False)
				# sample fake data from generator
				images_fake = gan.generate_x(batchsize_fake)
				# detach the generator graph so the critic update does not
				# backprop into the generator
				images_fake.unchain_backward()

				fw_true, activations_true = gan.discriminate(images_true)
				fw_fake, _ = gan.discriminate(images_fake)

				# critic loss: negative of E[f(x_true)] - E[f(x_fake)]
				loss_critic = -F.sum(fw_true - fw_fake) / batchsize_true
				sum_loss_critic += float(loss_critic.data) / discriminator_config.num_critic

				# update discriminator
				gan.backprop_discriminator(loss_critic)

			# generator loss
			images_fake = gan.generate_x(batchsize_fake)
			fw_fake, activations_fake = gan.discriminate(images_fake)
			loss_generator = -F.sum(fw_fake) / batchsize_fake

			# feature matching
			if discriminator_config.use_feature_matching:
				# activations_true comes from the last critic iteration above
				features_true = activations_true[-1]
				features_true.unchain_backward()
				if batchsize_true != batchsize_fake:
					# re-generate so feature batch sizes match
					images_fake = gan.generate_x(batchsize_true)
					_, activations_fake = gan.discriminate(images_fake, apply_softmax=False)
				features_fake = activations_fake[-1]
				loss_generator += F.mean_squared_error(features_true, features_fake)

			# update generator
			gan.backprop_generator(loss_generator)
			sum_loss_generator += float(loss_generator.data)
			if t % 10 == 0:
				progress.show(t, num_updates_per_epoch, {})

		gan.save(args.model_dir)

		# negate the accumulated critic loss to report the Wasserstein estimate
		progress.show(num_updates_per_epoch, num_updates_per_epoch, {
			"wasserstein": -sum_loss_critic / num_updates_per_epoch,
			"loss_g": sum_loss_generator / num_updates_per_epoch,
		})

		if epoch % plot_interval == 0 or epoch == 1:
			plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
Ejemplo n.º 18
0
    def Execute(self, opt, args):
        """Create branch args[0] in the selected projects.

        With --all the branch is started in every project; otherwise in the
        projects named in args[1:], defaulting to the project containing the
        current directory.  Under GITC, unopened projects are synced first.
        Exits non-zero if any project fails to start the branch.
        """
        nb = args[0]
        err = []
        projects = []
        if not opt.all:
            projects = args[1:]
            if len(projects) < 1:
                projects = [
                    '.',
                ]  # start it in the local project by default

        # missing_ok: under GITC, projects may not yet exist on local disk.
        all_projects = self.GetProjects(projects,
                                        missing_ok=bool(self.gitc_manifest))

        # This must happen after we find all_projects, since GetProjects may need
        # the local directory, which will disappear once we save the GITC manifest.
        if self.gitc_manifest:
            gitc_projects = self.GetProjects(projects,
                                             manifest=self.gitc_manifest,
                                             missing_ok=True)
            for project in gitc_projects:
                if project.old_revision:
                    project.already_synced = True
                else:
                    project.already_synced = False
                    project.old_revision = project.revisionExpr
                # Clearing the revision marks the project as opened in the
                # GITC manifest.
                project.revisionExpr = None
            # Save the GITC manifest.
            gitc_utils.save_manifest(self.gitc_manifest)

            # Make sure we have a valid CWD
            if not os.path.exists(os.getcwd()):
                os.chdir(self.manifest.topdir)

        pm = Progress('Starting %s' % nb, len(all_projects))
        for project in all_projects:
            pm.update()

            if self.gitc_manifest:
                gitc_project = self.gitc_manifest.paths[project.relpath]
                # Sync projects that have not been opened.
                if not gitc_project.already_synced:
                    proj_localdir = os.path.join(
                        self.gitc_manifest.gitc_client_dir, project.relpath)
                    project.worktree = proj_localdir
                    if not os.path.exists(proj_localdir):
                        os.makedirs(proj_localdir)
                    project.Sync_NetworkHalf()
                    sync_buf = SyncBuffer(self.manifest.manifestProject.config)
                    project.Sync_LocalHalf(sync_buf)
                    project.revisionId = gitc_project.old_revision

            # If the current revision is immutable, such as a SHA1, a tag or
            # a change, then we can't push back to it. Substitute with
            # dest_branch, if defined; or with manifest default revision instead.
            branch_merge = ''
            if IsImmutable(project.revisionExpr):
                if project.dest_branch:
                    branch_merge = project.dest_branch
                else:
                    branch_merge = self.manifest.default.revisionExpr

            if not project.StartBranch(nb, branch_merge=branch_merge):
                err.append(project)
        pm.end()

        if err:
            for p in err:
                print("error: %s/: cannot start %s" % (p.relpath, nb),
                      file=sys.stderr)
            sys.exit(1)
Ejemplo n.º 19
0
import math

if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')

from progress import Progress

# answers_list[p] holds the known answer to problem p ('dummy' pads index 0).
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt'
          ) as answers:
    answers_list.extend(int(line) for line in answers)

progress_ = Progress("Problem 073: Counting fractions in a range", 0, 12001)

# Count reduced proper fractions i/n strictly between 1/3 and 1/2 for
# denominators n <= 12000: gcd(n, i) == 1 keeps only fractions in lowest terms.
count = 0
for n in range(2, 12001):
    progress_.count = n
    progress_.progress()
    count += sum(1
                 for i in range(math.floor(n / 3) + 1, math.ceil(n / 2))
                 if math.gcd(n, i) == 1)

# Display the computed count against the known answer.
progress_.count = count
progress_.total = answers_list[73]
progress_.progress()

if __name__ == '__main__':
    input()
Ejemplo n.º 20
0
def main():
    """Train an adversarial autoencoder (AAE) on MNIST.

    Per batch, three updates are performed:
      1. reconstruction: encoder (generator) + decoder minimize MSE,
      2. discriminator: separate true Gaussian z from encoded z,
      3. generator: encoder tries to fool the discriminator.

    Relies on module-level objects defined elsewhere: `aae`, `dataset`,
    `sampler`, `args`, and chainer/numpy (`F`, `np`, `cuda`).
    """
    # load MNIST images
    train_images, train_labels = dataset.load_train_images()

    # config
    config = aae.config

    # settings
    # _l -> labeled
    # _u -> unlabeled
    max_epoch = 1000
    num_trains_per_epoch = 5000
    batchsize = 100
    # NOTE(review): `alpha` is unused in this function — confirm intent.
    alpha = 1

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # classification
    # 0 -> true sample
    # 1 -> generated sample
    class_true = aae.to_variable(np.zeros(batchsize, dtype=np.int32))
    class_fake = aae.to_variable(np.ones(batchsize, dtype=np.int32))

    # training
    # NOTE(review): xrange(1, max_epoch) runs max_epoch - 1 epochs (the end
    # is exclusive); the sibling training script uses max_epoch + 1 — confirm.
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_reconstruction = 0
        sum_loss_discriminator = 0
        sum_loss_generator = 0

        for t in xrange(num_trains_per_epoch):
            # sample from data distribution
            images_l, label_onehot_l, label_ids_l = dataset.sample_labeled_data(
                train_images, train_labels, batchsize)

            # reconstruction phase
            z_l = aae.encode_x_z(images_l)
            reconstruction_l = aae.decode_yz_x(label_onehot_l, z_l)
            loss_reconstruction = F.mean_squared_error(
                aae.to_variable(images_l), reconstruction_l)
            aae.backprop_generator(loss_reconstruction)
            aae.backprop_decoder(loss_reconstruction)

            # adversarial phase
            # (fresh batch; true z drawn from the target unit Gaussian prior)
            images_l = dataset.sample_labeled_data(train_images, train_labels,
                                                   batchsize)[0]
            z_fake_l = aae.encode_x_z(images_l)
            z_true_l = sampler.gaussian(batchsize,
                                        config.ndim_z,
                                        mean=0,
                                        var=1)
            dz_true = aae.discriminate_z(z_true_l, apply_softmax=False)
            dz_fake = aae.discriminate_z(z_fake_l, apply_softmax=False)
            loss_discriminator = F.softmax_cross_entropy(
                dz_true, class_true) + F.softmax_cross_entropy(
                    dz_fake, class_fake)
            aae.backprop_discriminator(loss_discriminator)

            # adversarial phase
            # (encoder update: encoded z should be classified as "true")
            images_l = dataset.sample_labeled_data(train_images, train_labels,
                                                   batchsize)[0]
            z_fake_l = aae.encode_x_z(images_l)
            dz_fake = aae.discriminate_z(z_fake_l, apply_softmax=False)
            loss_generator = F.softmax_cross_entropy(dz_fake, class_true)
            aae.backprop_generator(loss_generator)

            sum_loss_reconstruction += float(loss_reconstruction.data)
            sum_loss_discriminator += float(loss_discriminator.data)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, num_trains_per_epoch, {})

        aae.save(args.model_dir)

        progress.show(
            num_trains_per_epoch, num_trains_per_epoch, {
                "loss_r": sum_loss_reconstruction / num_trains_per_epoch,
                "loss_d": sum_loss_discriminator / num_trains_per_epoch,
                "loss_g": sum_loss_generator / num_trains_per_epoch,
            })
Ejemplo n.º 21
0
# NOTE(review): this fragment depends on names defined earlier in the file
# and not visible here: all_triangles, all_functions, answers_list, find_cycle
# and Progress.
all_squares = []
all_pentagons = []
all_hexagons = []
all_heptagons = []
all_octagons = []
# One bucket per figurate family, ordered to match all_functions.
all_nums = [
    all_triangles, all_squares, all_pentagons, all_hexagons, all_heptagons,
    all_octagons
]

# Classify every 4-digit number into the figurate families it belongs to.
for i in range(1000, 10000):
    for j in range(6):
        if all_functions[j](i):
            all_nums[j].append(i)

progress_ = Progress("Problem 061: Cyclical figurate numbers", 0,
                     len(all_triangles))

# Try each triangle number as the start of the cycle; find_cycle presumably
# chains numbers whose trailing two digits match the next number's leading
# two digits, returning a truthy value (the cycle sum?) on success — TODO
# confirm against its definition earlier in this file.
for triangle in all_triangles:
    progress_.count += 1
    progress_.progress()
    if find_cycle(all_nums[1:], triangle // 100, triangle % 100, triangle):
        break

# Re-run on the successful start (loop variable `triangle` survives the loop)
# to display the result against the known answer.
progress_.count = find_cycle(all_nums[1:], triangle // 100, triangle % 100,
                             triangle)
progress_.total = answers_list[61]
progress_.progress()

if __name__ == '__main__':
    input()
Ejemplo n.º 22
0
def writeSkinController(fp, rmesh, amt, config):
    """Write the COLLADA <controller>/<skin> element for one mesh.

    Emits joint names, skin weights, inverse bind-pose matrices and the
    per-vertex (joint, weight) mapping of armature `amt` for mesh `rmesh`
    to the open output file `fp`.  `config` supplies the export settings
    used when computing the bones' rest matrices.
    """
    progress = Progress()
    progress(0, 0.1)

    nVerts = len(rmesh.getCoord())
    nBones = len(amt.bones)

    # Flatten the per-bone weight lists into one array (skinWeights) and
    # record, for every vertex, the (bone index, weight index) pairs that
    # influence it — this is the layout <vertex_weights> expects.
    skinWeights = []
    vertexWeights = [list() for _ in xrange(nVerts)]
    wn = 0
    for bn, bname in enumerate(amt.bones):
        try:
            wts = rmesh.weights[bname]
        except KeyError:
            # Bone has no weights on this mesh.
            wts = []
        log.debug("W %d %s %s" % (bn, bname, wts))
        skinWeights += wts
        for (vn, _w) in wts:
            vertexWeights[int(vn)].append((bn, wn))
            wn += 1
    nSkinWeights = len(skinWeights)

    # Skin header and joint-name source.
    progress(0.1, 0.2)
    fp.write('\n' + '    <controller id="%s-skin">\n' % rmesh.name +
             '      <skin source="#%sMesh">\n' % rmesh.name +
             '        <bind_shape_matrix>\n' + '          1 0 0 0\n' +
             '          0 1 0 0\n' + '          0 0 1 0\n' +
             '          0 0 0 1\n' + '        </bind_shape_matrix>\n' +
             '        <source id="%s-skin-joints">\n' % rmesh.name +
             '          <IDREF_array count="%d" id="%s-skin-joints-array">\n' %
             (nBones, rmesh.name) + '           ')

    for bone in amt.bones.values():
        # goodBoneName presumably sanitizes the name into a valid IDREF —
        # TODO confirm against its definition.
        bname = goodBoneName(bone.name)
        fp.write(' %s' % bname)

    # Weight-value source.
    progress(0.2, 0.4)
    fp.write(
        '\n' + '          </IDREF_array>\n' +
        '          <technique_common>\n' +
        '            <accessor count="%d" source="#%s-skin-joints-array" stride="1">\n'
        % (nBones, rmesh.name) +
        '              <param type="IDREF" name="JOINT"></param>\n' +
        '            </accessor>\n' + '          </technique_common>\n' +
        '        </source>\n' +
        '        <source id="%s-skin-weights">\n' % rmesh.name +
        '          <float_array count="%d" id="%s-skin-weights-array">\n' %
        (nSkinWeights, rmesh.name) + '           ')

    # Each skinWeights entry is a (vertex, weight) pair; emit the weight.
    fp.write(''.join(' %s' % w[1] for w in skinWeights))

    fp.write(
        '\n' + '          </float_array>\n' +
        '          <technique_common>\n' +
        '            <accessor count="%d" source="#%s-skin-weights-array" stride="1">\n'
        % (nSkinWeights, rmesh.name) +
        '              <param type="float" name="WEIGHT"></param>\n' +
        '            </accessor>\n' + '          </technique_common>\n' +
        '        </source>\n' +
        '        <source id="%s-skin-poses">\n' % rmesh.name +
        '          <float_array count="%d" id="%s-skin-poses-array">' %
        (16 * nBones, rmesh.name))

    # Inverse bind-pose matrices, one row-major 4x4 per bone.
    progress(0.4, 0.6)
    for bone in amt.bones.values():
        #mat = la.inv(bone.getRestOrTPoseMatrix(config))
        mat = la.inv(bone.getRestMatrix(config))
        for i in range(4):
            fp.write('\n           ')
            for j in range(4):
                fp.write(' %.4f' % mat[i, j])
        fp.write('\n')

    # <joints> wrapper and <vertex_weights> header.
    progress(0.6, 0.8)
    fp.write(
        '\n' + '          </float_array>\n' +
        '          <technique_common>\n' +
        '            <accessor count="%d" source="#%s-skin-poses-array" stride="16">\n'
        % (nBones, rmesh.name) +
        '              <param type="float4x4"></param>\n' +
        '            </accessor>\n' + '          </technique_common>\n' +
        '        </source>\n' + '        <joints>\n' +
        '          <input semantic="JOINT" source="#%s-skin-joints"/>\n' %
        rmesh.name +
        '          <input semantic="INV_BIND_MATRIX" source="#%s-skin-poses"/>\n'
        % rmesh.name + '        </joints>\n' +
        '        <vertex_weights count="%d">\n' % nVerts +
        '          <input offset="0" semantic="JOINT" source="#%s-skin-joints"/>\n'
        % rmesh.name +
        '          <input offset="1" semantic="WEIGHT" source="#%s-skin-weights"/>\n'
        % rmesh.name + '          <vcount>\n' + '            ')

    # <vcount>: number of influences per vertex.
    fp.write(''.join(['%d ' % len(wts) for wts in vertexWeights]))

    progress(0.8, 0.99)
    fp.write('\n' + '          </vcount>\n' '          <v>\n' + '           ')

    # <v>: flattened (bone index, weight index) pairs per vertex.
    for wts in vertexWeights:
        fp.write(''.join([' %d %d' % pair for pair in wts]))

    fp.write('\n' + '          </v>\n' + '        </vertex_weights>\n' +
             '      </skin>\n' + '    </controller>\n')

    progress(1)
Ejemplo n.º 23
0
def writeGeometry(fp, mesh, config, shapes=None):
    """Write the COLLADA <geometry>/<mesh> element for one mesh.

    Emits vertex positions (offset and rotated per `config`), optional
    normals, UV coordinates and the face data to the open output file `fp`.
    `shapes`, if given, is a sequence of (name, shape) pairs written as
    shape keys after the geometry.
    """
    progress = Progress()
    progress(0)

    # Positions are exported in the configured coordinate system.
    coord = mesh.coord + config.offset
    coord = rotateCoord(coord, config)
    nVerts = len(coord)

    fp.write('\n' +
        '    <geometry id="%sMesh" name="%s">\n' % (mesh.name,mesh.name) +
        '      <mesh>\n' +
        '        <source id="%s-Position">\n' % mesh.name +
        '          <float_array count="%d" id="%s-Position-array">\n' % (3*nVerts,mesh.name) +
        '          ')

    fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in coord]) )

    fp.write('\n' +
        '          </float_array>\n' +
        '          <technique_common>\n' +
        '            <accessor count="%d" source="#%s-Position-array" stride="3">\n' % (nVerts,mesh.name) +
        '              <param type="float" name="X"></param>\n' +
        '              <param type="float" name="Y"></param>\n' +
        '              <param type="float" name="Z"></param>\n' +
        '            </accessor>\n' +
        '          </technique_common>\n' +
        '        </source>\n')
    progress(0.2)

    # Normals

    if config.useNormals:
        mesh.calcNormals()
        vnorm = rotateCoord(mesh.vnorm, config)
        nNormals = len(mesh.vnorm)
        fp.write(
            '        <source id="%s-Normals">\n' % mesh.name +
            '          <float_array count="%d" id="%s-Normals-array">\n' % (3*nNormals,mesh.name) +
            '          ')

        fp.write( ''.join([("%.4f %.4f %.4f " % tuple(no)) for no in vnorm]) )

        fp.write('\n' +
            '          </float_array>\n' +
            '          <technique_common>\n' +
            '            <accessor count="%d" source="#%s-Normals-array" stride="3">\n' % (nNormals,mesh.name) +
            '              <param type="float" name="X"></param>\n' +
            '              <param type="float" name="Y"></param>\n' +
            '              <param type="float" name="Z"></param>\n' +
            '            </accessor>\n' +
            '          </technique_common>\n' +
            '        </source>\n')
        progress(0.35)

    # UV coordinates

    nUvVerts = len(mesh.texco)

    fp.write(
        '        <source id="%s-UV">\n' % mesh.name +
        '          <float_array count="%d" id="%s-UV-array">\n' % (2*nUvVerts,mesh.name) +
        '           ')

    fp.write( ''.join([("%.4f %.4f " % tuple(uv)) for uv in mesh.texco]) )

    fp.write('\n' +
        '          </float_array>\n' +
        '          <technique_common>\n' +
        '            <accessor count="%d" source="#%s-UV-array" stride="2">\n' % (nUvVerts,mesh.name) +
        '              <param type="float" name="S"></param>\n' +
        '              <param type="float" name="T"></param>\n' +
        '            </accessor>\n' +
        '          </technique_common>\n' +
        '        </source>\n')
    progress(0.5, 0.7)

    # Faces

    fp.write(
        '        <vertices id="%s-Vertex">\n' % mesh.name +
        '          <input semantic="POSITION" source="#%s-Position"/>\n' % mesh.name +
        '        </vertices>\n')

    # Validate face indices before emitting the polylist.
    checkFaces(mesh, nVerts, nUvVerts)
    progress(0.7, 0.9)
    writePolylist(fp, mesh, config)
    progress(0.9, 0.99)

    fp.write(
        '      </mesh>\n' +
        '    </geometry>\n')

    # Optional shape keys (morph targets), one sub-step each.
    if shapes is not None:
        shaprog = Progress(len(shapes))
        for name,shape in shapes:
            writeShapeKey(fp, name, shape, mesh, config)
            shaprog.step()

    progress(1)
Ejemplo n.º 24
0
Archivo: train.py Proyecto: cai-mj/ddgm
def main():
    """Train a deep directed generative model (DDGM) with an energy model.

    Per batch: update the energy model to assign lower energy to data than
    to generated samples, then update the generative model to minimize the
    KLD to the energy model.  Relies on module-level objects defined
    elsewhere: `ddgm`, `params_energy_model`, `params_generative_model`,
    `args`, `load_rgb_images`, `sample_from_data`, `to_object`, `plot`.
    (Python 2 source: `print` statements, `xrange`.)
    """
    # load MNIST images
    images = load_rgb_images(args.image_dir)

    # config
    config_energy_model = to_object(params_energy_model["config"])
    config_generative_model = to_object(params_generative_model["config"])

    # settings
    max_epoch = 1000
    n_trains_per_epoch = 500
    batchsize_positive = 128
    batchsize_negative = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    # (a single oversized forward pass initializes the weight-norm scales;
    # the outputs themselves are discarded)
    if config_energy_model.use_weightnorm:
        print "initializing weight normalization layers of the energy model ..."
        x_positive = sample_from_data(images, batchsize_positive * 5)
        ddgm.compute_energy(x_positive)

    if config_generative_model.use_weightnorm:
        print "initializing weight normalization layers of the generative model ..."
        x_negative = ddgm.generate_x(batchsize_negative * 5)

    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_energy_positive = 0
        sum_energy_negative = 0
        sum_loss = 0
        sum_kld = 0

        for t in xrange(n_trains_per_epoch):
            # sample from data distribution
            x_positive = sample_from_data(images, batchsize_positive)

            # sample from generator
            x_negative = ddgm.generate_x(batchsize_negative)

            # train energy model
            # (contrastive objective: push data energy down, sample energy up)
            energy_positive = ddgm.compute_energy_sum(x_positive)
            energy_negative = ddgm.compute_energy_sum(x_negative)
            loss = energy_positive - energy_negative
            ddgm.backprop_energy_model(loss)

            # train generative model
            # TODO: KLD must be greater than or equal to 0
            x_negative = ddgm.generate_x(batchsize_negative)
            kld = ddgm.compute_kld_between_generator_and_energy_model(
                x_negative)
            ddgm.backprop_generative_model(kld)

            sum_energy_positive += float(energy_positive.data)
            sum_energy_negative += float(energy_negative.data)
            sum_loss += float(loss.data)
            sum_kld += float(kld.data)
            progress.show(t, n_trains_per_epoch, {})

        progress.show(
            n_trains_per_epoch, n_trains_per_epoch, {
                "x+": int(sum_energy_positive / n_trains_per_epoch),
                "x-": int(sum_energy_negative / n_trains_per_epoch),
                "loss": sum_loss / n_trains_per_epoch,
                "kld": sum_kld / n_trains_per_epoch
            })
        ddgm.save(args.model_dir)

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(
                epoch, progress.get_total_time()))
    def _extractSeedZip(self, zipPath, label):
        """Extract one seed zip (thumbnails or screenshots) into the asset
        root, then delete the zip and any stale remote DB so the DB will be
        re-fetched.  Returns True when the zip existed and was extracted.

        `label` is the lowercase seed name ("thumb"/"screen"), used only
        for log messages.
        """
        if not os.path.exists(zipPath):
            self.log.debug("Did not have %s seed zip" % label)
            return False
        self.log.debug("HAS %s ZIP" % label.upper(), zipPath)
        # `archive` rather than `zip` — do not shadow the builtin.
        with ZipFile(zipPath, 'r') as archive:
            if not os.path.exists(self.root):
                os.makedirs(self.root)
            archive.extractall(self.root)
        if os.path.exists(self.remotedb):
            os.remove(self.remotedb)
        os.remove(zipPath)
        return True

    def _syncRemote1Finished(self):
        """Stage 2 of the remote sync: unpack the downloaded seed zips,
        reload the remote DB, and queue the remaining per-asset metadata
        (and optional thumbnail/screenshot) downloads.
        """
        self.log.trace("Enter")

        progress = Progress()
        current = self.overrideProgressAfterDownloads
        prog = float(current) / float(self.overrideProgressLength)
        progress(prog, desc="Unzipping seed zips")

        # Thumbnail seed: counts as one progress step when present.
        if self._extractSeedZip(self.thumbseed, "thumb"):
            current = current + 1
            prog = float(current) / float(self.overrideProgressLength)
            progress(prog, desc="Unzipping seed zips")

        # Screenshot seed: the shared step below covers it.
        self._extractSeedZip(self.screenseed, "screen")

        current = current + 1
        prog = float(current) / float(self.overrideProgressLength)
        progress(prog, desc="Checking for additional files to download")

        self.overrideProgressLength = None
        self.overrideProgressAfterDownloads = None

        # A ".keep" copy survives the remote-DB removal done while unzipping;
        # restore it before reloading.
        if os.path.exists(self.remotedb + ".keep"):
            os.rename(self.remotedb + ".keep", self.remotedb)

        self._loadRemoteDB()

        filesToDownload = []

        self.log.debug("downloadScreenshots", self.downloadScreenshots)
        self.log.debug("downloadThumbnails", self.downloadThumbnails)

        # Collect the (still missing) metadata files for every remote asset,
        # optionally including thumbnails/screenshots per user settings.
        for assetType in self.remoteAssets.keys():
            for assetId in self.remoteAssets[assetType].keys():
                remoteAsset = self.remoteAssets[assetType][assetId]
                tuples = remoteAsset.getDownloadTuples(
                    ignoreExisting=True,
                    onlyMeta=True,
                    excludeScreenshot=not self.downloadScreenshots,
                    excludeThumb=not self.downloadThumbnails)
                self.log.spam("Tuples", tuples)
                filesToDownload.extend(tuples)

        self.log.debug("filesToDownload", filesToDownload)

        progress(1.0)

        self._downloadTask = DownloadTask(self._syncParentWidget,
                                          filesToDownload,
                                          self._syncRemote2Finished,
                                          self._syncRemote2Progress)
Ejemplo n.º 26
0
    def _Checkout(self, all_projects, opt, err_event, err_results):
        """Checkout projects listed in all_projects

    Args:
      all_projects: List of all projects that should be checked out.
      opt: Program options returned from optparse.  See _Options().
      err_event: We'll set this event in the case of an error (after printing
          out info about the error).
      err_results: A list of strings, paths to git repos where checkout
          failed.
    """

        # Perform checkouts in multiple threads when we are using partial clone.
        # Without partial clone, all needed git objects are already downloaded,
        # in this situation it's better to use only one process because the checkout
        # would be mostly disk I/O; with partial clone, the objects are only
        # downloaded when demanded (at checkout time), which is similar to the
        # Sync_NetworkHalf case and parallelism would be helpful.
        if self.manifest.CloneFilter:
            syncjobs = self.jobs
        else:
            syncjobs = 1

        # Lock shared with the workers (guards progress/error bookkeeping in
        # _CheckoutWorker — confirm there).
        lock = _threading.Lock()
        pm = Progress('Checking out projects',
                      len(all_projects),
                      print_newline=not (opt.quiet),
                      always_print_percentage=opt.quiet)

        threads = set()
        # Semaphore caps the number of concurrently running checkouts.
        sem = _threading.Semaphore(syncjobs)

        for project in all_projects:
            # Check for any errors before running any more tasks.
            # ...we'll let existing threads finish, though.
            # (isSet() is the legacy alias of Event.is_set().)
            if err_event.isSet() and opt.fail_fast:
                break

            sem.acquire()
            if project.worktree:
                kwargs = dict(opt=opt,
                              sem=sem,
                              project=project,
                              lock=lock,
                              pm=pm,
                              err_event=err_event,
                              err_results=err_results)
                if syncjobs > 1:
                    t = _threading.Thread(target=self._CheckoutWorker,
                                          kwargs=kwargs)
                    # Ensure that Ctrl-C will not freeze the repo process.
                    t.daemon = True
                    threads.add(t)
                    t.start()
                else:
                    # Single-job mode: run inline on this thread.
                    self._CheckoutWorker(**kwargs)

        for t in threads:
            t.join()

        pm.end()
Ejemplo n.º 27
0
if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')

from functions import *

from progress import Progress
# answers_list[p] holds the known answer to problem p ('dummy' pads index 0).
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt'
          ) as answers:
    for line in answers:
        answers_list.append(int(line))
progress_ = Progress("Problem 050: Consecutive prime sum", 0, 1000000)

# Greedy approach: sum consecutive primes starting at 2 while the running
# total stays below one million ...
prime_sum = 0
primes = []
num = 2

while True:
    if is_prime(num):
        primes.append(num)
        if prime_sum + num >= 1000000:
            break
        prime_sum += num
        progress_.count = prime_sum
        progress_.progress()
    num += 1

# ... then trim primes off the front until the sum is itself prime.
# NOTE(review): this only finds the longest run if it starts near the list
# head (which happens to hold for this limit), and the snippet appears
# truncated — no final answer/progress display follows here.
while not is_prime(prime_sum):
    prime_sum -= primes[0]
    del primes[0]
Ejemplo n.º 28
0
if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')

from functions import *

from progress import Progress

# answers_list[p] holds the known answer to problem p ('dummy' pads index 0).
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt') as answers:
    answers_list.extend(int(line) for line in answers)

progress_ = Progress("Problem 058: Spiral primes", 0, answers_list[58])

# Ring `a` of the number spiral has side length 2*a - 1; after ring `a` the
# diagonals hold 4*a - 3 values in total.  Loop until fewer than 10% of the
# diagonal values are prime.
a = 2
n = 0
while n == 0 or n / (4 * a - 3) >= 0.10:
    corners = (4 * a * a - 10 * a + 7,
               4 * a * a - 8 * a + 5,
               4 * a * a - 6 * a + 3)
    # The fourth corner (bottom-right diagonal) is always an odd square,
    # hence never prime.
    n += sum(1 for corner in corners if is_prime(corner))
    a += 1
    if a % 11 == 0:  # arbitrary stride, purely to throttle the display
        progress_.count = 2 * a - 1
        progress_.progress()

# Report the answer: the side length at which the ratio dropped below 10%.
progress_.count = 2 * a - 1
progress_.progress()
Ejemplo n.º 29
0
if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')

from functions import *

from progress import Progress
# answers_list[p] holds the known answer to problem p ('dummy' pads index 0).
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt'
          ) as answers:
    for line in answers:
        answers_list.append(int(line))
progress_ = Progress("Problem 049: Prime permutations", 1000,
                     int(str(answers_list[49])[:4]))

# Find an arithmetic progression of three primes that are digit permutations
# of one another.  n = 1487 gives the sequence stated in the problem, so the
# search starts just past it.
done = 0
n = 1488
while not done:
    if is_prime(n):
        progress_.count = n
        progress_.progress()
        for a in range(1000, 5000):
            if is_prime(n + a) and is_prime(n + 2 * a):
                digits_1 = [int(i) for i in str(n)]
                digits_2 = [int(i) for i in str(n + a)]
                digits_3 = [int(i) for i in str(n + 2 * a)]
                if (sorted(digits_1)
                        == sorted(digits_2)) and (sorted(digits_1)
                                                  == sorted(digits_3)):
                    done = 1
                    break
    # BUG FIX: the original `while True:` loop never advanced n and never
    # tested `done`, so it either spun forever on a composite n or re-scanned
    # the same prime indefinitely.  Advance the candidate and let `done`
    # terminate the search.
    n += 1
Ejemplo n.º 30
0
            raise StopIteration
        self.i += 1
        k = self.keys[self.i]
        return k, self[k]

    def __invert__(self):
        """Return a new Symmetric_Dictionary with the two internal mappings
        swapped (forward becomes reverse and vice versa).

        NOTE(review): `__dict` is name-mangled to `_Symmetric_Dictionary__dict`,
        so `_dict` and `__dict` are distinct attributes being exchanged here.
        Also, `self.i = 0` resets the iteration cursor of the *original*
        object rather than the new one (`_self.i`) — confirm this is intended.
        """
        _self = Symmetric_Dictionary()
        _self._dict = self.__dict
        _self.__dict = self._dict
        self.i = 0
        return _self

    def __str__(self):
        """Render the dictionary as one "key: value" pair per line."""
        lines = []
        for key, value in self:
            lines.append(f"{key}: {value}")
        return "\n".join(lines)


if __name__ == "__main__":
    # Smoke test: fill the dictionary, invert it, and print every pair.
    from numpy.random import randint
    from progress import Progress
    # NOTE(review): `randint` is imported but never used in this block.
    count = 10_000
    nums = range(5, count)
    sd = Symmetric_Dictionary()
    # Progress is used as a context manager yielding three values; only the
    # first (`u`) is used.  Presumably u(i) advances the display and returns
    # the value to store — TODO confirm against the progress module.
    with Progress("Filling symdict", count, precision=2) as (u, _, _):
        for i, key in enumerate(nums):
            sd[key] = u(i)
    print("Reversing...")
    sd = ~sd  # __invert__ swaps the forward/reverse mappings
    print("Printing...")
    s = [f"{k}: {v}" for k, v in sd]
    print(s)