Example #1
    def download(self, filename, token=None):
        # remove leading backslash
        path = self.path
        url = self.get_url(token)
        self.path = None
        if path.startswith('/'):
            path = path[1:]
        if self.credentials:
            blob = self.bucket.get_blob(path)
            blob.download_to_filename(filename)
        else:

            def _download_internal():
                r = requests.get(url, stream=True)
                raise_detailed_error(r)
                if r.status_code == 200:
                    with open(filename, 'wb') as f:
                        for chunk in r:
                            f.write(chunk)
                elif r.status_code >= 500:
                    raise HTTPServerError(r.status_code, r.text)

            retry(_download_internal,
                  no_retries=10,
                  sleep_time=5,
                  exception_class=HTTPServerError)
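The retry helper these studio-style examples call is not shown on this page. A minimal sketch compatible with the keyword arguments used at the call sites (no_retries, sleep_time, exception_class, logger come from the calls; the default values and the final re-raise are assumptions):

import time

def retry(fn, no_retries=5, sleep_time=1, exception_class=Exception,
          logger=None):
    # Call fn until it returns without raising exception_class,
    # sleeping sleep_time seconds between attempts and re-raising
    # after no_retries failed attempts.
    for attempt in range(no_retries):
        try:
            return fn()
        except exception_class as e:
            if logger:
                logger.warning('attempt %d/%d failed: %s',
                               attempt + 1, no_retries, e)
            if attempt == no_retries - 1:
                raise
            time.sleep(sleep_time)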
Example #2
def _resetDMP(sid, explicit_unmap=False, delete_nodes=False):
    # If mpath has been turned on since the sr/vdi was attached, we
    # might be trying to unmap it before the daemon has been started
    # This is unnecessary (and will fail) so just return.
    deactivate_MPdev(sid)
    if not _is_mpath_daemon_running():
        util.SMlog("Warning: Trying to unmap mpath device when multipathd not running")
        return

    # If the multipath daemon is running, but we were initially plugged
    # with multipathing set to no, there may be no map for us in the multipath
    # tables. In that case, list_paths will return [], but remove_map might
    # throw an exception. Catch it and ignore it.
    if explicit_unmap:
        util.retry(lambda: util.pread2(['/usr/sbin/multipath', '-f', sid]),
                   maxretry=3, period=4)
        util.retry(lambda: util.pread2(['/usr/sbin/multipath', '-W']),
                   maxretry=3, period=4)
    else:
        mpath_cli.ensure_map_gone(sid)

    path = "/dev/mapper/%s" % sid
    
    if not util.wait_for_nopath(path, 10):
        util.SMlog("MPATH: WARNING - path did not disappear [%s]" % path)
    else:
        util.SMlog("MPATH: path disappeared [%s]" % path)
Example #3
File: mpath_dmp.py Project: xcp-ng/sm
def _resetDMP(sid, explicit_unmap=False):
    # If mpath has been turned on since the sr/vdi was attached, we
    # might be trying to unmap it before the daemon has been started
    # This is unnecessary (and will fail) so just return.
    deactivate_MPdev(sid)
    if not _is_mpath_daemon_running():
        util.SMlog("Warning: Trying to unmap mpath device when multipathd not running")
        return

    # If the multipath daemon is running, but we were initially plugged
    # with multipathing set to no, there may be no map for us in the multipath
    # tables. In that case, list_paths will return [], but remove_map might
    # throw an exception. Catch it and ignore it.
    if explicit_unmap:
        util.retry(lambda: util.pread2(['/usr/sbin/multipath', '-f', sid]),
                   maxretry=3, period=4)
        util.retry(lambda: util.pread2(['/usr/sbin/multipath', '-W']), maxretry=3,
                   period=4)
    else:
        mpath_cli.ensure_map_gone(sid)

    path = "/dev/mapper/%s" % sid

    if not util.wait_for_nopath(path, 10):
        util.SMlog("MPATH: WARNING - path did not disappear [%s]" % path)
    else:
        util.SMlog("MPATH: path disappeared [%s]" % path)
Example #4
File: asgroup.py Project: nivertech/udo
 def reload(self):
     if not util.confirm(
             "Are you sure you want to tear down the {} ASgroup and recreate it?"
             .format(self.name())):
         return
     util.message_integrations("Reloading ASgroup {}".format(self.name()))
     self.deactivate()
     util.retry(lambda: self.activate(), 60)
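The udo examples call util.retry(fn, seconds) and sometimes branch on its truthiness (e.g. if util.retry(lambda: lc.delete(), 500):). The helper itself is not shown; a minimal polling sketch consistent with those call sites, reading the second argument as a timeout in seconds (the poll interval and the exception swallowing are assumptions):

import time

def retry(fn, timeout, interval=5):
    # Poll fn until it returns a truthy value or the timeout expires;
    # exceptions from fn are treated as "not ready yet" and swallowed.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            result = fn()
            if result:
                return result
        except Exception:
            pass
        time.sleep(interval)
    return False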
Example #5
def proxyVotes(b, e):
    vote(firstProducer, firstProducer + 1)
    proxy = accounts[firstProducer]['name']
    retry(args.cleos + 'system regproxy ' + proxy)
    sleep(1.0)
    for i in range(b, e):
        voter = accounts[i]['name']
        retry(args.cleos + 'system voteproducer proxy ' + voter + ' ' + proxy)
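In the EOSIO bios-boot-style scripts here, retry takes a full cleos command line as a string and re-runs it until it exits cleanly. A minimal sketch under that reading (the logging and any cap on attempts are assumptions):

import subprocess
import time

def retry(command):
    # Re-run a shell command until its exit status is 0.
    while subprocess.call(command, shell=True) != 0:
        print('retrying: ' + command)
        time.sleep(1)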
Example #6
 def top_tracks(self, artist_uid, count=10):
     uid = artist_uid.split(':')[-1].split('/')[-1]
     uri = 'https://api.spotify.com/v1/artists/%s' % uid
     log.debug(uri)
     resp = util.retry(uri)
     doc = resp.json()
     artist_name = doc['name']
     limit = min(count, 50)
     misses = 0
     offset = 0
     tracks = []
     seen = set()
     log.debug(artist_name.encode('utf-8'))
     while len(tracks) < count and misses < 5:
         prev_len = len(tracks)
         uri = 'https://api.spotify.com/v1/search?%s' % urllib.urlencode(
             {
                 'type': 'track',
                 'limit': limit,
                 'offset': offset,
                 'q': 'artist:%s' % artist_name.encode('utf-8')
             }
         )
         log.debug(uri)
         resp = util.retry(uri)
         
         try:
             doc = resp.json()
         except ValueError:
             # response body was not valid JSON; treat as no results
             doc = {'tracks': {'items': []}}

         for track in doc.get('tracks', {}).get('items', []):
             title = kt.normalize(track['name'])
             if track['artists'][0]['uri'].split(':')[-1] == uid and title not in seen:
                 uniq = True
                 for seen_title in seen:
                     if kt.matches(title, seen_title):
                         uniq = False
                         break
                 if uniq:
                     tracks.append(track_from_json(track))
                 seen.add(title)
             if len(tracks) >= count:
                 break
         offset += limit
         if len(tracks) == prev_len:
             misses += 1
     return tracks
Example #7
        def finish_download():
            try:
                self._download_file(key, tar_filename)
            except BaseException as e:
                self.logger.debug(e)

            if os.path.exists(tar_filename):
                # first, figure out if the tar file has a base path of .
                # or not
                self.logger.info("Untarring {}".format(tar_filename))
                listtar, _ = subprocess.Popen(['tar', '-tf', tar_filename],
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE,
                                              close_fds=True).communicate()
                listtar = listtar.strip().split(b'\n')
                listtar = [s.decode('utf-8') for s in listtar]

                self.logger.info('List of files in the tar: ' + str(listtar))
                if listtar[0].startswith('./'):
                    # Files are archived into tar from .; adjust path
                    # accordingly
                    basepath = local_path
                else:
                    basepath = local_basepath

                tarcmd = ('mkdir -p {} && ' +
                          'tar -xf {} -C {} --keep-newer-files') \
                    .format(basepath, tar_filename, basepath)

                self.logger.debug('Tar cmd = {}'.format(tarcmd))

                tarp = subprocess.Popen(['/bin/bash', '-c', tarcmd],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT,
                                        close_fds=True)

                tarout, tarerr = tarp.communicate()
                if tarp.returncode != 0:
                    self.logger.info('tar had a non-zero return code!')
                    self.logger.info('tar cmd = ' + tarcmd)
                    self.logger.info('tar output: \n ' + str(tarout))

                if len(listtar) == 1:
                    actual_path = os.path.join(basepath, listtar[0])
                    self.logger.info('Renaming {} into {}'.format(
                        actual_path, local_path))
                    retry(lambda: os.rename(actual_path, local_path),
                          no_retries=5,
                          sleep_time=1,
                          exception_class=OSError,
                          logger=self.logger)

                os.remove(tar_filename)
            else:
                self.logger.warning(
                    'file {} download failed'.format(tar_filename))
Example #8
def _refresh_DMP(sid, npaths):
    util.retry(lambda: util.pread2(['/usr/sbin/multipath', '-r', sid]),
               maxretry=3, period=4)
    path = os.path.join(DEVMAPPERPATH, sid)
    util.wait_for_path(path, 10)
    if not os.path.exists(path):
        raise xs_errors.XenError('DMP failed to activate mapper path')
    lvm_path = "/dev/disk/by-scsid/"+sid+"/mapper"
    util.wait_for_path(lvm_path, 10)
    activate_MPdev(sid, path)
Example #9
File: asgroup.py Project: nivertech/udo
 def activate_lc(self):
     # make sure we have a launchconfig activated
     lc = self.lc()
     if not lc:
         # might need to wait a min
         util.retry(lambda: self.lc(), 60)
         lc = self.lc()
     if lc.exists():
         print "Using LaunchConfig {}".format(lc.name())
         return True
     print "Creating LaunchConfig {}".format(lc.name())
     return lc.activate()
Example #10
File: asgroup.py Project: Bauerpauer/udo
 def update_lc(self):
     oldlc = self.lc()
     # get new version
     lc = oldlc.update()
     # set lc
     asgroup = self.get_asgroup()
     lcname = lc.name()
     setattr(asgroup, 'launch_config_name', lcname)
     asgroup.update()
     # delete old
     conn = util.as_conn()
     if oldlc.name() != lcname:
         util.retry(lambda: conn.delete_launch_configuration(oldlc.name()), 60)
Example #11
    def reload(self):
        # skip deactivation if it doesn't exist
        asgroup = self.get_asgroup()
        if not asgroup or not self.exists():
            self.activate()
            return

        debug("In asgroup.py reload")
        if not util.confirm("Are you sure you want to tear down the {} ASgroup and recreate it?".format(self.name())):
            return
        util.message_integrations("Reloading ASgroup {}".format(self.name()))
        self.deactivate()
        util.retry(lambda: self.activate(), 60)
Example #12
File: mpath_dmp.py Project: thomasmck/sm
def _refresh_DMP(sid, npaths):
    if not _is_valid_multipath_device(sid):
        return
    util.retry(lambda: util.pread2(['/usr/sbin/multipath', '-r', sid]),
               maxretry=3,
               period=4)
    path = os.path.join(DEVMAPPERPATH, sid)
    util.wait_for_path(path, 10)
    if not os.path.exists(path):
        raise xs_errors.XenError('DMP failed to activate mapper path')
    lvm_path = "/dev/disk/by-scsid/" + sid + "/mapper"
    util.wait_for_path(lvm_path, 10)
    activate_MPdev(sid, path)
Example #13
File: asgroup.py Project: nivertech/udo
 def update_lc(self):
     oldlc = self.lc()
     # get new version
     lc = oldlc.update()
     # set lc
     asgroup = self.get_asgroup()
     lcname = lc.name()
     setattr(asgroup, 'launch_config_name', lcname)
     asgroup.update()
     # delete old
     conn = util.as_conn()
     if oldlc.name() != lcname:
         util.retry(lambda: conn.delete_launch_configuration(oldlc.name()),
                    60)
Example #14
    def stop_experiment(self, experiment):
        if isinstance(experiment, six.string_types):
            key = experiment
        else:
            key = experiment.key

        headers = self._get_headers()

        def post_request():
            request = requests.post(self.url + '/api/stop_experiment',
                                    headers=headers,
                                    data=json.dumps({"key": key}))
            self._raise_detailed_error(request)

        retry(post_request, sleep_time=10, logger=self.logger)
Example #15
File: asgroup.py Project: Bauerpauer/udo
 def activate_lc(self):
     # make sure we have a launchconfig activated
     lc = self.lc()
     if not lc:
         # might need to wait a min
         util.retry(lambda: self.lc(), 60)
         lc = self.lc()
     if not lc:
         print "Timed out waiting to create LaunchConfiguration"
         return False
     if lc.exists():
         print "Using LaunchConfig {}".format(lc.name())
         return True
     print "Creating LaunchConfig {}".format(lc.name())
     return lc.activate()
Example #16
File: asgroup.py Project: abunuwas/udo
    def reload(self):
        # skip deactivation if it doesn't exist
        asgroup = self.get_asgroup()
        if not asgroup or not self.exists():
            self.activate()
            return

        debug("In asgroup.py reload")
        if not util.confirm(
                "Are you sure you want to tear down the {} ASgroup and recreate it?"
                .format(self.name())):
            return
        util.message_integrations("Reloading ASgroup {}".format(self.name()))
        self.deactivate()
        util.retry(lambda: self.activate(), 60)
Example #17
    def _put_file(self, path, file_object, token, userid):
        request_ref = self.storage_bucket + "/o?name={0}".format(path)

        def post_file(**kwargs):
            def _post_file():
                request_object = self.requests.post(request_ref,
                                                    data=file_object,
                                                    **kwargs)
                raise_detailed_error(request_object)
                return request_object

            return retry(_post_file,
                         no_retries=10,
                         sleep_time=5,
                         exception_class=HTTPServerError)

        if token:
            headers = {"Authorization": "Firebase " + token}

            request_object = post_file(headers=headers)
            if userid:
                headers['Content-Type'] = 'application/json'

                def patch_owner():
                    patch_request = self.requests.patch(
                        request_ref,
                        headers=headers,
                        data=json.dumps({'metadata': {
                            'owner': userid
                        }}))

                    raise_detailed_error(patch_request)
                    return patch_request

                retry(patch_owner,
                      no_retries=10,
                      sleep_time=5,
                      exception_class=HTTPServerError)

            return request_object.json()
        elif self.credentials:
            blob = self.bucket.blob(path)
            if isinstance(file_object, str):
                return blob.upload_from_filename(filename=file_object)
            else:
                return blob.upload_from_file(file_obj=file_object)
        else:
            return post_file().json()
Example #18
    def deactivate(self): # a.k.a asg destroy
        # NOTE
        # deleting asg logic should be in its own function

        # * delete ASG by reducing capacities of asg to 0
        # * delete launchconfig
        #
        # reducing ASG capacities to 0 triggers eventual instance
        # termination
        debug("In asgroup.py deactivate")        

        asg_name = self.name()
        ag = util.as_conn()
        ec2 = util.ec2_conn()
    
        asg_info = ag.describe_auto_scaling_groups( AutoScalingGroupNames = [ asg_name ] )

        if not asg_info['AutoScalingGroups']:
            print("ASG does not exist.  Maybe it was already deleted?")
        else:
            # delete the ASG
            num_instances = len(asg_info['AutoScalingGroups'][0]['Instances'])
            if self.get_num_instances() == 0:
                pprint("There are no instances in asg: " + asg_name)
                print("Deleting asg: " + asg_name)
                response = ag.delete_auto_scaling_group(AutoScalingGroupName=asg_name)
                util.message_integrations("Deleted ASgroup {}".format(asg_name))
            else:
                debug("There are " + str(num_instances) + " instances that need to be removed from asg: " + asg_name)
                debug("terminating instances in asg: " + asg_name)
                debug("by setting to 0 MinSize, MaxSize, DesiredCapacity")
                response = ag.update_auto_scaling_group(
                    AutoScalingGroupName=asg_name, MinSize=0, MaxSize=0,
                    DesiredCapacity=0)
                debug("Waiting 30 seconds to give AWS time to terminate the instances")

                if self.get_num_instances() != 0:
                    util.retry(lambda: ag.delete_auto_scaling_group(AutoScalingGroupName=asg_name), 300)
                if self.get_num_instances() != 0:
                    # instances still remain after the retry window
                    print("unable to delete instances in asg.")
                    return False
                util.message_integrations("Deleted ASgroup {}".format(asg_name))

        # if launch config exists, delete it
        lc = self.lc()
        if not lc.exists():
            print("launchconfig does not exist.  Maybe you deleted it already?")
        else:
            lc.deactivate()
        return True
Example #19
    def add_experiment(self, experiment, userid=None, compression=None):

        headers = self._get_headers()
        compression = compression if compression else self.compression

        for tag, art in six.iteritems(experiment.artifacts):
            if not art['mutable'] and art.get('local') is not None:
                art['hash'] = HTTPArtifactStore(None, None,
                                                compression,
                                                self.verbose) \
                    .get_artifact_hash(art)

        data = {}
        data['experiment'] = experiment.__dict__
        data['compression'] = compression

        def post_request():
            request = requests.post(self.url + '/api/add_experiment',
                                    headers=headers,
                                    data=json.dumps(data))

            self._raise_detailed_error(request)
            return request

        request = retry(post_request,
                        no_retries=10,
                        sleep_time=10,
                        logger=self.logger)

        artifacts = request.json()['artifacts']

        self._update_artifacts(experiment, artifacts)
Example #20
File: app.py Project: knzm/gaeprimes
def get_next_prime():
    log.debug("get_next_prime()")
    @transactional
    def get_ownership(prime):
        log.debug("get_ownereship()")
        key = prime.key()
        last_assigned_at = prime.last_assigned_at
        # get the current entity in this transaction
        prime = model.Prime.get(key)
        if prime.last_assigned_at != last_assigned_at:
            return None
        prime.last_assigned_at = datetime.now()
        prime.put()
        return prime
    while retry(10, raise_if_failed=True):
        all_primes = model.Prime.all()
        primes = all_primes.order("last_assigned_at").order("number").fetch(1)
        log.debug("primes retrieved")
        if primes:
            prime = get_ownership(primes[0])
            if prime:
                return prime
        elif not all_primes.fetch(1):
            log.debug("call Prime.ensure_number()")
            sentinel = model.Prime.ensure_number(0, parent=prime_root)
        else:
            pass
Example #21
File: app.py Project: knzm/gaeprimes
def get_max_sieve():
    log.debug("get_max_sieve()")
    while retry(10, raise_if_failed=True):
        all_sieves = model.Sieve.all()
        sieves = all_sieves.order("-number").fetch(1)
        if sieves:
            return sieves[0].number
Example #22
    def get_issues(page):
        url = ("https://api.github.com/repos/Khan/khan-exercises/issues"
               "?page=%d&per_page=100" % page)
        issue_data = util.retry(lambda: urllib2.urlopen(url, timeout=60),
                                'fetching khan-exercises issues',
                                lambda exc: isinstance(exc, urlfetch_errors))

        # This flag is False if we should continue to the next page of
        # issues and True if we should stop looking at more pages.
        done = False
        for issue in json.loads(issue_data.read()):
            if issue["user"]["login"] == "KhanBugz":
                if last_issue == -1:
                    # If we have no data so far, only go one page.
                    done = True

                if issue["number"] > last_issue:
                    first_issue[0] = max(first_issue[0], issue["number"])
                    issues.append(issue)
                else:
                    # If we've come to an issue we already saw,
                    # don't continue to further pages or issues
                    done = True
                    break

        if ((re.findall(r'<(.*?)>; rel="(.*?)"',
                        issue_data.info().getheader("Link"))[0][1] == "next")
                and not done):
            get_issues(page + 1)
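This variant of util.retry takes a thunk, a human-readable description used for logging, and a predicate that decides whether a given exception is retryable. A minimal sketch of that shape; max_tries and sleep_seconds are assumed defaults, not taken from the source:

import time

def retry(fn, description, should_retry, max_tries=5, sleep_seconds=5):
    # Call fn; if it raises an exception for which should_retry(exc)
    # is true, log the description and try again, otherwise re-raise.
    for attempt in range(max_tries):
        try:
            return fn()
        except Exception as exc:
            if attempt == max_tries - 1 or not should_retry(exc):
                raise
            print('Error while %s; retrying' % description)
            time.sleep(sleep_seconds)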
Example #24
File: asgroup.py Project: abunuwas/udo
 def activate_lc(self):
     debug("In asgroup.py activate_lc")
     # make sure we have a launchconfig activated
     lc = self.lc()
     if not lc:
         # might need to wait a min
         util.retry(lambda: self.lc(), 60)
         lc = self.lc()
     if not lc:
         print "Timed out waiting to create LaunchConfiguration"
         return False
     if lc.exists():
         print "Using LaunchConfig {}".format(lc.get_lc_server_name())
         return True
     print "Creating LaunchConfig {}".format(lc.name())
     return lc.activate()
Example #25
def _refresh_DMP(sid, npaths):
    if not _is_valid_multipath_device(sid):
        return
    path = os.path.join(DEVMAPPERPATH, sid)
    # If the mapper path doesn't exist force a reload in multipath
    if not os.path.exists(path):
        util.retry(lambda: util.pread2(['/usr/sbin/multipath', '-r', sid]),
                   maxretry=3,
                   period=4)
        util.wait_for_path(path, 10)
    if not os.path.exists(path):
        raise xs_errors.XenError(
            'MultipathMapperPathMissing',
            'Device mapper path {} not found'.format(path))
    lvm_path = "/dev/disk/by-scsid/" + sid + "/mapper"
    util.wait_for_path(lvm_path, 10)
    activate_MPdev(sid, path)
Example #26
File: launchconfig.py Project: anbet/udo
 def deactivate(self):
     if not self.exists():
         return
     print "Deleting launchconfig..."
     lc = self.get_lc()
     if util.retry(lambda: lc.delete(), 500):
         util.message_integrations("Deleted LaunchConfig {}".format(self.name()))
     else:
         util.message_integrations("Failed to delete LaunchConfig {}".format(self.name()))
Example #30
def myretry(*args, **kvargs):
    """ Log error and how many retries is remaining. """
    def log_retries(tries_remaining, exception, delay):
        global logger
        logger.warn('{}; retry {} more times, next in {} sec.'\
            .format(
                str(exception),
                tries_remaining,
                delay
            )
        )

    kvargs['hook'] = log_retries
    return retry(*args, **kvargs)
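myretry forwards everything to a retry decorator factory whose hook keyword receives (tries_remaining, exception, delay), matching the widely circulated retry-decorator recipe. A sketch along those lines; every parameter except hook is an assumption about the imported retry:

import time

def retry(max_tries, delay=1, backoff=2, exceptions=(Exception,), hook=None):
    # Decorator factory: the wrapped function is retried on the given
    # exceptions, with hook(tries_remaining, exception, delay) invoked
    # before each sleep and the delay multiplied by backoff each time.
    def decorator(func):
        def wrapper(*args, **kwargs):
            current_delay = delay
            for tries_remaining in range(max_tries - 1, -1, -1):
                try:
                    return func(*args, **kwargs)
                except exceptions as e:
                    if tries_remaining == 0:
                        raise
                    if hook is not None:
                        hook(tries_remaining, e, current_delay)
                    time.sleep(current_delay)
                    current_delay *= backoff
        return wrapper
    return decorator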
Example #31
def createStakedAccounts(b, e):
    ramFunds = round(args.ram_funds * 10000)
    configuredMinStake = round(args.min_stake * 10000)
    maxUnstaked = round(args.max_unstaked * 10000)
    for i in range(b, e):
        a = accounts[i]
        funds = a['funds']
        print('#' * 80)
        print('# %d/%d %s %s' % (i, e, a['name'], intToCurrency(funds)))
        print('#' * 80)
        if funds < ramFunds:
            print('skipping %s: not enough funds to cover ram' % a['name'])
            continue
        minStake = min(funds - ramFunds, configuredMinStake)
        unstaked = min(funds - ramFunds - minStake, maxUnstaked)
        stake = funds - ramFunds - unstaked
        stakeNet = round(stake / 2)
        stakeCpu = stake - stakeNet
        print('%s: total funds=%s, ram=%s, net=%s, cpu=%s, unstaked=%s' % (a['name'], intToCurrency(a['funds']), intToCurrency(ramFunds), intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(unstaked)))
        assert(funds == ramFunds + stakeNet + stakeCpu + unstaked)
        retry(args.cleos + 'system newaccount --transfer eosio %s %s --stake-net "%s" --stake-cpu "%s" --buy-ram "%s"   ' %
            (a['name'], a['pub'], intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(ramFunds)))
        if unstaked:
            retry(args.cleos + 'transfer eosio %s "%s"' % (a['name'], intToCurrency(unstaked)))
Example #32
File: app.py Project: knzm/gaeprimes
def pop_sieves():
    log.debug("pop_sieves()")
    while retry(10, raise_if_failed=True):
        all_sieves = model.Sieve.all()
        sieves = all_sieves.order("number").fetch(1)
        if sieves:
            try:
                return delete_sieve(sieves[0])
            except (KeyError, db.TransactionFailedError):
                pass
        else:
            @transactional
            def init_sieves():
                for number in xrange(2, chunk_size):
                    model.Sieve(number=number, parent=sieve_root).put()
            init_sieves()
Example #33
File: asgroup.py Project: Bauerpauer/udo
 def deactivate(self):
     if not self.exists():
         return
     ag = self.get_asgroup()
     ag.min_size = 0
     ag.max_size = 0
     ag.desired_capacity = 0
     ag.update()
     ag.shutdown_instances()
     print "Deleting... this may take a few minutes..."
     if util.retry(lambda: ag.delete(), 500):
         util.message_integrations("Deleted ASgroup {}".format(self.name()))
         # delete launchconfig too
         lc = self.lc()
         lc.deactivate()
     else:
         util.message_integrations("Failed to delete ASgroup {}".format(self.name()))
Example #34
    def check(self):
        import hashlib
        import datetime
        now = datetime.datetime.utcnow()
        logging.info(now)
        uid = self.system_id
        s = "{uid}{date:%Y%m%d}{hour}{cl_cons}".format(date=now,
                                                       hour=now.hour % 7,
                                                       cl_cons="3456789012",
                                                       uid=self.system_id)
        pokey = hashlib.md5(s.encode()).hexdigest()
        url = 'https://kiosk.multidat.ru/api.php?action=CheckLichense&PoKey={key}&uid={uid}'.format(
            key=pokey, uid=uid)
        resp = {}
        try:
            resp = retry(urllib.error.URLError)(urllib.request.urlopen)(
                url, timeout=5).read().decode()
            resp = json.loads(resp)
        except urllib.error.URLError:
            exit(403)
        except Exception:
            exit(100)

        if resp.get('err_txt'):
            logging.critical(resp.get('err_txt'))
            exit(int(resp.get('err_no', '403')))
        else:
            #assert self.system_id == resp['uid']
            s = "2109876543{date:%d%m%Y}{hour}{key}".format(date=now,
                                                            hour=now.hour % 7,
                                                            key=pokey)
            #print(s)
            srvkey = hashlib.md5(s.encode()).hexdigest()
            print(srvkey)
            print(resp)

            if resp['SrvKey'][:-3] != srvkey:
                exit(int(resp.get('err_no', '403')))
            print(resp['SrvKey'][-3:], end="")
            print('ok')
            t = self.checker

            self.checker = Checker(int(resp['SrvKey'][-3:]), self.check)
            self.checker.start()
            return
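In this example retry(urllib.error.URLError) is used as a decorator factory applied inline to urllib.request.urlopen. A minimal sketch of that calling convention; the attempt count and delay are assumptions:

import time

def retry(exception_class, tries=3, delay=1):
    # retry(SomeError)(fn) returns a wrapper that re-calls fn when
    # exception_class is raised, re-raising after the last attempt.
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for attempt in range(tries):
                try:
                    return fn(*args, **kwargs)
                except exception_class:
                    if attempt == tries - 1:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator

# e.g. resp = retry(urllib.error.URLError)(urllib.request.urlopen)(url, timeout=5)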
Example #35
File: asgroup.py Project: nivertech/udo
 def deactivate(self):
     if not self.exists():
         return
     ag = self.get_asgroup()
     ag.min_size = 0
     ag.max_size = 0
     ag.desired_capacity = 0
     ag.update()
     ag.shutdown_instances()
     print "Deleting... this may take a few minutes..."
     if util.retry(lambda: ag.delete(), 500):
         util.message_integrations("Deleted ASgroup {}".format(self.name()))
         # delete launchconfig too
         lc = self.lc()
         lc.deactivate()
     else:
         util.message_integrations("Failed to delete ASgroup {}".format(
             self.name()))
Example #36
    def checkpoint_experiment(self, experiment):
        if isinstance(experiment, six.string_types):
            key = experiment
            experiment = self.get_experiment(key)
        else:
            key = experiment.key

        headers = self._get_headers()

        def post_request():
            request = requests.post(self.url + '/api/checkpoint_experiment',
                                    headers=headers,
                                    data=json.dumps({"key": key}))

            self._raise_detailed_error(request)
            artifacts = request.json()['artifacts']
            return artifacts

        artifacts = retry(post_request, sleep_time=10, logger=self.logger)

        self._update_artifacts(experiment, artifacts)
        experiment.time_last_checkpoint = time.time()
Example #37
def get_ticket_data(start_time_t):
    global JIRA_PASSWORD
    if JIRA_PASSWORD is None:
        with open(JIRA_PASSWORD_FILE) as f:
            JIRA_PASSWORD = f.read().strip()

    # Compose API call: get as many issues as possible from > start_time_t
    # We can only get 1000 at a time, according to
    #   https://confluence.atlassian.com/display/CLOUDKB/Changing+maxResults+Parameter+for+JIRA+REST+API
    # Also note that we must pass in an integer representing milliseconds
    # since the epoch, according to:
    #   https://confluence.atlassian.com/display/JIRA/Advanced+Searching#AdvancedSearching-Created
    fields = ','.join([CREATED_FIELD, EXERCISE_FIELD])
    values = {'fields': fields,
              'maxResults': 1000,
              'project': '"Assessment items"'
                         ' and "Issue type" != "Not translated"'
                         ' and created > %s'
                         ' order by created asc'
                         % int(1000 * start_time_t),
              }
    url = ('https://khanacademy.atlassian.net/rest/api/latest/search'
           '?jql=%s' % urllib.urlencode(values))
    request = urllib2.Request(url)
    # Send base64-encoded 'user:password', according to
    #   https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Basic+Authentication
    encoded_password = base64.standard_b64encode('%s:%s' % (JIRA_USER,
                                                            JIRA_PASSWORD))
    request.add_unredirected_header('Authorization',
                                    'Basic %s' % encoded_password)
    request.add_header('Content-Type', 'application/json')
    urlfetch_errors = (socket.error, urllib2.HTTPError, httplib.HTTPException)

    data = util.retry(lambda: urllib2.urlopen(request, timeout=60),
                      'loading jira ticket data',
                      lambda exc: isinstance(exc, urlfetch_errors))
    return json.load(data)
Example #38
def get_ticket_data(start_time_t):
    """Given start_time to export from, call Zendesk API for ticket data."""
    global ZENDESK_PASSWORD
    if ZENDESK_PASSWORD is None:
        with open(ZENDESK_PASSWORD_FILE) as f:
            ZENDESK_PASSWORD = f.read().strip()

    # According to
    #   http://developer.zendesk.com/documentation/rest_api/ticket_export.html
    # "Requests with a start_time less than 5 minutes old will also
    # be rejected."
    if int(time.time()) - start_time_t <= 300:
        return None

    url = ('https://khanacademy.zendesk.com/api/v2/exports/tickets.json'
           '?start_time=%s' % start_time_t)
    request = urllib2.Request(url)
    # This is the best way to set the user, according to
    #    http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
    encoded_password = base64.standard_b64encode('%s:%s' % (ZENDESK_USER,
                                                            ZENDESK_PASSWORD))
    request.add_unredirected_header('Authorization',
                                    'Basic %s' % encoded_password)

    def _should_retry(exc):
        if isinstance(exc, urllib2.HTTPError) and exc.code == 429:
            # quota limits: try again, but wait first.
            time.sleep(int(exc.headers['Retry-After']))
        return isinstance(exc, (socket.error, urllib2.HTTPError,
                                httplib.HTTPException))

    data = util.retry(lambda: urllib2.urlopen(request, timeout=60),
                      'loading zendesk ticket data',
                      _should_retry)

    return json.load(data)
Example #40
def createStakedAccounts(begin, end):
    # (b, e) = (0, len(accounts)) = (0, 7); accounts 0-3 are users, 4-7 producers
    for i in range(begin, end):
        a = accounts[i]

        stakeNet = 500000000000   # 50,000,000 DATX
        stakeCpu = 500000000000   # 50,000,000 DATX
        stakeRam = 500000000000   # 50,000,000 DATX
        small_stake = 5000000     # 500 DATX

        util.retry(
            'cldatx ' +
            'system newaccount --transfer datxos %s %s --stake-net "%s" --stake-cpu "%s" --buy-ram "%s"   '
            % (a['name'], a['pub'], intToCurrency(stakeNet),
               intToCurrency(stakeCpu), intToCurrency(stakeRam)))
        util.sleep(1)

    for i in range(user_limit):
        a = accounts[i]
        util.retry(
            'cldatx ' +
            'push action datxos.token transfer \'["datxos",%s,"%s","vote"]\' -p datxos'
            % (a['name'], intToCurrency(5000000000000)))
        util.sleep(1)
        util.retry('cldatx ' +
                   'system delegatebw datxos %s "%s" "%s" --transfer' %
                   (a['name'], intToCurrency(300000000000),
                    intToCurrency(350000000000)))
        util.sleep(1)

    util.run(
        'cldatx ' +
        'push action datxos.dtoke transfer \'{"from":"datxos.dbtc","to":"alice","quantity":"300.0000 DBTC","memo":"test"}\' -p datxos.dbtc'
    )
    util.run(
        'cldatx ' +
        'push action datxos.dtoke transfer \'{"from":"datxos.deth","to":"alice","quantity":"300.0000 DETH","memo":"test"}\' -p datxos.deth'
    )
    util.run(
        'cldatx ' +
        'push action datxos.dtoke transfer \'{"from":"datxos.deos","to":"alice","quantity":"300.0000 DEOS","memo":"test"}\' -p datxos.deos'
    )
Example #41
def SetSystemContract():
    util.retry('cldatx ' + 'set contract datxos ' + contracts_dir +
               'DatxSystem/ -x 3500')
    util.sleep(1)
    util.run('cldatx ' + 'push action datxos setpriv' +
             util.jsonArg(['datxos.msig', 1]) + '-p datxos@active')
Example #42
File: asgroup.py Project: Bauerpauer/udo
 def reload(self):
     if not util.confirm("Are you sure you want to tear down the {} ASgroup and recreate it?".format(self.name())):
         return
     util.message_integrations("Reloading ASgroup {}".format(self.name()))
     self.deactivate()
     util.retry(lambda: self.activate(), 60)
Example #43
def vote(b, e):
    for i in range(b, e):
        voter = accounts[i]['name']
        prods = random.sample(range(firstProducer, firstProducer + numProducers), num_producers_vote-1)
        prods = ' '.join(map(lambda x: accounts[x]['name'], prods))
        util.retry('cldatx ' + 'system voteproducer prods ' + voter + ' ' + prods)
Example #44
File: asgroup.py Project: abunuwas/udo
    def deactivate(self):  # a.k.a asg destroy
        # NOTE
        # deleting asg logic should be in its own function

        # * delete ASG by reducing capacities of asg to 0
        # * delete launchconfig
        #
        # reducing ASG capacities to 0 triggers eventual instance
        # termination
        debug("In asgroup.py deactivate")

        asg_name = self.name()
        ag = util.as_conn()
        ec2 = util.ec2_conn()

        asg_info = ag.describe_auto_scaling_groups(
            AutoScalingGroupNames=[asg_name])

        if not asg_info['AutoScalingGroups']:
            print("ASG does not exist.  Maybe it was already deleted?")
        else:
            # delete the ASG
            num_instances = len(asg_info['AutoScalingGroups'][0]['Instances'])
            if self.get_num_instances() == 0:
                pprint("There are no instances in asg: " + asg_name)
                print("Deleting asg: " + asg_name)
                response = ag.delete_auto_scaling_group(
                    AutoScalingGroupName=asg_name)
                util.message_integrations(
                    "Deleted ASgroup {}".format(asg_name))
            else:
                debug("There are " + str(num_instances) +
                      " instances that need to be removed from asg: " +
                      asg_name)
                debug("terminating instances in asg: " + asg_name)
                debug("by setting to 0 MinSize, MaxSize, DesiredCapacity")
                response = ag.update_auto_scaling_group(
                    AutoScalingGroupName=asg_name,
                    MinSize=0,
                    MaxSize=0,
                    DesiredCapacity=0)
                debug(
                    "Waiting 30 seconds to give AWS time to terminate the instances"
                )

                if self.get_num_instances() != 0:
                    util.retry(
                        lambda: ag.delete_auto_scaling_group(
                            AutoScalingGroupName=asg_name), 300)
                if self.get_num_instances() != 0:
                    # instances still remain after the retry window
                    print("unable to delete instances in asg.")
                    return False
                util.message_integrations(
                    "Deleted ASgroup {}".format(asg_name))

        # if launch config exists, delete it
        lc = self.lc()
        if not lc.exists():
            print(
                "launchconfig does not exist.  Maybe you deleted it already?")
        else:
            lc.deactivate()
        return True
Example #45
 def acknowledge(self, ack_id):
     retry(lambda: self._client.delete_message(QueueUrl=self._queue_url,
                                               ReceiptHandle=ack_id),
           sleep_time=10,
           logger=self.logger)
Example #46
def stepSetSystemContract():
    retry(args.cleos + 'set contract eosio ' + args.contracts_dir + 'eosio.system/')
    sleep(1)
    run(args.cleos + 'push action eosio setpriv' + jsonArg(['eosio.msig', 1]) + '-p eosio@active')
Example #47
def RegProducers():

    util.retry('cldatx system regproducer ' + producer_name + ' ' +
               public_key + ' ' + producer_url + ' ' + verifier_url)