Example #1
class ToggleAnimation(AnimationActionSprite, Triggered):
    "Animation that can be toggled on/off. """
    def __init__(self, **others):
        Triggered.__init__(self, **others)
        AnimationActionSprite.__init__(self, **others)
        
        self.debugging = False
        self.playing = False
        self.timer = Timer(0.2)
        
        self.animation_player.backwards = True

    def update(self, player, collisions_group, **others):
        self.dprint("\n### ToggleAnimation.update()")
        e = self.get_trigger(self.triggered_code)
        self.dprint("\tEvent:" + str(e))
        if self.timer.finished and e:
            self.timer.reset()
            self.dprint("\t\tToggling Animation")
            if self.playing:
                self.dprint("\t\t\tDeactivating animation")
                self.playing = False
            else:
                self.dprint("\t\t\tActivating animation")
                self.playing = True
        
        if self.playing:
            self.next_frame()
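The toggle logic above debounces trigger events with Timer(0.2): the animation state only flips when the timer reports finished, and reset() restarts the cooldown. The project's own Timer class is not part of this excerpt; a minimal sketch of a countdown timer with that finished/reset() interface could look like the following (the class name CooldownTimer and the use of time.monotonic() are assumptions).

import time

class CooldownTimer:
    # Sketch only: a countdown timer exposing the finished/reset() interface
    # that ToggleAnimation relies on, not the project's actual Timer class.
    def __init__(self, duration):
        self.duration = duration              # cooldown length in seconds
        self._started = time.monotonic()      # start of the current countdown

    @property
    def finished(self):
        # True once the cooldown has fully elapsed since the last reset()
        return time.monotonic() - self._started >= self.duration

    def reset(self):
        # restart the cooldown from "now"
        self._started = time.monotonic()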
Example #2
class RemoteSubscriber(object):
    def __init__(self, uuid, commandID, ipaddress="", port=32400, protocol="http", name=""):
        self.poller         = False
        self.uuid           = uuid
        self.commandID      = commandID
        self.url            = ""
        self.name           = name
        self.lastUpdated    = Timer()

        if ipaddress and protocol:
            self.url = "%s://%s:%s" % (protocol, ipaddress, port)

    def refresh(self, sub):
        log.debug("RemoteSubscriber::refresh %s (cid=%s)" % (self.uuid, sub.commandID))

        if sub.url != self.url:
            log.debug("RemoteSubscriber::refresh new url %s", sub.url)
            self.url = sub.url

        if sub.commandID != self.commandID:
            log.debug("RemoteSubscriber::refresh new commandID %s", sub.commandID)
            self.commandID = sub.commandID

        self.lastUpdated.restart()

    def shouldRemove(self):
        if self.lastUpdated.elapsed() > SUBSCRIBER_REMOVE_INTERVAL:
            log.debug("RemoteSubscriber::shouldRemove removing %s because elapsed: %lld" % (self.uuid, self.lastUpdated.elapsed()))
            return True

        log.debug("RemoteSubscriber::shouldRemove will not remove %s because elapsed: %lld" % (self.uuid, self.lastUpdated.elapsed()))
        return False
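Unlike the countdown timer in Example #1, the Timer here acts as a stopwatch: refresh() calls restart() and shouldRemove() compares elapsed() against SUBSCRIBER_REMOVE_INTERVAL. A minimal sketch of a stopwatch-style timer with that restart()/elapsed() interface, assuming elapsed() returns seconds (the project's real Timer is not shown here):

import time

class StopwatchTimer:
    # Sketch only: stopwatch-style timer with the restart()/elapsed() methods
    # used by RemoteSubscriber above; assumes elapsed() is measured in seconds.
    def __init__(self):
        self._started = time.monotonic()

    def restart(self):
        # move the reference point to "now"
        self._started = time.monotonic()

    def elapsed(self):
        # seconds since construction or the last restart()
        return time.monotonic() - self._started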
Example #3
def _find_seeks_index(dbfile, indexname, queries, debug=False):
    """Use the index file to find exact seek positions for relevant
    records. End locations are not necessary since we are guaranteed that
    the data will be present, so a count of occurrences is sufficient for
    prompt termination."""
    timer = Timer(rl_min_dur=1)
    locs = Counter()
    if debug:
        print "  Searching index..."
    indexfh = ChunkedFile(dbfile, indexname, mode='r')
    last_bookmark = 0
    for query in sorted(queries):
        # Use bookmarks to rapidly search the index!
        bookmark = indexfh.find_bookmark(query.encode('utf-8'))
        if bookmark != last_bookmark:
            indexfh.seek(bookmark)
            #print "  Seek to", bookmark
            last_bookmark = bookmark
        for i, line in enumerate(indexfh):
            title, nums = line.decode('utf-8').split('\t')
            if i % 100 == 0:
                timer.step()
            if title in queries:
                locs.update(int(x) for x in nums.split(' '))
            elif title > query:
                break   # This works because the index is sorted.
    indexfh.close()
    for start, nresults in sorted(locs.items()):
        yield (start, None, nresults)
    if debug:
        print '  Completed in', timer, 'seconds.'
Example #4
def convert_model_data(subpath):
    tm = Timer()
    fullpath = os.path.join(baseDir, subpath)

    objects = {}
    mtllib = None
    vertices = []
    texcoords = [[0, 0]]
    normals = []
    mtlBaseDir = os.path.split(fullpath)[0]

    lineID = 0
    for line in open(fullpath, "r"):
        lineID += 1
        # print lineID
        if line.startswith('#'): continue
        v = line.split()
        if not v: continue

        if v[0] == 'o' or v[0] == 'g':
            name = v[1].split('_')[0]
            obj = _Object(name)
            objects[obj.name] = obj
        elif v[0] == 'usemtl':
            materialName = v[1]
            obj.material = mtllib.get(materialName)
        elif v[0] == 'v':
            assert len(v) == 4
            v = map(float, v[1:4])
            vertices.append(v)
        elif v[0] == 'vn':
            assert len(v) == 4
            v = map(float, v[1:4])
            normals.append(v)
        elif v[0] == 'vt':
            assert len(v) == 3
            v = map(float, v[1:3])
            texcoords.append(v)
        elif v[0] == 'mtllib':
            mtllib = MaterialLib.load(os.path.realpath(
                os.path.join(mtlBaseDir, v[1])))
        elif v[0] == 'f':
            indices = v[1:]
            assert len(indices) == 3, 'please use triangle faces'
            # each index tuple: (v, t, n)
            for x in indices:
                x = x.split('/')
                vi, ti, ni = map(int, x)
                obj.vdata.extend(
                    texcoords[ti] + normals[ni-1] + vertices[vi-1])
    data = {
        'objects': objects,
        'mtllib': mtllib,
    }
    print 'convert {}, time: {}ms'.format(subpath, tm.tick())
    return data
Example #5
class QueryExecutor:

    index = None
    timer = None

    def __init__(self, index):
        self.index = index


    def executeQueries(self, queryList):
        self.timer = Timer()
        self.timer.start()

        queryMatchingList = []
        executedTokens = 0

        for query in queryList:
            searchTokens = query.getSearchTokens()
            excludedTokens = query.getExcludedTokens()

            searchResult = QueryResult()
            for token in searchTokens:
                executedTokens += 1
                tmpPostingsList = self.index.getDictionary().getPostingsList(token)
                searchResult.addPostingList(token, tmpPostingsList)

            excludedResult = QueryResult()
            for token in excludedTokens:
                tmpPostingsList = self.index.getDictionary().getPostingsList(token)
                excludedResult.addPostingList(token, tmpPostingsList)

            if(len(excludedResult.getItems()) > 0):
                queryMatching = QueryResult.mergeWithExclusions(searchResult, excludedResult)
            else:
                queryMatching = searchResult

            queryMatchingList.append(queryMatching)

        queryMatching = QueryResult.mergeWithIntersection(queryMatchingList)

        rankedResult = RankedResult()
        for doc, queryResultItem in queryMatching.getItems().items():
            rank = RankProvider.provideRank(queryResultItem, executedTokens)
            rankedResultItem = RankedResultItem(doc, rank, queryResultItem)
            rankedResult.addRankedResultItem(rankedResultItem)

        self.timer.stop()

        return rankedResult.getSortedResult()


    def getTimer(self):
        return self.timer
Example #6
def load_models():
    tm = Timer()
    if config.GZIP_LEVEL is not None:
        infile = gzip.open(config.DAT_PATH, 'rb', config.GZIP_LEVEL)
    else:
        infile = open(config.DAT_PATH, 'rb')
    # data = infile.read()
    modeldatas = cPickle.loads(infile.read())
    infile.close()
    print 'load dat time: {}ms'.format(tm.tick())
    for filepath, data in modeldatas.iteritems():
        models[filepath] = load_single(filepath, data)
Example #7
    def __init__(self, screen, pos, images, scroll_period, duration=-1):
        """
        If duration == -1, animation goes on indefinitely.
        """

        self.screen = screen
        self.pos = pos
        self.images = [pygame.image.load(image) for image in images]
        self.image_ptr = 0
        self.scroll_period = scroll_period
        self.duration = duration
        self.active = True

        self.scroll_timer = Timer(scroll_period, self.advance_images)
        self.active_timer = Timer(duration, self.inactivate, 1)
Example #8
File: creep2.py  Project: MeetLuck/works
 def __init__(self):
     pygame.init()
     self.screen = pygame.display.set_mode( [self.SCREEN_WIDTH,self.SCREEN_HEIGHT],0,32 )
     self.tile_img = pygame.image.load(self.BG_TITLE_IMG).convert_alpha()
     self.tile_img_rect = self.tile_img.get_rect()
     self.field_box = self.getFieldBox()
     self.tboard = self.getTboard()
     self.mboard = self.getMBoard()
     self.clock = pygame.time.Clock()
     self.creep_images = list()
     self.paused = False
     self.creep_images = [
             ( pygame.image.load(f1).convert_alpha(), pygame.image.load(f2).convert_alpha() )
               for f1,f2 in self.CREEP_FILENAMES ]
     explosion_img = pygame.image.load('images/explosion1.png').convert_alpha()
     self.explosion_images = [ explosion_img, pygame.transform.rotate(explosion_img,90) ]
     self.field_rect = self.getFieldRect()
     self.creeps = pygame.sprite.Group()
     self.spawnNewCreep()
     self.creep_spawn_timer = Timer(500, self.spawnNewCreep)
     self.createWalls()
     # create the grid path representation of the grid
     self.grid_nrows = self.FIELD_SIZE[1]/self.GRID_SIZE
     self.grid_ncols = self.FIELD_SIZE[0]/self.GRID_SIZE
     self.goal_coord = self.grid_nrows - 1, self.grid_ncols - 1
     self.gridpath = GridPath(self.grid_nrows,self.grid_ncols,self.goal_coord)
     for wall in self.walls:
         self.gridpath.set_blocked(wall)
     self.options = dict( draw_grid=False )
Example #9
File: elev.py  Project: yuribak/pylevator
    def update_display(self, elevators, floors):

        # elevator positions
        for i, e in enumerate(elevators):
            self.print_elevator(i, e.old_pos, c=" " * (len(str(e)) + 2))
            self.print_elevator(i, e.pos, c=str(e), color=e.state)

        # rider counts
        for f in xrange(len(floors)):
            m = len(floors[f])
            self.screen.print_at(" " * 20, self.frame_x + 2, self.frame_y + self.frame_h - 2 - f)

            label = "{:<10}{:<3}({})".format("*" * min(m, 10), "..." if m > 10 else "", m)
            self.screen.print_at(label, self.frame_x + 2, self.frame_y + self.frame_h - 2 - f)

        # stats:
        self.screen.print_at(" " * self.frame_w, self.frame_x, self.frame_y - 1)
        self.screen.print_at(
            "Time: {:<6.1f}, Produced: {:<6}, Delivered: {:<6} ({:<2.0f}%), Died: {:<6} ({:2.0f}%)".format(
                Timer.timer(self.game).time(),
                self.game.produced,
                self.game.delivered,
                100.0 * self.game.delivered / self.game.produced,
                self.game.died,
                100.0 * self.game.died / self.game.produced,
            ),
            self.frame_x,
            self.frame_y - 1,
        )

        self.screen.refresh()
Example #10
File: player.py  Project: noonat/omplex
    def __init__(self):
        self._player      = None
        self._video       = None
        self._lock        = RLock()
        self.last_update = Timer()

        self.__part      = 1
Example #11
    def storeIndex(self):
        self.timer = Timer()
        self.timer.start()

        storage = Storage()
        storage.saveIndex(self.dictionary)

        self.timer.stop()
Example #12
File: task.py  Project: cms-sw/web-confdb
def worker(args):
    label, method = args

    # call the requested method
    try:
        logger.info('%s: call to DataBuilder.%s' % (label, method))
        t = Timer()
        data = getattr(databuilder, method)()
        t.stop()
        logger.info('%s: done [%.1fs]' % (label, t.elapsed))
        return label, data, None

    except:
        import StringIO
        import traceback
        buffer = StringIO.StringIO()
        traceback.print_exc(file = buffer)
        return label, None, buffer.getvalue()
Example #13
 def __init__(self, **others):
     Triggered.__init__(self, **others)
     AnimationActionSprite.__init__(self, **others)
     
     self.debugging = False
     self.playing = False
     self.timer = Timer(0.2)
     
     self.animation_player.backwards = True
Example #14
 def __init__(self, **others):
     Triggered.__init__(self, **others)
     OnceAnimationActionSprite.__init__(self, **others)
     
     self.debugging = False
     self.playing = False
     self.timer = Timer(0.2)
     
     self.debugging = False
Example #15
File: timeline.py  Project: noonat/omplex
    def __init__(self):
        self.currentItems   = {}
        self.currentStates  = {}
        self.idleTimer      = Timer()
        self.subTimer       = Timer()
        self.serverTimer    = Timer()
        self.stopped        = False
        self.halt           = False

        threading.Thread.__init__(self)
Example #16
    def __init__(self, uuid, commandID, ipaddress="", port=32400, protocol="http", name=""):
        self.poller         = False
        self.uuid           = uuid
        self.commandID      = commandID
        self.url            = ""
        self.name           = name
        self.lastUpdated    = Timer()

        if ipaddress and protocol:
            self.url = "%s://%s:%s" % (protocol, ipaddress, port)
Example #17
    def __init__(self, ):
        super(Scene, self).__init__()

        self.fps = Timer()
        self.surf_main = pg.display.get_surface()

        self.running = False
        self.loaded = False

        self.last_time = 0
Example #18
 def run(self):
     """run
     """
     all_greenlet = []
     # crawl each group URL on a schedule
     for group_url in self.group_list:
         # timer = Timer(random.randint(0, self.interval), self.interval)
         timer = Timer(random.randint(0, 2), self.interval)
         greenlet = gevent.spawn(
             timer.run, self._init_page_tasks, group_url)
         all_greenlet.append(greenlet)
     # produce & consume
     all_greenlet.append(gevent.spawn(self._page_loop))
     all_greenlet.append(gevent.spawn(self._topic_loop))
     # reload proxies every 10 minutes
     proxy_timer = Timer(PROXY_INTERVAL, PROXY_INTERVAL)
     all_greenlet.append(
         gevent.spawn(proxy_timer.run, self.reload_proxies))
     gevent.joinall(all_greenlet)
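The Timer here is constructed as Timer(initial_delay, interval) and driven through timer.run(func, *args) inside a gevent greenlet, which suggests a repeating task: wait once for the initial delay, then call the function every interval seconds. A sketch under that assumption (RepeatingTimer and its behaviour are inferred from the call sites, not taken from the project):

import gevent

class RepeatingTimer:
    # Sketch only: repeating-task timer inferred from Timer(initial_delay, interval)
    # and gevent.spawn(timer.run, func, *args) in the example above.
    def __init__(self, initial_delay, interval):
        self.initial_delay = initial_delay    # seconds to wait before the first call
        self.interval = interval              # seconds between subsequent calls

    def run(self, func, *args):
        gevent.sleep(self.initial_delay)      # stagger the first run
        while True:
            func(*args)
            gevent.sleep(self.interval)       # cooperative sleep between runs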
Example #19
File: lib.py  Project: mxm/yoka
 def __str__(self):
     s = "%s:\n\n" % self.id
     for system in self.systems:
         s += "%s config:\n%s\n\n" % (system, pformat(system.config))
     s += "\n"
     for run_time in self.runs:
         s += Timer.format_run_times(run_time)
         s += "\n"
     s += "\n"
     return s
Example #20
class ToggleText(TextActionSprite, Triggered):
    """ Text that toggles on/off when a trigger event is received. 
        Special parameters are:
        - Delay: amount of time to wait before you can toggle the text
                again.
    """
    def __init__(self, **others):
        Triggered.__init__(self, **others)
        TextActionSprite.__init__(self, **others)
        
        
        # custom parameters
        self.custom_properties = { "Delay" : {"type" : float, "destination" : "delay"} }
        self.parse_catching_errors(self.custom_properties, others, self.__dict__)

        # create the timer
        self.timer = Timer(self.delay)

        # prints lots of debugging text
        self.debugging = False
    
    def update(self, player, collisions_group, **others):
        self.dprint("\n### TriggeredText.update()")
        e = self.get_trigger(self.triggered_code)
        self.dprint("\tEvent:" + str(e))
        if e and self.timer.finished:
            self.timer.reset()
            self.dprint("\t\tToggling text")

            if self.showing:
                self.update_position(player)
                self.dprint("\t\t\tDeactivating text")
                self.showing = False
            else:
                self.update_position(player)
                self.dprint("\t\t\tActivating text")
                self.showing = True
        
        self.update_alpha()
    
    def do(self):
        pass
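parse_catching_errors() is not part of this excerpt; judging from the call, it reads each entry of custom_properties from the raw constructor kwargs, converts it with the declared type and stores it under the declared destination name, so a Delay value ends up as self.delay and feeds Timer(self.delay). A sketch of that assumed behaviour, written as a free function rather than the project's method:

def parse_catching_errors(properties, raw_params, destination):
    # Sketch only: assumed behaviour of the helper called above. For each custom
    # property, convert the raw value with its declared type and store it under
    # the declared destination attribute, reporting (rather than raising) errors.
    for name, spec in properties.items():
        if name not in raw_params:
            continue
        try:
            destination[spec["destination"]] = spec["type"](raw_params[name])
        except (TypeError, ValueError) as error:
            print("Could not parse custom property %r: %s" % (name, error))

With the custom_properties mapping above, a constructor kwarg Delay="0.5" would end up as self.delay == 0.5.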
Example #21
    def __init__(self, source, parserType):
        self.source = source
        self.parserType = parserType
        self.dictionary = Dictionary()

        self.timer = Timer()
        self.timer.start()

        self.setup()

        self.timer.stop()
Example #22
class Animation(object):

    def __init__(self, screen, pos, images, scroll_period, duration=-1):
        """
        If duration == -1, animation goes on indefinitely.
        """

        self.screen = screen
        self.pos = pos
        self.images = [pygame.image.load(image) for image in images]
        self.image_ptr = 0
        self.scroll_period = scroll_period
        self.duration = duration
        self.active = True

        self.scroll_timer = Timer(scroll_period, self.advance_images)
        self.active_timer = Timer(duration, self.inactivate, 1)

    def update(self, time_passed):
        if self.active:
            self.scroll_timer.update(time_passed)
            self.active_timer.update(time_passed)

    def blitme(self):
        if self.active:
            self.update_rect()
            self.screen.blit(self.images[self.image_ptr], self.rect)

    def update_rect(self):
        image_w, image_h = self.images[self.image_ptr].get_size()

        self.rect = self.images[self.image_ptr].get_rect().move(
            self.pos.x - image_w / 2,
            self.pos.y - image_h / 2)

    def advance_images(self):
        self.image_ptr = (self.image_ptr + 1) % len(self.images)

    def inactivate(self):
        if self.duration >= 0:
            self.active = False
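The Animation class assumes a callback-driven timer: Timer(interval, callback) fires the callback repeatedly, Timer(duration, callback, 1) fires it at most once, and both are advanced manually via update(time_passed), with interval and time_passed presumably in the same unit (milliseconds from the game loop). A sketch of such a timer under those assumptions; the tutorial's real Timer is not included in the excerpt:

class CallbackTimer:
    # Sketch only: callback timer matching the usage above. interval and
    # time_passed are assumed to be in the same unit (e.g. milliseconds),
    # and max_calls=-1 means "keep firing forever".
    def __init__(self, interval, callback, max_calls=-1):
        self.interval = interval
        self.callback = callback
        self.max_calls = max_calls
        self._accumulated = 0
        self._fired = 0

    def update(self, time_passed):
        if self.interval <= 0:
            return                            # e.g. duration == -1: never fire
        if self.max_calls != -1 and self._fired >= self.max_calls:
            return                            # one-shot timer already exhausted
        self._accumulated += time_passed
        while self._accumulated >= self.interval:
            self._accumulated -= self.interval
            self._fired += 1
            self.callback()
            if self.max_calls != -1 and self._fired >= self.max_calls:
                break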
Example #23
class RunTime(object):
    def __init__(self):
        # behaviour update callbacks
        self.behav_callbacks = []
        self.init_time = time()
        self.current_time = 0
        self.timer = Timer()

    def run(self):
        while True:
            # update all the behaviours
            for cb in self.behav_callbacks:
                cb(self.current_time)
            # notify all the timer events
            self.timer.update(self.current_time)
            # TODO fps control
            sleep(0.1)
            self.current_time = time()-self.init_time

    def register(self, callback):
        self.behav_callbacks.append(callback)
Example #24
class PingPongTriggeredAnimation(OnceAnimationActionSprite, Triggered):
    """ Animation that tries to be always in start or the end of the
        animation. Kind of an animation of a lever, the lever can be
        switched on/off but alawys end up in the start or end position.
        """
        
    def __init__(self, **others):
        Triggered.__init__(self, **others)
        OnceAnimationActionSprite.__init__(self, **others)
        
        self.debugging = False
        self.playing = False
        self.timer = Timer(0.2)
        
        self.debugging = False

    def do(self):
        pass

    def update(self, player, collisions_group, **others):
        self.dprint("\n### PingPongAnimation.update()")
        e = self.get_trigger(self.triggered_code)
        self.dprint("\tEvent:" + str(e))
        if e and self.timer.finished:
            #~ import pdb; pdb.set_trace()
            self.dprint("\t\tToggling Animation")
            self.timer.reset()
            if self.animation_player.finished:
                #~ import pdb; pdb.set_trace()
                self.animation_player.finished = False
                self.animation_player.backwards = not self.animation_player.backwards
            else:
                if self.animation_player.backwards:
                    self.animation_player.backwards = False
                else:
                    self.animation_player.backwards = True
            
        
        self.next_frame()
Example #25
def make_dat():
    data = {}
    tm = Timer()
    for subpath in config.MODEL_SUBPATHS:
        if subpath.endswith(os.path.sep):
            for f in os.listdir(os.path.join(baseDir, subpath)):
                if extract_num(f) is not None:
                    fpath = os.path.join(subpath, f)
                    data[fpath] = convert_model_data(fpath)
                    # break # dummy
        else:
            data[subpath] = convert_model_data(subpath)
    print 'total convert time: {}ms'.format(tm.tick())
    if config.GZIP_LEVEL is not None:
        print 'compressing...'
        outf = gzip.open(config.DAT_PATH, 'wb', config.GZIP_LEVEL)
    else:
        print 'writing...'
        outf = open(config.DAT_PATH, 'wb')
    cPickle.dump(data, outf, -1)
    outf.close()
    print 'write {}, time: {}ms'.format(config.DAT_PATH, tm.tick())
Example #26
class Scene(object):
    def __init__(self, ):
        super(Scene, self).__init__()

        self.fps = Timer()
        self.surf_main = pg.display.get_surface()

        self.running = False
        self.loaded = False

        self.last_time = 0

    def is_running(self):
        return self.running

    def is_loaded(self):
        return self.loaded

    def on_load(self):
        self.loaded = True

    def on_reset(self):
        pass

    def on_event(self, events):
        pass

    def on_update(self):
        self.fps.on_update()
        # speed_factor = self.fps.speed_factor

    def on_render(self):
        pass

    def on_run(self):
        self.on_event()
        self.on_update()
        self.on_render()
Example #27
    def __init__(self, **others):
        Triggered.__init__(self, **others)
        TextActionSprite.__init__(self, **others)
        
        
        # custom parameters
        self.custom_properties = { "Delay" : {"type" : float, "destination" : "delay"} }
        self.parse_catching_errors(self.custom_properties, others, self.__dict__)

        # create the timer
        self.timer = Timer(self.delay)

        # prints lots of debugging text
        self.debugging = False
Example #28
File: lib.py  Project: mxm/yoka
 def __str__(self):
     # print run times
     s = "Cluster suite %s\n\n" % self.id
     s += Timer.format_run_times(self.run_times)
     s += "\n"
     # print system configs
     s += "Cluster %s config\n%s\n\n" % (self.cluster.__class__.__name__, pformat(self.cluster.config))
     for system in self.systems:
         s += "%s config:\n%s\n\n" % (system, pformat(system.config))
     s += "\n"
     # print benchmark times and system configs
     for benchmark in self.benchmarks:
         s += "%s\n\n" % benchmark
     return s
Example #29
File: elev.py  Project: yuribak/pylevator
    def __init__(self, screen, floor_count, elev_count, life_expectancy, rps):

        self.life_expectancy = life_expectancy
        self.rps = rps

        self.display = Display(screen, floor_count, elev_count, self)

        self.floors = [list() for _ in xrange(floor_count)]
        self.elevators = [Elevator(random.randint(0, floor_count - 1)) for _ in xrange(elev_count)]

        self.timer = Timer.timer(self)

        self.delivered = 0
        self.produced = 0
        self.died = 0
Example #30
def init():
    tm = Timer()
    global screen
    pygame.display.init()

    glutInit()

    screen = pygame.display.set_mode(config.SCREEN_SIZE, 
            pygame.HWSURFACE | pygame.OPENGL | pygame.DOUBLEBUF)

    glEnable(GL_DEPTH_TEST)
    glEnable(GL_RESCALE_NORMAL)
    glEnable(GL_TEXTURE_2D)

    glShadeModel(GL_SMOOTH)

    glClearColor(*config.BACK_COLOR)

    glLight(GL_LIGHT0, GL_AMBIENT, (.5, .5, .5, 1.))
    glLight(GL_LIGHT0, GL_DIFFUSE, (.8, .8, .8, 1.))
    glLight(GL_LIGHT0, GL_SPECULAR, (.5, .5, .5, 1.))
    glLightModelfv(GL_LIGHT_MODEL_AMBIENT, (.4, .4, .4, 1.))
    #if you want to adjust light intensity, edit here
    glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, .1)
    
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    
    glLineWidth(1)
    glMatrixMode(GL_MODELVIEW)

    glEnableClientState(GL_VERTEX_ARRAY)
    glEnableClientState(GL_NORMAL_ARRAY)
    glEnableClientState(GL_TEXTURE_COORD_ARRAY)

    print 'Display init time:', tm.tick()
Example #31
    def getKeywordsAndSave(self, *args, **kwargs):
        import pickle
        freq_lower_bound = int(kwargs["freq_lower_bound"])
        token_len_lower_bound = int(kwargs["token_len_lower_bound"])
        doc_len_lower_bound = int(kwargs["doc_len_lower_bound"])
        doc_len_upper_bound = int(kwargs["doc_len_upper_bound"])

        if str(kwargs["method"]) == "keyword":
            file_keywords = open(
                self.conf_io["prefix"] +
                self.conf_io["output_data_directory"] +
                str(kwargs["target_name"]) + '.fine.keywords', 'w')
        elif str(kwargs["method"]) == "normal":
            file_keywords = open(
                self.conf_io["prefix"] +
                self.conf_io["output_data_directory"] +
                str(kwargs["target_name"]) + '.keywords', 'w')
        tokens = []
        token_indexes = {}
        if bool(kwargs["static_file"]) is True:
            source_name = self.conf_io["prefix"] + self.conf_io[
                "output_data_directory"] + str(kwargs["source_name"])
            with open(source_name, 'r') as f:
                _ind = 0
                for ind, line in enumerate(f):
                    try:
                        with Timer('calculateTokens') as t:
                            tokens.append(
                                self.calculateTokens(
                                    line,
                                    method=str(kwargs["method"]),
                                    doc_len_lower_bound=doc_len_lower_bound,
                                    doc_len_upper_bound=doc_len_upper_bound))
                        # [experimental feature]
                        # this is to be used with LDA
                        # to show what raw doc is associated with each topic
                        token_indexes[ind] = _ind
                        _ind += 1
                    except Exception as e:
                        if e is KeyboardInterrupt:
                            break
                        print e
                        print "error with ", line
                        continue
                    else:
                        pass
                for line in tokens:
                    if line is not None:
                        filtered_tokens = [
                            token for token in line.split(',')
                            if self.frequency[token.lower()] > freq_lower_bound
                            and len(token) > token_len_lower_bound
                        ]
                        filtered_tokens = ','.join(filtered_tokens)
                        file_keywords.write('%s\n' %
                                            (filtered_tokens.encode('utf-8')))
                        file_keywords.flush()
            f.close()
            # experimental
            json.dump(token_indexes,
                      open(self.f_token_indexes + "token_indexes.pickle", "w"),
                      ensure_ascii=True)
        else:
            doc_list = args[0]
            for ind, line in enumerate(list(doc_list)):
                try:
                    tokens.append(
                        self.calculateTokens(
                            line,
                            method=str(kwargs["method"]),
                            doc_len_lower_bound=doc_len_lower_bound,
                            doc_len_upper_bound=doc_len_upper_bound))
                except Exception as e:
                    if e is KeyboardInterrupt:
                        break
                    print e
                    print "error with ", line
                    continue
                else:
                    pass
            for line in tokens:
                if line is not None:
                    filtered_tokens = [
                        token for token in line.split(',')
                        if self.frequency[token.lower()] > freq_lower_bound
                        and len(token) > token_len_lower_bound
                    ]
                    filtered_tokens = ','.join(filtered_tokens)
                    file_keywords.write('%s\n' %
                                        (filtered_tokens.encode('utf-8')))
                    file_keywords.flush()
        file_keywords.close()
        pynlpir.close()
        return True
Example #32
"""
This is for evaluating the paragraph selector.
"""

from utils import Timer
from utils import HotPotDataHandler
from utils import ConfigReader

from modules import ParagraphSelector

import argparse
import sys
import os

# =========== PARAMETER INPUT
take_time = Timer()

parser = argparse.ArgumentParser()
parser.add_argument('config_file',
                    metavar='config',
                    type=str,
                    help='configuration file for evaluation')
parser.add_argument('model_name',
                    metavar='model',
                    type=str,
                    help="name of the model's file")
args = parser.parse_args()
cfg = ConfigReader(args.config_file)

model_abs_path = cfg('model_abs_dir') + args.model_name + "/"
results_abs_path = model_abs_path + args.model_name + ".test_scores"
Example #33
def eval_model(
    model,
    num_classes,
    testset,
    priors,
    thresh=0.005,
    max_per_image=300,
):

    # Testing after training

    print('Start Evaluation...')
    model.eval()
    detector = Detect(num_classes)
    transform = BaseTransform(args.size, (104, 117, 123), (2, 0, 1))
    num_images = len(testset)
    all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    rgbs = dict()
    _t = {'im_detect': Timer(), 'im_nms': Timer()}
    for i in range(num_images):
        img = testset.pull_image(i)
        scale = torch.Tensor(
            [img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        with torch.no_grad():
            x = transform(img).unsqueeze(0)
            (x, scale) = (x.cuda(), scale.cuda())

            _t['im_detect'].tic()
            out = model(x)  # forward pass
            (boxes, scores) = detector.forward(out, priors)
            detect_time = _t['im_detect'].toc()

        boxes *= scale  # scale each detection back up to the image
        boxes = boxes.cpu().numpy()
        scores = scores.cpu().numpy()

        _t['im_nms'].tic()
        for j in range(1, num_classes):
            inds = np.where(scores[:, j - 1] > thresh)[0]
            if len(inds) == 0:
                all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j - 1]
            c_dets = np.hstack(
                (c_bboxes, c_scores[:, np.newaxis])).astype(np.float32,
                                                            copy=False)
            keep = nms(c_dets,
                       thresh=args.nms_thresh)  # non maximum suppression
            c_dets = c_dets[keep, :]
            all_boxes[j][i] = c_dets
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in range(1, num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        nms_time = _t['im_nms'].toc()

        if i == 10:
            _t['im_detect'].clear()
            _t['im_nms'].clear()
        if i % math.floor(num_images / 10) == 0 and i > 0:
            print('[{}/{}]Time results: detect={:.2f}ms,nms={:.2f}ms,'.format(
                i, num_images, detect_time * 1000, nms_time * 1000))
    testset.evaluate_detections(all_boxes, 'eval/{}/'.format(args.dataset))
    model.train()
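The _t['im_detect'] and _t['im_nms'] timers follow the tic()/toc()/clear() convention common in detection codebases: tic() marks a start point, toc() accumulates the measurement and apparently returns the running average per call, and clear() resets the statistics after the warm-up iterations. A hedged sketch of that interface (class name and exact semantics are assumptions):

import time

class TicTocTimer:
    # Sketch only: tic()/toc()/clear() timer in the style used above; assumes
    # toc() returns the running average per call, which is why the example
    # clears the statistics after the first ten warm-up images.
    def __init__(self):
        self.clear()

    def tic(self):
        self._start = time.perf_counter()

    def toc(self, average=True):
        self.diff = time.perf_counter() - self._start
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff

    def clear(self):
        self.total_time = 0.0
        self.calls = 0
        self.diff = 0.0
        self.average_time = 0.0
        self._start = None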
Example #34
def run(dataset, config):
    log.info(f"\n**** Hyperopt-sklearn [v{config.framework_version}] ****\n")
    save_metadata(config)

    is_classification = config.type == 'classification'

    default = lambda: 0
    metrics_to_loss_mapping = dict(
        acc=(default, False), # lambda y, pred: 1.0 - accuracy_score(y, pred)
        auc=(lambda y, pred: 1.0 - roc_auc_score(y, pred), False),
        f1=(lambda y, pred: 1.0 - f1_score(y, pred), False),
        # logloss=(log_loss, True),
        mae=(mean_absolute_error, False),
        mse=(mean_squared_error, False),
        msle=(mean_squared_log_error, False),
        r2=(default, False), # lambda y, pred: 1.0 - r2_score(y, pred)
        rmse=(mean_squared_error, False),
    )
    loss_fn, continuous_loss_fn = metrics_to_loss_mapping[config.metric] if config.metric in metrics_to_loss_mapping else (None, False)
    if loss_fn is None:
        log.warning("Performance metric %s not supported: defaulting to %s.",
                    config.metric, 'accuracy' if is_classification else 'r2')
    if loss_fn is default:
        loss_fn = None

    training_params = {k: v for k, v in config.framework_params.items() if not k.startswith('_')}
    if 'algo' in training_params:
        training_params['algo'] = eval(training_params['algo'])  # evil eval: use get_extensions instead once https://github.com/openml/automlbenchmark/pull/141 is merged

    log.warning("Ignoring cores constraint of %s cores.", config.cores)
    log.info("Running hyperopt-sklearn with a maximum time of %ss on %s cores, optimizing %s.",
             config.max_runtime_seconds, 'all', config.metric)

    X_train = dataset.train.X_enc
    y_train = dataset.train.y_enc

    if is_classification:
        classifier = any_classifier('clf')
        regressor = None
    else:
        classifier = None
        regressor = any_regressor('rgr')

    estimator = HyperoptEstimator(classifier=classifier,
                                  regressor=regressor,
                                  loss_fn=loss_fn,
                                  continuous_loss_fn=continuous_loss_fn,
                                  trial_timeout=config.max_runtime_seconds,
                                  seed=config.seed,
                                  **training_params)

    with InterruptTimeout(config.max_runtime_seconds * 4/3, sig=signal.SIGQUIT):
        with InterruptTimeout(config.max_runtime_seconds, before_interrupt=ft.partial(kill_proc_tree, timeout=5, include_parent=False)):
            with Timer() as training:
                estimator.fit(X_train, y_train)

    log.info('Predicting on the test set.')
    X_test = dataset.test.X_enc
    y_test = dataset.test.y_enc
    with Timer() as predict:
        predictions = estimator.predict(X_test)

    if is_classification:
        probabilities = "predictions"  # encoding is handled by caller in `__init__.py`
    else:
        probabilities = None

    return result(output_file=config.output_predictions_file,
                  predictions=predictions,
                  truth=y_test,
                  probabilities=probabilities,
                  target_is_encoded=is_classification,
                  models_count=len(estimator.trials),
                  training_duration=training.duration,
                  predict_duration=predict.duration)
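Timer is used as a context manager here: the with-block is measured and the result is read back afterwards as training.duration and predict.duration (other examples in this list expose the same idea under names such as interval or elapsed). A minimal sketch of such a context-manager timer, with duration as an assumed attribute name:

import time

class BlockTimer:
    # Sketch only: context-manager timer matching "with Timer() as t: ..."
    # followed by reading t.duration, as in the example above.
    def __init__(self):
        self.duration = None

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.duration = time.perf_counter() - self._start
        return False                          # do not swallow exceptions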
Example #35
class DummyProbe(Behavior):
    def __init__(self, logger, params):
        import pygame
        self.lick_timer = Timer()
        self.lick_timer.start()
        self.ready_timer = Timer()
        self.ready_timer.start()
        self.ready = False
        self.interface = 0
        pygame.init()
        self.screen = pygame.display.set_mode((800, 480))
        super(DummyProbe, self).__init__(logger, params)

    def get_cond_tables(self):
        return ['RewardCond']

    def is_ready(self, duration, since=0):
        if duration == 0: return True
        self.__get_events()
        elapsed_time = self.ready_timer.elapsed_time()
        return self.ready and elapsed_time >= duration

    def is_licking(self,since=0):
        probe = self.__get_events()
        if probe > 0: self.resp_timer.start()
        self.licked_probe = probe
        return probe

    def get_response(self, since=0):
        probe = self.is_licking(since)
        return probe > 0

    def is_correct(self):
        return np.any(np.equal(self.licked_probe, self.curr_cond['probe']))

    def prepare(self, condition):
        self.curr_cond = condition
        self.reward_amount = condition['reward_amount']

    def reward(self):
        self.update_history(self.licked_probe, self.reward_amount)
        self.logger.log('LiquidDelivery', dict(probe=self.licked_probe,
                                               reward_amount=self.reward_amount))
        print('Giving Water at probe:%1d' % self.licked_probe)
        return True

    def punish(self):
        print('punishing')
        probe = self.licked_probe if self.licked_probe > 0 else np.nan
        self.update_history(probe)

    def __get_events(self):
        probe = 0
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    self.logger.log('Lick', dict(probe=1))
                    print('Probe 1 activated!')
                    probe = 1
                    self.lick_timer.start()
                elif event.key == pygame.K_RIGHT:
                    self.logger.log('Lick', dict(probe=2))
                    print('Probe 2 activated!')
                    probe = 2
                elif event.key == pygame.K_SPACE and not self.ready:
                    self.lick_timer.start()
                    self.ready = True
                    print('in position')
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_SPACE and self.ready:
                    self.ready = False
                    print('off position')
                    print(pygame.mouse.get_pos())
            elif event.type == pygame.MOUSEBUTTONDOWN:
                print(pygame.mouse.get_pos())
        return probe
Example #36
    lambda_epoch = lambda e: 1.0 if e < 20 + opt.stop_class_iter else (
        0.06 if e < 40 + opt.stop_class_iter else 0.012
        if e < 50 + opt.stop_class_iter else (0.0024))
    # lambda_epoch = lambda e: 1.0 if e < 10 + opt.stop_class_iter else (0.3 if e < 20 + opt.stop_class_iter
    #                                                                    else 0.1 if e < 30 + opt.stop_class_iter
    #                                                                    else 0.06 if e < 40 + opt.stop_class_iter
    #                                                                    else 0.012 if e < 50 + opt.stop_class_iter
    #                                                                    else (0.0024))

    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                     lr_lambda=lambda_epoch,
                                                     last_epoch=-1)

    max_val_acc = 0.0

    timer = Timer()
    x_entropy = torch.nn.CrossEntropyLoss()

    # tensorboard writer
    writer = SummaryWriter(log_dir=opt.tensorboard_dir)

    # This is where things need to change in the data loader
    train_loader = dloader_train.get_dataloader()
    val_loader = dloader_val.get_dataloader()

    # if opt.luke_warm == 'True' and opt.classification == 'True':
    # num_epoch = opt.num_epoch + opt.stop_class_iter
    # else:
    num_epoch = opt.num_epoch

    # loss * opt.few_shot_loss_weight + loss_class * opt.classifier_loss_weight
Example #37
    def __init__(self, cfg):
        feat_path = cfg['feat_path']
        label_path = cfg.get('label_path', None)
        knn_graph_path = cfg.get('knn_graph_path', None)

        self.k = cfg['k']
        self.feature_dim = cfg['feature_dim']
        self.is_norm_feat = cfg.get('is_norm_feat', True)
        self.save_decomposed_adj = cfg.get('save_decomposed_adj', False)

        self.th_sim = cfg.get('th_sim', 0.)
        self.max_conn = cfg.get('max_conn', 1)
        self.conf_metric = cfg.get('conf_metric')

        with Timer('read meta and feature'):
            if label_path is not None:
                self.lb2idxs, self.idx2lb = read_meta(label_path)
                self.inst_num = len(self.idx2lb)
                self.gt_labels = intdict2ndarray(self.idx2lb)
                self.ignore_label = False
            else:
                self.inst_num = -1
                self.ignore_label = True
            self.features = read_probs(feat_path, self.inst_num,
                                       self.feature_dim)
            if self.is_norm_feat:
                self.features = l2norm(self.features)
            if self.inst_num == -1:
                self.inst_num = self.features.shape[0]
            self.size = 1  # take the entire graph as input

        with Timer('read knn graph'):
            if os.path.isfile(knn_graph_path):
                knns = np.load(knn_graph_path)['data']
            else:
                if knn_graph_path is not None:
                    print('knn_graph_path does not exist: {}'.format(
                        knn_graph_path))
                knn_prefix = os.path.join(cfg.prefix, 'knns', cfg.name)
                knns = build_knns(knn_prefix, self.features, cfg.knn_method,
                                  cfg.knn)

            adj = fast_knns2spmat(knns, self.k, self.th_sim, use_sim=True)

            # build symmetric adjacency matrix
            adj = build_symmetric_adj(adj, self_loop=True)
            adj = row_normalize(adj)
            if self.save_decomposed_adj:
                adj = sparse_mx_to_indices_values(adj)
                self.adj_indices, self.adj_values, self.adj_shape = adj
            else:
                self.adj = adj

            # convert knns to (dists, nbrs)
            self.dists, self.nbrs = knns2ordered_nbrs(knns)

        print('feature shape: {}, k: {}, norm_feat: {}'.format(
            self.features.shape, self.k, self.is_norm_feat))

        if not self.ignore_label:
            with Timer('Prepare ground-truth label'):
                self.labels = confidence(feats=self.features,
                                         dists=self.dists,
                                         nbrs=self.nbrs,
                                         metric=self.conf_metric,
                                         idx2lb=self.idx2lb,
                                         lb2idxs=self.lb2idxs)
                if cfg.eval_interim:
                    _, self.peaks = confidence_to_peaks(
                        self.dists, self.nbrs, self.labels, self.max_conn)
Example #38
    def init_network(self,
                     graph,
                     input_tensors=None,
                     restore_iter=0,
                     prefix='Training_'):
        """ Helper method to initialize the tf networks used """
        with graph.as_default():
            with Timer('building TF network'):
                result = self.construct_model(
                    input_tensors=input_tensors,
                    prefix=prefix,
                    dim_input=self._dO,
                    dim_output=self._dT,
                    network_config=self.network_params)
            inputas, inputbs, outputas, outputbs, smaxas, smaxbs, test_output, lossesa, lossesb, flat_img_inputb, gradients_op = result
            if 'Testing' in prefix:
                self.obs_tensor = self.obsa
                self.state_tensor = self.statea
                self.test_act_op = test_output
                self.image_op = flat_img_inputb

            trainable_vars = tf.trainable_variables()
            total_losses1 = [
                tf.reduce_sum(lossesa[j]) / tf.to_float(self.meta_batch_size)
                for j in range(self.num_updates)
            ]
            total_losses2 = [
                tf.reduce_sum(lossesb[j]) / tf.to_float(self.meta_batch_size)
                for j in range(self.num_updates)
            ]

            if 'Training' in prefix:
                self.total_losses1 = total_losses1
                self.total_loss1 = total_losses1[0]
                self.total_losses2 = total_losses2
                self.outputas = outputas
                self.outputbs = outputbs
                self.smaxas = smaxas
                self.smaxbs = smaxbs
            elif 'Validation' in prefix:
                self.val_total_losses1 = total_losses1
                self.val_total_loss1 = total_losses1[0]
                self.val_total_losses2 = total_losses2
                self.val_outputas = outputas
                self.val_outputbs = outputbs
                self.val_smaxas = smaxas
                self.val_smaxbs = smaxbs

            if 'Training' in prefix:
                decay_steps = FLAGS.metatrain_iterations
                lr_decayed = tf.train.cosine_decay(self.meta_lr,
                                                   self.global_step,
                                                   decay_steps)
                #lr_decayed = tf.train.exponential_decay(self.meta_lr, self.global_step, 1000, 0.96, staircase=True)
                #lr_decayed = self.meta_lr
                self.train_op = tf.train.AdamOptimizer(lr_decayed).minimize(
                    self.total_losses2[self.num_updates - 1],
                    global_step=self.global_step)

                # Add summaries
                summ = [
                    tf.summary.scalar(prefix + 'Pre-update_loss',
                                      self.total_loss1)
                ]
                for k, v in self.weights.items():
                    summ.append(tf.summary.histogram('Weights_of_%s' % (k), v))
                for j in range(self.num_updates):
                    for task_id in range(smaxas[j].shape[0]):
                        imga = inputas[j][task_id, 23:, :, :, :]
                        imgb = inputbs[j][task_id, 23:, :, :, :]
                        summ.append(
                            tf.summary.image(
                                'Task_%d_IMG_A_Step_%d' % (task_id, j), imga,
                                1))
                        summ.append(
                            tf.summary.image(
                                'Task_%d_IMG_B_Step_%d' % (task_id, j), imgb,
                                1))
                        for filt_id in range(smaxas[j].shape[-1]):
                            filta = smaxas[j][task_id, :, :, :,
                                              filt_id:filt_id + 1]
                            filtb = smaxbs[j][task_id, :, :, :,
                                              filt_id:filt_id + 1]
                            summ.append(
                                tf.summary.image(
                                    'Task_%d_Spatial_Softmax_A_%d_Step_%d' %
                                    (task_id, filt_id, j), filta, 1))
                            summ.append(
                                tf.summary.image(
                                    'Task_%d_Spatial_Softmax_B_%d_Step_%d' %
                                    (task_id, filt_id, j), filtb, 1))

                    summ.append(
                        tf.summary.scalar(
                            prefix + 'Post-update_loss_step_%d' % j,
                            self.total_losses2[j]))
                    for k in range(len(self.sorted_weight_keys)):
                        summ.append(
                            tf.summary.histogram(
                                'Gradient_of_%s_step_%d' %
                                (self.sorted_weight_keys[k], j),
                                gradients_op[j][k]))

                self.train_summ_op = tf.summary.merge(summ)
            elif 'Validation' in prefix:
                # Add summaries
                summ = [
                    tf.summary.scalar(prefix + 'Pre-update_loss',
                                      self.val_total_loss1)
                ]
                for j in range(self.num_updates):
                    for task_id in range(smaxas[j].shape[0]):
                        imga = inputas[j][task_id, :, :, :]
                        imgb = inputbs[j][task_id, :, :, :]
                        summ.append(
                            tf.summary.image(
                                'Val_Task_%d_IMG_A_Step_%d' % (task_id, j),
                                imga, 1))
                        summ.append(
                            tf.summary.image(
                                'Val_Task_%d_IMG_B_Step_%d' % (task_id, j),
                                imgb, 1))
                        for filt_id in range(smaxas[j].shape[-1]):
                            filta = smaxas[j][task_id, :, :, :,
                                              filt_id:filt_id + 1]
                            filtb = smaxbs[j][task_id, :, :, :,
                                              filt_id:filt_id + 1]
                            summ.append(
                                tf.summary.image(
                                    'Val_Task_%d_Spatial_Softmax_A_%d_Step_%d'
                                    % (task_id, filt_id, j), filta, 1))
                            summ.append(
                                tf.summary.image(
                                    'Val_Task_%d_Spatial_Softmax_B_%d_Step_%d'
                                    % (task_id, filt_id, j), filtb, 1))

                    summ.append(
                        tf.summary.scalar(
                            prefix + 'Post-update_loss_step_%d' % j,
                            self.val_total_losses2[j]))
                self.val_summ_op = tf.summary.merge(summ)
Example #39
# heuristics = [two_clause_choice]
heuristics = [
    jeroslow_wang_literal_choice, jeroslow_wang_choice, two_clause_choice
]
# heuristics = [first_choice, random_choice, two_clause_choice]

n = 150
rand_problem = random_model.generate_random_problem(n, 4 * n)
print(rand_problem)

rand_instance = Instance()
rand_instance.parse_problem(rand_problem)
rand_instance.setup_watchlist()
rand_assignment = [None] * len(rand_instance.variables)

timer = Timer()
times = defaultdict(list)

for i in range(0, 5):
    print("-----STARTING RUN {}-----".format(i))
    for heuristic in heuristics:
        rand_instance = Instance()
        rand_instance.parse_problem(rand_problem)
        rand_instance.setup_watchlist()
        rand_assignment = [None] * len(rand_instance.variables)

        timer.start()
        sol, _ = solve(rand_instance, rand_assignment, heuristic,
                       max_unit_choice, False)
        tot_time = timer.stop()
        print(f"Elapsed time: {tot_time:0.4f} seconds")
Example #40
def cross_validate_model_fold(chunk_input: WorkerInput) -> ModelResult:
    log("Execution fold", level=2)
    timer = Timer()
    classifier = chunk_input['classifier']
    X_train = chunk_input['X_train']
    y_train = chunk_input['y_train']
    X_test = chunk_input['X_test']
    return_model = chunk_input['return_model']

    if get_log_level() == 1:
        print(".")

    feature_names = \
        chunk_input['feature_names'] if \
            ('feature_names' in chunk_input and chunk_input['feature_names'] is not None) \
            else list(X_train.columns)

    classifier.fit(X_train, y_train, **chunk_input['fit_kwargs'])

    y_predict = Series(classifier.predict(X_test), index=X_test.index)
    y_train_predict = Series(classifier.predict(X_train), index=X_train.index)

    try:
        y_predict_probabilities_raw = classifier.predict_proba(X_test)
        y_train_predict_probabilities_raw = classifier.predict_proba(X_train)
    except AttributeError:
        y_predict_probabilities = y_predict
        y_train_predict_probabilities = y_train_predict
    else:
        probability_columns = [
            f'y_predict_probabilities_{i}'
            for i in range(y_predict_probabilities_raw.shape[1])
        ]
        y_predict_probabilities = DataFrame(y_predict_probabilities_raw,
                                            index=X_test.index,
                                            columns=probability_columns)
        y_train_predict_probabilities = DataFrame(
            y_train_predict_probabilities_raw,
            index=X_train.index,
            columns=probability_columns)

    if y_predict.dtype == np.float:
        y_predict = y_predict \
            .map(lambda v: 0 if v < 0 else v) \
            .map(lambda v: 1 if v > 1 else v) \
            .map(lambda v: round(v))

    try:
        feature_importance = Series(
            classifier[-1].feature_importances_,
            index=feature_names,
        )
    except (TypeError, AttributeError):
        try:
            classifier[-1].coef_
        except AttributeError:
            feature_importance = None
            logging.debug("No feature importance in the result")
        else:
            feature_importance = None
            # feature_importance = Series(classifier[-1].coef_[0], index=feature_names)

    if not return_model:
        try:
            classifier[-1].get_booster().__del__()
        except AttributeError:
            pass

    return ModelResult(y_test_score=y_predict_probabilities,
                       y_test_predict=y_predict,
                       y_train_predict=y_train_predict,
                       y_train_score=y_train_predict_probabilities,
                       feature_importance=feature_importance,
                       model=classifier[-1] if return_model else None,
                       elapsed=timer.elapsed_cpu())
Example #41
# Get the associated row indices
cols = grouped_purchased.StockCode.astype(
    pd.CategoricalDtype(categories=products, ordered=True)).cat.codes

train, test = train_test_split(rows.values, cols.values, quantity)

evaluator = Evaluator(test[0], test[1], test[2])
baseline_model = BaselinePredictor(train[1], train[2])
baseline_fpr, baseline_tpr, baseline_roc = evaluator.roc(
    lambda user, item: baseline_model.pred(item))

train_sparse = sparse.csr_matrix((train[2], (train[0], train[1])),
                                 shape=(len(customers), len(products)))

alpha = 15
with Timer() as cython_als_t:
    user_vecs, item_vecs = implicit.alternating_least_squares(
        (train_sparse * alpha).astype('double'),
        factors=32,
        regularization=0.1,
        iterations=50)
print(f"Time spent in implicit: {cython_als_t.interval}")

svd_predictor = lambda user, item: np.sum(user_vecs[user, :] * item_vecs[
    item, :])
fpr, tpr, roc = evaluator.roc(svd_predictor)

plt.clf()
plt.plot(baseline_fpr, baseline_tpr, label='baseline')
plt.plot(fpr, tpr, label='als')
plt.xlabel('False positive')
Example #42
def run_benchmark(n, ratio_min, ratio_max, ratio_step, timeout_seconds,
                  num_experiments, unit_preference_heuristics,
                  splitting_heuristics):

    # Result dictionary has keys: ratio, unit preference heuristic, splitting heuristic.
    results = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

    # Create a new timer.
    timer = Timer()

    for ratio in np.arange(ratio_min, ratio_max + ratio_step, ratio_step):

        # Calculate the number of clauses for this n.
        l = int(ratio * n)

        # Repeat num_experiment runs with this ratio.
        for _ in range(num_experiments):

            # Generate a random model to test.
            problem = random_model.generate_random_problem(n, l)

            # Iterate over all heuristics.
            for up in unit_preference_heuristics:
                for split in splitting_heuristics:

                    instance = Instance()
                    instance.parse_problem(problem)
                    instance.setup_watchlist()

                    assignments = [None] * len(instance.variables)

                    # Init result variables.
                    solution = None
                    num_calls = None
                    solve_timeout = False

                    timer.start()

                    # Start another thread to attempt solving.
                    with concurrent.futures.ThreadPoolExecutor(
                            max_workers=1) as executor:

                        # Create a worker thread.
                        future = executor.submit(solve, instance, assignments,
                                                 split, up, False)

                        # Catch if this times out.
                        try:
                            solution, num_calls = future.result(
                                timeout=timeout_seconds)
                        except concurrent.futures.TimeoutError:
                            solve_timeout = True
                            timer.stop()

                    if solve_timeout:
                        solve_time = "TIMEOUT"
                        num_calls = -1
                        solution = "UNSAT"
                    else:
                        solve_time = timer.stop()

                    # Add result of this run to the results dictionary.
                    results[ratio][up.__name__][split.__name__].append(
                        (solve_time, num_calls, solution != "UNSAT"))
    return results
Example #43
File: train.py  Project: wuqiangch/DG-Net
                                  config['batch_size'])
print('Every epoch needs %d iterations' % config['epoch_iteration'])
nepoch = 0

print('Note that the dataloader may hang if nworkers is set too high.')

while True:
    for it, ((images_a, labels_a, pos_a),
             (images_b, labels_b,
              pos_b)) in enumerate(zip(train_loader_a, train_loader_b)):
        trainer.update_learning_rate()
        images_a, images_b = images_a.cuda().detach(), images_b.cuda().detach()
        pos_a, pos_b = pos_a.cuda().detach(), pos_b.cuda().detach()
        labels_a, labels_b = labels_a.cuda().detach(), labels_b.cuda().detach()

        with Timer("Elapsed time in update: %f"):
            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, labels_a, pos_a, images_b, labels_b,
                               pos_b, config, iterations)
            torch.cuda.synchronize()

        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("\033[1m Epoch: %02d Iteration: %08d/%08d \033[0m" %
                  (nepoch, iterations + 1, max_iter),
                  end=" ")
            write_loss(iterations, trainer, train_writer)

        # Write images
        if (iterations + 1) % config['image_save_iter'] == 0:
Example #44
    def _train(self, epochs, is_continue, patience=4):
        if not os.path.exists(self.model_name):
            os.makedirs(self.model_name)

        saver = tf.train.Saver(max_to_keep=1)
        if is_continue:
            saver.restore(self.sess,
                          tf.train.latest_checkpoint(self.model_name))
        else:
            self.sess.run(tf.global_variables_initializer())

        best_acc = 0
        nepoch_noimp = 0
        t = Timer()
        for e in range(epochs):
            t.start("Epoch {}".format(e + 1))

            total_train_loss = 0
            c = 0

            t.start("Create training examples")
            train_examples = self.data.create_train_examples(self)
            print("Number of training examples in {}: {}".format(
                e + 1, len(train_examples)))
            t.stop()

            num_batch_train = len(train_examples) // self.batch_size + 1
            display_step = num_batch_train // 4

            for idx, batch in enumerate(
                    self._next_batch(data=train_examples,
                                     num_batch=num_batch_train)):
                feed_dict = self._make_feed_dict(batch, self.keep_prob, True)
                _, train_loss = self.sess.run([self.train_op, self.loss_op],
                                              feed_dict=feed_dict)
                total_train_loss += train_loss
                c += 1
                if idx % display_step == 0:
                    print("Iter {} - Loss: {}".format(idx,
                                                      total_train_loss / c))

            if self.early_stopping:
                dev_acc = self._dev_acc()
                print("Dev accuracy (top {}): {}".format(1, dev_acc))
                if dev_acc > best_acc:
                    saver.save(self.sess, self.model_name + "model")
                    print('Saved the model at epoch {}'.format(e + 1))
                    best_acc = dev_acc
                    nepoch_noimp = 0
                else:
                    nepoch_noimp += 1
                    print("Number of epochs with no improvement: {}".format(
                        nepoch_noimp))
                    if nepoch_noimp >= patience:
                        break
            t.stop()
        if not self.early_stopping:
            saver.save(self.sess, self.model_name + "model")
            print('Saved the model')

        self.sess.close()
Example #45
def test_gcn_v(model, cfg, logger):
    for k, v in cfg.model['kwargs'].items():
        setattr(cfg.test_data, k, v)
    dataset = build_dataset(cfg.model['type'], cfg.test_data)

    folder = '{}_gcnv_k_{}_th_{}'.format(cfg.test_name, cfg.knn, cfg.th_sim)
    oprefix = osp.join(cfg.work_dir, folder)
    oname = osp.basename(rm_suffix(cfg.load_from))
    opath_pred_confs = osp.join(oprefix, 'pred_confs', '{}.npz'.format(oname))

    if osp.isfile(opath_pred_confs) and not cfg.force:
        data = np.load(opath_pred_confs)
        pred_confs = data['pred_confs']
        inst_num = data['inst_num']
        if inst_num != dataset.inst_num:
            logger.warn(
                'instance number in {} is different from dataset: {} vs {}'.
                format(opath_pred_confs, inst_num, len(dataset)))
    else:
        pred_confs, gcn_feat = test(model, dataset, cfg, logger)
        inst_num = dataset.inst_num

    logger.info('pred_confs: mean({:.4f}), max({:.4f}), min({:.4f})'.format(
        pred_confs.mean(), pred_confs.max(), pred_confs.min()))

    logger.info('Convert to cluster')
    with Timer('Prediction to peaks'):
        pred_dist2peak, pred_peaks = confidence_to_peaks(
            dataset.dists, dataset.nbrs, pred_confs, cfg.max_conn)

    if not dataset.ignore_label and cfg.eval_interim:
        # evaluate the intermediate results
        for i in range(cfg.max_conn):
            num = len(dataset.peaks)
            pred_peaks_i = np.arange(num)
            peaks_i = np.arange(num)
            for j in range(num):
                if len(pred_peaks[j]) > i:
                    pred_peaks_i[j] = pred_peaks[j][i]
                if len(dataset.peaks[j]) > i:
                    peaks_i[j] = dataset.peaks[j][i]
            acc = accuracy(pred_peaks_i, peaks_i)
            logger.info('[{}-th conn] accuracy of peak match: {:.4f}'.format(
                i + 1, acc))
            acc = 0.
            for idx, peak in enumerate(pred_peaks_i):
                acc += int(dataset.idx2lb[peak] == dataset.idx2lb[idx])
            acc /= len(pred_peaks_i)
            logger.info(
                '[{}-th conn] accuracy of peak label match: {:.4f}'.format(
                    i + 1, acc))

    with Timer('Peaks to clusters (th_cut={})'.format(cfg.tau_0)):
        pred_labels = peaks_to_labels(pred_peaks, pred_dist2peak, cfg.tau_0,
                                      inst_num)

    if cfg.save_output:
        logger.info('save predicted confs to {}'.format(opath_pred_confs))
        mkdir_if_no_exists(opath_pred_confs)
        np.savez_compressed(opath_pred_confs,
                            pred_confs=pred_confs,
                            inst_num=inst_num)

        # save clustering results
        idx2lb = list2dict(pred_labels, ignore_value=-1)

        opath_pred_labels = osp.join(
            cfg.work_dir, folder, 'tau_{}_pred_labels.txt'.format(cfg.tau_0))
        logger.info('save predicted labels to {}'.format(opath_pred_labels))
        mkdir_if_no_exists(opath_pred_labels)
        write_meta(opath_pred_labels, idx2lb, inst_num=inst_num)

    # evaluation
    if not dataset.ignore_label:
        print('==> evaluation')
        for metric in cfg.metrics:
            evaluate(dataset.gt_labels, pred_labels, metric)
        # H and C-scores
        gt_dict = {}
        pred_dict = {}
        for i in range(len(dataset.gt_labels)):
            gt_dict[str(i)] = dataset.gt_labels[i]
            pred_dict[str(i)] = pred_labels[i]
        bm = ClusteringBenchmark(gt_dict)
        scores = bm.evaluate_vmeasure(pred_dict)
        # fmi_scores = bm.evaluate_fowlkes_mallows_score(pred_dict)
        print(scores)

    if cfg.use_gcn_feat:
        # gcn_feat is saved to disk for GCN-E
        opath_feat = osp.join(oprefix, 'features', '{}.bin'.format(oname))
        if not osp.isfile(opath_feat) or cfg.force:
            mkdir_if_no_exists(opath_feat)
            write_feat(opath_feat, gcn_feat)

        name = rm_suffix(osp.basename(opath_feat))
        prefix = oprefix
        ds = BasicDataset(name=name,
                          prefix=prefix,
                          dim=cfg.model['kwargs']['nhid'],
                          normalize=True)
        ds.info()

        # use top embedding of GCN to rebuild the kNN graph
        with Timer('connect to higher confidence with use_gcn_feat'):
            knn_prefix = osp.join(prefix, 'knns', name)
            knns = build_knns(knn_prefix,
                              ds.features,
                              cfg.knn_method,
                              cfg.knn,
                              is_rebuild=True)
            dists, nbrs = knns2ordered_nbrs(knns)

            pred_dist2peak, pred_peaks = confidence_to_peaks(
                dists, nbrs, pred_confs, cfg.max_conn)
            pred_labels = peaks_to_labels(pred_peaks, pred_dist2peak, cfg.tau,
                                          inst_num)

        # save clustering results
        if cfg.save_output:
            oname_meta = '{}_gcn_feat'.format(name)
            opath_pred_labels = osp.join(
                oprefix, oname_meta, 'tau_{}_pred_labels.txt'.format(cfg.tau))
            mkdir_if_no_exists(opath_pred_labels)

            idx2lb = list2dict(pred_labels, ignore_value=-1)
            write_meta(opath_pred_labels, idx2lb, inst_num=inst_num)

        # evaluation
        if not dataset.ignore_label:
            print('==> evaluation')
            for metric in cfg.metrics:
                evaluate(dataset.gt_labels, pred_labels, metric)
            # H and C-scores
            gt_dict = {}
            pred_dict = {}
            for i in range(len(dataset.gt_labels)):
                gt_dict[str(i)] = dataset.gt_labels[i]
                pred_dict[str(i)] = pred_labels[i]
            bm = ClusteringBenchmark(gt_dict)
            scores = bm.evaluate_vmeasure(pred_dict)
            # fmi_scores = bm.evaluate_fowlkes_mallows_score(pred_dict)
            print(scores)
Example #46
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=20,
                                                   gamma=0.5)

    def save_model(name):
        torch.save(model.state_dict(), osp.join(args.save_path, name + '.pth'))

    trlog = {}
    trlog['args'] = vars(args)
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['train_acc'] = []
    trlog['val_acc'] = []
    trlog['max_acc'] = 0.0

    timer = Timer()

    for epoch in range(1, args.max_epoch + 1):
        lr_scheduler.step()

        model.train()

        tl = Averager()
        ta = Averager()

        for i, batch in enumerate(train_loader, 1):
            data, _ = [_.cuda() for _ in batch]
            p = args.shot * args.train_way
            data_shot, data_query = data[:p], data[p:]

            proto = model(data_shot)
Example #47
def train(net):

    net.gt_vox = tf.placeholder(tf.float32, net.vox_tensor_shape)
    # Add loss to the graph
    net.loss = loss_ce(net.pred_vox, repeat_tensor(net.gt_vox, net.im_batch))

    _t_dbg = Timer()

    # Add optimizer
    global_step = tf.Variable(0, trainable=False, name='global_step')
    decay_lr = tf.train.exponential_decay(args.lr,
                                          global_step,
                                          args.decay_steps,
                                          args.decay_rate,
                                          staircase=True)
    lr_sum = tf.summary.scalar('lr', decay_lr)
    optim = tf.train.AdamOptimizer(decay_lr).minimize(net.loss, global_step)
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()

    # Add summaries for training
    net.loss_sum = tf.summary.scalar('loss', net.loss)
    net.vox_sum = voxel_sum(net)
    net.im_sum = image_sum(net.ims, net.batch_size, net.im_batch)
    merged = tf.summary.merge([net.vox_sum, net.im_sum])
    merged_scalars = tf.summary.merge([net.loss_sum, lr_sum])

    # Initialize dataset
    coord = tf.train.Coordinator()
    dset = ShapeNet(im_dir=im_dir,
                    split_file=args.split_file,
                    rng_seed=args.rng_seed,
                    vox_dir=vox_dir)
    mids = dset.get_smids('train')
    logger.info('Training with %d models', len(mids))

    items = ['im', 'K', 'R', 'vol']

    dset.init_queue(mids,
                    net.im_batch,
                    items,
                    coord,
                    qsize=32,
                    nthreads=args.prefetch_threads)

    iters = 0
    # Training loop
    pbar = tqdm(desc='Training Vox-LSM', total=args.niters)
    with tf.Session(config=get_session_config()) as sess:
        sum_writer = tf.summary.FileWriter(log_dir, sess.graph)
        if args.ckpt is not None:
            logger.info('Restoring from %s', args.ckpt)
            saver.restore(sess, args.ckpt)
        else:
            sess.run(init_op)

        try:
            while True:
                iters += 1
                _t_dbg.tic()
                batch_data = dset.next_batch(items, net.batch_size)
                logging.debug('Data read time - %.3fs', _t_dbg.toc())
                feed_dict = {
                    net.K: batch_data['K'],
                    net.Rcam: batch_data['R'],
                    net.gt_vox: batch_data['vol']
                }
                feed_dict[net.ims] = batch_data['im']

                _t_dbg.tic()
                if args.run_trace and (iters % args.sum_iters == 0
                                       or iters == 1 or iters == args.niters):
                    run_options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                    step_, _, merged_scalars_ = sess.run(
                        [global_step, optim, merged_scalars],
                        feed_dict=feed_dict,
                        options=run_options,
                        run_metadata=run_metadata)
                    sum_writer.add_run_metadata(run_metadata, 'step%d' % step_)
                else:
                    step_, _, merged_scalars_ = sess.run(
                        [global_step, optim, merged_scalars],
                        feed_dict=feed_dict)

                logging.debug('Net time - %.3fs', _t_dbg.toc())

                sum_writer.add_summary(merged_scalars_, step_)
                if iters % args.sum_iters == 0 or iters == 1 or iters == args.niters:
                    image_sum_, step_ = sess.run([merged, global_step],
                                                 feed_dict=feed_dict)
                    sum_writer.add_summary(image_sum_, step_)

                if iters % args.ckpt_iters == 0 or iters == args.niters:
                    save_f = saver.save(sess,
                                        osp.join(log_dir, 'mvnet'),
                                        global_step=global_step)
                    logger.info(' Model checkpoint - {:s} '.format(save_f))

                pbar.update(1)
                if iters >= args.niters:
                    break
        except Exception as e:
            logging.error(repr(e))
            dset.close_queue(e)
        finally:
Example #48
    def __init__(self, parent=None):
        self.timer = Timer()
        if parent:
            self.__dict__.update(parent.__dict__)
Example #49
class Behavior:
    """ This class handles the behavior variables """
    def __init__(self, logger, params):
        self.params = params
        self.resp_timer = Timer()
        self.resp_timer.start()
        self.logger = logger
        self.rew_probe = 0
        self.choices = np.array(np.empty(0))
        self.choice_history = list()  # History term for bias calculation
        self.reward_history = list()  # History term for performance calculation
        self.licked_probe = 0
        self.reward_amount = dict()
        self.curr_cond = []

    def is_ready(self, init_duration, since=0):
        return True, 0

    def get_response(self, since=0):
        return False

    def get_cond_tables(self):
        return []

    def reward(self):
        return True

    def punish(self):
        pass

    def give_odor(self, delivery_idx, odor_idx, odor_dur, odor_dutycycle):
        pass

    def cleanup(self):
        pass

    def prepare(self, condition):
        pass

    def update_history(self, choice=np.nan, reward=np.nan):
        self.choice_history.append(choice)
        self.reward_history.append(reward)
        self.logger.total_reward = np.nansum(self.reward_history)

    def get_false_history(self, h=10):
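        # Length of the trailing run of unrewarded choices (choice made, no
        # reward) within the last h trials.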
        idx = np.logical_and(np.isnan(self.reward_history), ~np.isnan(self.choice_history))
        return np.sum(np.cumprod(np.flip(idx[-h:])))

    def is_sleep_time(self):
        now = datetime.now()
        start = now.replace(hour=0, minute=0, second=0) + self.logger.setup_info['start_time']
        stop = now.replace(hour=0, minute=0, second=0) + self.logger.setup_info['stop_time']
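        # If stop is "earlier" than start, the active (non-sleep) window spans
        # midnight, so push the stop time to the next day.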
        if stop < start:
            stop = stop + timedelta(days=1)
        time_restriction = now < start or now > stop
        return time_restriction

    def is_hydrated(self, rew=False):
        if rew:
            return self.logger.total_reward >= rew
        elif self.params['max_reward']:
            return self.logger.total_reward >= self.params['max_reward']
        else:
            return False
Example #50
from utils import Timer
from utils import Vec2
from utils import pi
import math

DAY=25
PART='a'

print("###############################")
print ("Running solution for day {d} part {p}".format(d=DAY, p=PART))
print("###############################")

timer = Timer()

# Write your code here

lines = []
result = 0

def get_the_bread():
    print(lines)

def transform_subject(subj, loop_size):
    val = 1
    for i in range(loop_size):
        val *= subj
        val %= 20201227
    return val
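
# Note: transform_subject() is plain modular exponentiation, so
# transform_subject(subj, n) is equivalent to pow(subj, n, 20201227).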

def get_loop_size(subj, pub_key):
    val = 1
Example #51
def main():
    timer = Timer()
    args, writer = init()

    train_file = args.dataset_dir + 'train.json'
    val_file = args.dataset_dir + 'val.json'

    few_shot_params = dict(n_way=args.n_way, n_support=args.n_shot, n_query=args.n_query)
    n_episode = 10 if args.debug else 100
    if args.method_type is Method_type.baseline:
        train_datamgr = SimpleDataManager(train_file, args.dataset_dir, args.image_size, batch_size=64)
        train_loader = train_datamgr.get_data_loader(aug = True)
    else:
        train_datamgr = SetDataManager(train_file, args.dataset_dir, args.image_size,
                                       n_episode=n_episode, mode='train', **few_shot_params)
        train_loader = train_datamgr.get_data_loader(aug=True)

    val_datamgr = SetDataManager(val_file, args.dataset_dir, args.image_size,
                                     n_episode=n_episode, mode='val', **few_shot_params)
    val_loader = val_datamgr.get_data_loader(aug=False)

    if args.model_type is Model_type.ConvNet:
        pass
    elif args.model_type is Model_type.ResNet12:
        from methods.backbone import ResNet12
        encoder = ResNet12()
    else:
        raise ValueError('')

    if args.method_type is Method_type.baseline:
        from methods.baselinetrain import BaselineTrain
        model = BaselineTrain(encoder, args)
    elif args.method_type is Method_type.protonet:
        from methods.protonet import ProtoNet
        model = ProtoNet(encoder, args)
    else:
        raise ValueError('')

    from torch.optim import SGD,lr_scheduler
    if args.method_type is Method_type.baseline:
        optimizer = SGD(model.encoder.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.max_epoch, eta_min=0, last_epoch=-1)
    else:
        optimizer = torch.optim.SGD(model.encoder.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4,
                                    nesterov=True)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.5)

    args.ngpu = torch.cuda.device_count()
    torch.backends.cudnn.benchmark = True
    model = model.cuda()

    label = torch.from_numpy(np.repeat(range(args.n_way), args.n_query))
    label = label.cuda()

    if args.test:
        test(model, label, args, few_shot_params)
        return

    if args.resume:
        resume_OK =  resume_model(model, optimizer, args, scheduler)
    else:
        resume_OK = False
    if (not resume_OK) and  (args.warmup is not None):
        load_pretrained_weights(model, args)

    if args.debug:
        args.max_epoch = args.start_epoch + 1

    for epoch in range(args.start_epoch, args.max_epoch):
        train_one_epoch(model, optimizer, args, train_loader, label, writer, epoch)
        scheduler.step()

        vl, va = val(model, args, val_loader, label)
        if writer is not None:
            writer.add_scalar('data/val_acc', float(va), epoch)
        print('epoch {}, val, loss={:.4f} acc={:.4f}'.format(epoch, vl, va))

        if va >= args.max_acc:
            args.max_acc = va
            print('saving the best model! acc={:.4f}'.format(va))
            save_model(model, optimizer, args, epoch, args.max_acc, 'max_acc', scheduler)
        save_model(model, optimizer, args, epoch, args.max_acc, 'epoch-last', scheduler)
        if epoch != 0:
            print('ETA:{}/{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch)))
    if writer is not None:
        writer.close()
    test(model, label, args, few_shot_params)
Example #52
def main():
    t = Timer()
    seed_everything(cfg.common.seed)

    logger_path.mkdir(exist_ok=True)
    logging.basicConfig(filename=logger_path / 'train.log',
                        level=logging.DEBUG)

    dh.save(logger_path / 'config.yml', cfg)
    dh.save(logger_path / 'features.yml', features_params)

    with t.timer('load data'):
        train_x = factory.get_features(features, cfg.data.loader.train)
        test_x = factory.get_features(features, cfg.data.loader.test)
        train_y = factory.get_target(cfg.data.target)

    with t.timer('add oof'):
        if cfg.data.features.oof.name is not None:
            oof, preds = factory.get_oof(cfg)
            train_x['oof'] = oof
            test_x['oof'] = preds
            features.append('oof')

    with t.timer('make folds'):
        fold_df = factory.get_fold(cfg.validation, train_x, train_y)
        if cfg.validation.single:
            fold_df = fold_df[['fold_0']]
            fold_df /= fold_df['fold_0'].max()

    with t.timer('drop index'):
        if cfg.common.drop is not None:
            drop_idx = factory.get_drop_idx(cfg.common.drop)
            train_x = train_x.drop(drop_idx, axis=0).reset_index(drop=True)
            train_y = train_y.drop(drop_idx, axis=0).reset_index(drop=True)
            fold_df = fold_df.drop(drop_idx, axis=0).reset_index(drop=True)

    with t.timer('prepare for ad'):
        if cfg.data.adversarial_validation:
            train_x, train_y = factory.get_ad(cfg, train_x, test_x)

    with t.timer('train and predict'):
        trainer = Trainer(cfg)
        cv = trainer.train(train_df=train_x,
                           target_df=train_y,
                           fold_df=fold_df)
        preds = trainer.predict(test_x)
        trainer.save(run_name)

        run_name_cv = f'{run_name}_{cv:.3f}'
        logger_path.rename(f'../logs/{run_name_cv}')
        logging.disable(logging.FATAL)

    with t.timer('make submission'):
        sample_path = f'../data/input/sample_submission.feather'
        output_path = f'../data/output/{run_name_cv}.csv'
        make_submission(y_pred=preds,
                        target_name=cfg.data.target.name,
                        sample_path=sample_path,
                        output_path=output_path,
                        comp=False)
        if cfg.common.kaggle.submit:
            kaggle = Kaggle(cfg.compe.name, run_name_cv)
            kaggle.submit(comment)

    with t.timer('notify'):
        process_minutes = t.get_processing_time()
        message = f'''{cfg.model.name}\ncv: {cv:.3f}\ntime: {process_minutes}[min]'''
        send_line(notify_params.line.token, message)

        notion = Notion(token=notify_params.notion.token_v2)
        notion.set_url(url=notify_params.notion.url)
        notion.insert_rows({
            'name': run_name_cv,
            'created': now,
            'model': options.model,
            'local_cv': round(cv, 4),
            'time': process_minutes,
            'comment': comment
        })
Example #53
class Trainer():
    def __init__(self):
        self._image_size = [28, 28]
        self._batch_size = 100
        self._num_classes = 10
        self._learning_rate = 1e-3
        self._max_epoch = 100
        self._out_dir = "model_unstable"
        self._log_dir = "log_unstable"
        self._result_dir = "produce_unstable.npy"
        self._ckpt_path = "model_stable/model_88_0.9293.ckpt"
        self._train_x = np.transpose(
            np.load("data/test_x.npy")[:5000, :, :, :], (0, 2, 3, 1))
        self._train_y = np.load("data/test_y.npy")[:5000]
        self._test_x = np.transpose(
            np.load("data/test_x.npy")[5000:, :, :, :], (0, 2, 3, 1))
        self._test_y = np.load("data/test_y.npy")[5000:]
        self._num_samples = len(self._train_y)
        self._test_nums = len(self._test_y)
        self._max_iter = self._num_samples // self._batch_size
        self.network = Model(self._image_size, self._batch_size,
                             self._num_classes)
        self.timer = Timer()

    def shuffle_data(self, images, labels):
        train_x = images.copy()
        train_y = labels.copy()
        state = np.random.get_state()
        np.random.shuffle(train_x)
        np.random.set_state(state)
        np.random.shuffle(train_y)
        return train_x, train_y

    def generate_blobs(self, x, y, start, end):
        blobs = {}
        images = x[start:end, :, :, :]
        labels = y[start:end]
        blobs["images"], blobs["labels"] = images, labels
        return blobs

    def fix_variables(self, sess):
        variables_to_restore = []
        for var in tf.compat.v1.global_variables():
            if "aux_layers" not in var.name:
                variables_to_restore.append(var)
        load_fn = slim.assign_from_checkpoint_fn(self._ckpt_path,
                                                 variables_to_restore,
                                                 ignore_missing_vars=True)
        load_fn(sess)
        print("Load Network: " + self._ckpt_path)

    def get_save_variables(self):
        variables_to_save = []
        for var in tf.compat.v1.global_variables():
            if "aux_layers" in var.name or "attention_layers" in var.name:
                variables_to_save.append(var)
        return variables_to_save

    def train(self):
        config = tf.compat.v1.ConfigProto()
        config.allow_soft_placement = True
        config.gpu_options.allow_growth = True
        with tf.compat.v1.Session(config=config) as sess:
            with tf.device("/cpu:0"):
                global_step = tf.Variable(0, trainable=False)
                learning_rate = tf.Variable(self._learning_rate,
                                            trainable=False)
            tf.compat.v1.set_random_seed(3)
            self.network.build_network(is_training=True,
                                       aux=True,
                                       aux_training=True)
            cross_entropy, regularizer_loss, norm, losses = \
                self.network.add_loss()
            tf.compat.v1.summary.scalar("cross_entropy", cross_entropy)
            tf.compat.v1.summary.scalar("regularizer_loss", regularizer_loss)
            train_op = tf.compat.v1.train.AdamOptimizer(
                learning_rate).minimize(cross_entropy + regularizer_loss,
                                        global_step=global_step)
            merged = tf.compat.v1.summary.merge_all()
            self.saver = tf.compat.v1.train.Saver(
                var_list=self.get_save_variables(), max_to_keep=5)
            summary_writer = tf.compat.v1.summary.FileWriter(
                self._log_dir, sess.graph)
            tf.compat.v1.global_variables_initializer().run()
            self.fix_variables(sess)
            sess.run([
                tf.compat.v1.assign(global_step, 0),
                tf.compat.v1.assign(learning_rate, self._learning_rate)
            ])
            best_acc, epoch_list, test_list = 0, [], []
            for epoch in range(self._max_epoch):
                start, end, iter = 0, self._batch_size, 1
                train_x, train_y = self.shuffle_data(self._train_x,
                                                     self._train_y)
                while iter <= self._max_iter:
                    self.timer.tic()
                    blobs = self.generate_blobs(train_x, train_y, start, end)
                    _cross_entropy, _regularizer_loss, _, _, step, summary = self.network.train_step(
                        sess, train_op, blobs, global_step, merged)
                    summary_writer.add_summary(summary, step)
                    self.timer.toc()
                    if iter % 25 == 0:
                        print(
                            ">>>Epoch: %d\n>>>Iter: %d\n>>>Cross_entropy: %.6f\n>>>Regularizer_loss: %.6f\n>>>Speed: %.6fs\n"
                            % (epoch + 1, iter, _cross_entropy,
                               _regularizer_loss, self.timer.average_time))
                    start = end
                    end = start + self._batch_size
                    iter += 1
                acc_mean = self.test_model(sess)
                if acc_mean > best_acc:
                    best_acc = acc_mean
                    if acc_mean > 0.9:
                        self.snapshot(sess, epoch + 1, acc_mean)
                epoch_list.append(epoch + 1)
                test_list.append(acc_mean)
            summary_writer.close()
            epoch_list, test_list = np.array(epoch_list), np.array(test_list)
            np.save(
                self._result_dir,
                np.concatenate(
                    (epoch_list[np.newaxis, ...], test_list[np.newaxis, ...]),
                    axis=0))
            plt.plot(epoch_list, test_list, color="green")
            plt.show()

    def test_model(self, sess):
        test_start, test_end, acc_sum = 0, self._batch_size, 0.
        while test_start < self._test_nums:
            images = self._test_x[test_start:test_end, :, :, :]
            labels = self._test_y[test_start:test_end]
            _, results = self.network.test_images(sess, images)
            acc_sum += np.sum((labels == results).astype(np.float32))
            test_start = test_end
            test_end = test_start + self._batch_size
        acc_mean = acc_sum / self._test_nums
        print("Accuracy of network in test set is %.6f.\n" % (acc_mean))
        return acc_mean

    def snapshot(self, sess, epoch, acc):
        network = self.network
        file_name = os.path.join(self._out_dir,
                                 "model_%d_%.4f.ckpt" % (epoch, acc))
        self.saver.save(sess, file_name)
        print("Wrote snapshot to: %s\n" % (file_name))
Example #54
def load_coco_json(json_file,
                   image_root,
                   dataset_name=None,
                   extra_annotation_keys=None):
    """
    <ref: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/coco.py:72>
    Load a json file with COCO's instances annotation format.
    Currently supports instance detection, instance segmentation,
    and person keypoints annotations.
    Args:
        json_file (str): full path to the json file in COCO instances annotation format.
        image_root (str or path-like): the directory where the images in this json file exists.
        dataset_name (str): the name of the dataset (e.g., coco_2017_train).
            If provided, this function will also put "thing_classes" into
            the metadata associated with this dataset.
        extra_annotation_keys (list[str]): list of per-annotation keys that should also be
            loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
            "category_id", "segmentation"). The values for these keys will be returned as-is.
            For example, the densepose annotations are loaded in this way.
    Returns:
        list[dict]: a list of dicts in Detectron2 standard dataset dicts format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    Notes:
        1. This function does not read the image files.
           The results do not have the "image" field.
    """
    from pycocotools.coco import COCO
    timer = Timer()
    json_file = Path(json_file)
    image_root = Path(image_root)
    assert json_file.is_file() and image_root.is_dir()
    with contextlib.redirect_stdout(io.StringIO()):
        coco_api = COCO(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} with pycocotools takes {:.2f} seconds.".format(
            json_file, timer.seconds()))

    # ----- Load COCO Categories ----- #
    cat_ids = sorted(coco_api.getCatIds())
    cats = coco_api.loadCats(cat_ids)

    if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
        if "coco" not in dataset_name:
            logger.warning("""
                Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
                """)
    id_map = {v: i for i, v in enumerate(cat_ids)}

    # sort indices for reproducible results
    img_ids = sorted(coco_api.imgs.keys())

    # ---- Load COCO Images & Annotations ---- #
    imgs = coco_api.loadImgs(img_ids)
    anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
    total_num_valid_anns = sum([len(x) for x in anns])
    total_num_anns = len(coco_api.anns)
    if total_num_valid_anns < total_num_anns:
        logger.warning(
            f"{json_file} contains {total_num_anns} annotations, but only "
            f"{total_num_valid_anns} of them match to images in the file.")

    if "minival" not in json_file.name:
        # The popular valminusminival & minival annotations for COCO2014 contain this bug.
        # However the ratio of buggy annotations there is tiny and does not affect accuracy.
        # Therefore we explicitly white-list them.
        ann_ids = [
            ann["id"] for anns_per_image in anns for ann in anns_per_image
        ]
        assert len(set(ann_ids)) == len(
            ann_ids), "Annotation ids in '{}' are not unique!".format(
                json_file)

    imgs_anns = list(zip(imgs, anns))
    logger.info("Loaded {} images in COCO format from {}".format(
        len(imgs_anns), json_file))

    # ---- Generate Dataset Dict ---- #
    dataset_dicts = []
    ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"
                ] + (extra_annotation_keys or [])
    num_instances_without_valid_segmentation = 0

    for (img_dict, anno_dict_list) in tqdm(imgs_anns,
                                           desc="parsing coco annotations"):
        record = {}
        record["file_name"] = str(image_root / img_dict["file_name"])
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        image_id = record["image_id"] = img_dict["id"]

        objs = []

        for anno in anno_dict_list:
            # Check that the image_id in this annotation is the same as
            # the image_id we're looking at.
            # This fails only when the data parsing logic or the annotation file is buggy.

            # The original COCO valminusminival2014 & minival2014 annotation files
            # actually contains bugs that, together with certain ways of using COCO API,
            # can trigger this assertion.
            assert anno["image_id"] == image_id
            assert anno.get(
                "ignore",
                0) == 0, '"ignore" in COCO json file is not supported.'
            obj = {key: anno[key] for key in ann_keys if key in anno}
            segm = anno.get("segmentation", None)
            if segm:  # either list[list[float]] or dict(RLE)
                if isinstance(segm, dict):  # RLE case
                    if isinstance(segm["counts"], list):
                        # convert to compressed RLE
                        segm = mask_util.frPyObjects(segm, *segm["size"])
                else:  # polygon case
                    # filter out invalid polygons (< 3 points)
                    segm = [
                        poly for poly in segm
                        if len(poly) % 2 == 0 and len(poly) >= 6
                    ]
                    if len(segm) == 0:
                        num_instances_without_valid_segmentation += 1
                        logger.warning(f"""
                            Invalid segmentation annotation found for image {anno['image_id']}.
                            """)
                        continue  # ignore this instance

                obj["segmentation"] = segm

            keypts = anno.get("keypoints", None)
            if keypts:  # list[int]
                for idx, v in enumerate(keypts):
                    if idx % 3 != 2:
                        # COCO's segmentation coordinates are floating points in [0, H or W],
                        # but keypoint coordinates are integers in [0, H-1 or W-1]
                        # Therefore we assume the coordinates are "pixel indices" and
                        # add 0.5 to convert to floating point coordinates.
                        keypts[idx] = v + 0.5
                obj["keypoints"] = keypts

            obj["bbox_mode"] = BoxMode.XYWH_ABS
            obj["category_id"] = id_map[obj["category_id"]]
            obj["category_info"] = cats[obj["category_id"]]
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)

    if num_instances_without_valid_segmentation > 0:
        logger.warning(
            "Filtered out {} instances without valid segmentation. ".format(
                num_instances_without_valid_segmentation) +
            "There might be issues in your dataset generation process. "
            "A valid polygon should be a list[float] with even length >= 6.")
    return dataset_dicts
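
# Hypothetical usage sketch (paths and dataset name below are placeholders,
# not taken from the original project):
if __name__ == "__main__":
    dicts = load_coco_json(json_file="annotations/instances_val2017.json",
                           image_root="images/val2017",
                           dataset_name="coco_2017_val")
    print("loaded {} records, first file: {}".format(
        len(dicts), dicts[0]["file_name"]))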
Example #55
    criterion = MultiBoxLoss(num_classes, mutual_guide=args.mutual_guide)
    priorbox = PriorBox(args.base_anchor_size, args.size)
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    if args.trained_model is not None:
        print('loading weights from', args.trained_model)
        state_dict = torch.load(args.trained_model)
        model.load_state_dict(state_dict, strict=True)
    else:
        print('Training {} on {} with {} images'.format(
            args.version, dataset.name, len(dataset)))
        os.makedirs(args.save_folder, exist_ok=True)
        epoch = 0
        timer = Timer()
        for iteration in range(max_iter):
            if iteration % epoch_size == 0:

                # create batch iterator

                rand_loader = data.DataLoader(dataset,
                                              args.batch_size,
                                              shuffle=True,
                                              num_workers=4,
                                              collate_fn=detection_collate)
                batch_iterator = iter(rand_loader)
                epoch += 1

            timer.tic()
            adjust_learning_rate(optimizer, epoch, iteration, args.warm_iter,
Example #56
    def run_train(self, epochs, is_continue=False, patience=4):
        self.epochs = epochs
        timer = Timer()
        timer.start("Training model")
        self._train(epochs, is_continue=is_continue, patience=patience)
        timer.stop()
Example #57
                      **XGB_TRAIN_PARAMS)
    preds[val_idx] = model.predict(d_val)
    preds_test += model.predict(d_test)

    importance_fold = model.get_score(importance_type='weight')
    sum_imp = sum(importance_fold.values())
    for f, s in importance_fold.items():
        if f not in importance:
            importance[f] = 0
        importance[f] += s / sum_imp
    return preds, preds_test, importance


if __name__ == "__main__":

    t = Timer()
    with t.timer(f'fix seed RANDOM_STATE:{RANDOM_STATE}'):
        seed_everything(RANDOM_STATE)

    with t.timer(f'read label'):
        data_path = f'{INPUT_DIR}/train_data/train_task_1_2.csv'
        train = pd.read_csv(data_path)
        if TARGET_TASK == '1':
            y_train = train['IsCorrect'].values
        elif TARGET_TASK == '2':
            y_train = (train['AnswerValue'] - 1).values

    skip_fr = False
    if skip_fr is False:
        with t.timer(f'read features'):
Example #58
class Interface:
    def __init__(self, logger):
        self.logger = logger
        self.probe = 0
        self.lick_tmst = 0
        self.ready_tmst = 0
        self.ready_dur = 0
        self.ready = False
        self.logging = False
        self.timer_probe1 = Timer()
        self.timer_probe2 = Timer()
        self.timer_ready = Timer()
        self.thread = ThreadPoolExecutor(max_workers=2)
        self.probes = (LiquidCalibration()
                       & dict(setup=self.logger.setup)).fetch('probe')
        self.weight_per_pulse = dict()
        self.pulse_dur = dict()
        for probe in list(set(self.probes)):
            key = dict(setup=self.logger.setup, probe=probe)
            dates = (LiquidCalibration() & key).fetch('date', order_by='date')
            key['date'] = dates[-1]  # use the most recent calibration
            self.pulse_dur[probe], pulse_num, weight = \
                (LiquidCalibration.PulseWeight() & key).fetch('pulse_dur', 'pulse_num', 'weight')
            self.weight_per_pulse[probe] = np.divide(weight, pulse_num)

    def give_air(self, probe, duration, log=True):
        pass

    def give_liquid(self, probe, duration=False, log=True):
        pass

    def give_odor(self, odor_idx, duration, log=True):
        pass

    def give_sound(self, sound_freq, duration, dutycycle):
        pass

    def get_last_lick(self):
        probe = self.probe
        self.probe = 0
        return probe, self.lick_tmst

    def probe1_licked(self, channel):
        self.lick_tmst = self.logger.log('Lick', dict(
            probe=1
        )) if self.logging else self.logger.logger_timer.elapsed_time()
        self.timer_probe1.start()
        self.probe = 1

    def probe2_licked(self, channel):
        self.lick_tmst = self.logger.log('Lick', dict(
            probe=2
        )) if self.logging else self.logger.logger_timer.elapsed_time()
        self.timer_probe2.start()
        self.probe = 2

    def in_position(self):
        return True, 0

    def create_pulse(self, probe, duration):
        pass

    def calc_pulse_dur(
        self, reward_amount
    ):  # calculate pulse duration for the desired reward amount
        actual_rew = dict()
        for probe in list(set(self.probes)):
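            # np.interp maps the requested per-pulse weight onto the calibrated
            # (weight per pulse -> pulse duration) curve for this probe.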
            duration = np.interp(reward_amount / 1000,
                                 self.weight_per_pulse[probe],
                                 self.pulse_dur[probe])
            self.create_pulse(probe, duration)
            actual_rew[probe] = np.max((np.min(self.weight_per_pulse[probe]),
                                        reward_amount / 1000)) * 1000  # in uL
        return actual_rew

    def cleanup(self):
        pass
Example #59
    def init_network(self,
                     graph,
                     input_tensors=None,
                     restore_iter=0,
                     prefix='Training_'):
        """ Helper method to initialize the tf networks used """
        with graph.as_default():
            with Timer('building TF network'):
                result = self.construct_model(
                    input_tensors=input_tensors,
                    prefix=prefix,
                    dim_input=self._dO,
                    dim_output=self._dU,
                    network_config=self.network_params)
            outputas, outputbs, test_output, lossesa, lossesb, final_eept_lossesb, flat_img_inputb, gradients = result
            if 'Testing' in prefix:
                self.obs_tensor = self.obsa
                self.state_tensor = self.statea
                self.test_act_op = test_output
                self.image_op = flat_img_inputb

            trainable_vars = tf.trainable_variables()
            total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(
                self.meta_batch_size)
            total_losses2 = [
                tf.reduce_sum(lossesb[j]) / tf.to_float(self.meta_batch_size)
                for j in range(self.num_updates)
            ]
            total_final_eept_losses2 = [
                tf.reduce_sum(final_eept_lossesb[j]) /
                tf.to_float(self.meta_batch_size)
                for j in range(self.num_updates)
            ]

            if 'Training' in prefix:
                self.total_loss1 = total_loss1
                self.total_losses2 = total_losses2
                self.total_final_eept_losses2 = total_final_eept_losses2
            elif 'Validation' in prefix:
                self.val_total_loss1 = total_loss1
                self.val_total_losses2 = total_losses2
                self.val_total_final_eept_losses2 = total_final_eept_losses2

            if 'Training' in prefix:
                self.train_op = tf.train.AdamOptimizer(self.meta_lr).minimize(
                    self.total_losses2[self.num_updates - 1])
                # Add summaries
                summ = [
                    tf.summary.scalar(prefix + 'Pre-update_loss',
                                      self.total_loss1)
                ]
                for j in xrange(self.num_updates):
                    summ.append(
                        tf.summary.scalar(
                            prefix + 'Post-update_loss_step_%d' % j,
                            self.total_losses2[j]))
                    summ.append(
                        tf.summary.scalar(
                            prefix + 'Post-update_final_eept_loss_step_%d' % j,
                            self.total_final_eept_losses2[j]))
                    for k in xrange(len(self.sorted_weight_keys)):
                        summ.append(
                            tf.summary.histogram(
                                'Gradient_of_%s_step_%d' %
                                (self.sorted_weight_keys[k], j),
                                gradients[j][k]))
                self.train_summ_op = tf.summary.merge(summ)
            elif 'Validation' in prefix:
                # Add summaries
                summ = [
                    tf.summary.scalar(prefix + 'Pre-update_loss',
                                      self.val_total_loss1)
                ]
                for j in xrange(self.num_updates):
                    summ.append(
                        tf.summary.scalar(
                            prefix + 'Post-update_loss_step_%d' % j,
                            self.val_total_losses2[j]))
                    summ.append(
                        tf.summary.scalar(
                            prefix + 'Post-update_final_eept_loss_step_%d' % j,
                            self.val_total_final_eept_losses2[j]))
                self.val_summ_op = tf.summary.merge(summ)
Example #60
    """
    print(formatting(com.rank, format_action('receive', rows=rws)))

    inv = gauss(np.array(rws, dtype=np.float64), com, n)

    print(formatting(com.rank, format_action('inverse', rows=inv)))

    return inv


comm = MPI.COMM_WORLD
master = 0
A, b, rows = [], [], []

if comm.rank == master:
    t = Timer('TOTAL')
    n = int(inp.readline())

    for line in inp:
        A.append(str_to_row(line))
        b.append(int(line[-2:-1]))

    a = np.array(A, dtype=np.float64)
    det = la.det(np.array(A))

    t_inv = Timer('inversion')

    if not det:
        print(
            formatting(
                comm.rank,