Example #1
0
	def onFinishRace( self, event ):
		if Model.race is None or not Utils.MessageOKCancel(self, _('Finish Race Now?'), _('Finish Race')):
			return
			
		with Model.LockRace() as race:
			race.finishRaceNow()
			if race.numLaps is None:
				race.numLaps = race.getMaxLap()
			SetNoDataDNS()
			Model.resetCache()
		
		Utils.writeRace()
		self.refresh()
		mainWin = Utils.getMainWin()
		if mainWin:
			mainWin.refresh()
		
		OutputStreamer.writeRaceFinish()
		OutputStreamer.StopStreamer()
		try:
			ChipReader.chipReaderCur.StopListener()
		except:
			pass

		if getattr(Model.race, 'ftpUploadDuringRace', False):
			realTimeFtpPublish.publishEntry( True )
Example #2
0
	def setState( self ):
		if self.iUndo is None:
			return
		with Model.LockRace() as race:
			raceNew = pickle.loads( self.undoStack[self.iUndo] )
			Model.setRace( raceNew )
		updateUndoStatus()
Example #3
0
	def doNumSelect( self, event ):
		grid = event.GetEventObject()
		self.iLap = None
		
		if self.isEmpty:
			return
		row, col = event.GetRow(), event.GetCol()
		self.iRow, self.iCol = row, col
		if row >= self.labelGrid.GetNumberRows():
			return
			
		if grid == self.lapGrid and self.lapGrid.GetCellValue(row, col):
			try:
				colName = self.lapGrid.GetColLabelValue( col )
				self.iLap = int( reLapMatch.match(colName).group(1) )
			except:
				pass
		
		value = self.labelGrid.GetCellValue( row, 1 )
		numSelect = value if value else None
		if self.numSelect != numSelect:
			self.numSelect = numSelect
			self.showNumSelect()
		mainWin = Utils.getMainWin()
		if mainWin:
			historyCategoryChoice = mainWin.history.categoryChoice
			historyCat = FixCategories( historyCategoryChoice )
			if historyCat is not None:
				cat = FixCategories( self.categoryChoice )
				if historyCat != cat:
					Model.setCategoryChoice( self.categoryChoice.GetSelection(), 'resultsCategory' )
					SetCategory( historyCategoryChoice, cat )
			mainWin.setNumSelect( numSelect )
Example #4
0
def ChangeProperties( parent ):
	propertiesDialog = PropertiesDialog( parent, -1, _("Change Properties"), showFileFields = False, refreshProperties = True, size=(600,400) )
	propertiesDialog.properties.setEditable( True )
	try:
		if propertiesDialog.ShowModal() != wx.ID_OK:
			raise NameError('User Cancel')
		mainWin = Utils.getMainWin()
		dir = os.path.dirname( mainWin.fileName )
		
		newBaseName = propertiesDialog.properties.getFileName()
		newFName = os.path.join( dir, newBaseName )
		
		if newFName != mainWin.fileName:
			if Utils.MessageOKCancel(parent, _("The filename will be changed to:\n\n{}\n\nContinue?").format(newBaseName), _("Change Filename?")):
				if os.path.exists(newFName):
					if not Utils.MessageOKCancel(parent, _("This file already exists:\n\n{}\n\nOverwrite?").format(newFName), _("Overwrite Existing File?")):
						raise NameError('User Cancel')
					
		propertiesDialog.properties.update()
		mainWin.fileName = newFName
		Model.resetCache()
		mainWin.writeRace()
		Utils.refresh()
		wx.CallAfter( Utils.refreshForecastHistory )
			
	except (NameError, AttributeError, TypeError):
		pass
	
	propertiesDialog.Destroy()
Example #5
0
def getNav(postion,all=False):
    '''Given a position `postion`, return a tuple of
    (records at the current position, records of the position's descendants,
     entry levels with their positions).
        When all=False, the descendants do not include revision copies.
        "Entry levels with their positions" is the ordered list generated from the position,
        e.g. root/a/b -> [('root','root'),('root/a','root'),('root/a/b','root')]
    '''
    articles=Model.wiki_article_get(postion=postion,using='yes')
    childrens,postions=Model.wiki_article_get_all_childrens_postion(postion,all,)
    #walk the (possibly nested) list and collect its dict elements into the list lis
    lis=[]
    def g(li):
        if type(li)==list:
            for i in li:
                g(i)
        elif type(li)==dict:
            lis.append(li)
        else:
            pass
    g(childrens)
    #build the article-level link-to-position correspondence as an ordered list of tuples
    i2,count,pos=0,postion.count('/'),{'root':'root'}
    while count :
        i1=postion.find('/',i2)
        i2=postion.find('/',i1+1)
        if i2==-1:i2=None
        pos.update({postion[:i2]:postion[i1+1:i2]})
        count-=1  
    temp=sorted(pos.items(),key=lambda e:e[0],reverse =False)
    pos=temp
    return articles,lis,pos   
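A standalone sketch (plain Python, no Model access; position_levels is a hypothetical helper name) of the prefix-splitting loop used in getNav above: each '/'-separated prefix of the position is paired with the segment that ends it.

def position_levels(position):
    # Same scanning logic as the while-loop in getNav: walk the '/' separators,
    # recording each prefix together with its last segment.
    i2, count, pos = 0, position.count('/'), {'root': 'root'}
    while count:
        i1 = position.find('/', i2)
        i2 = position.find('/', i1 + 1)
        if i2 == -1:
            i2 = None
        pos[position[:i2]] = position[i1 + 1:i2]
        count -= 1
    return sorted(pos.items())

print(position_levels('root/a/b'))
# -> [('root', 'root'), ('root/a', 'a'), ('root/a/b', 'b')]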
Example #6
0
	def handleConstantDown(self,key):
		if key == 'e' and Model.camove.count("forward") == 0:
			Model.getGame().getPlayer().running()
			Model.camove.append("forward")
		elif key == 'd' and Model.camove.count("backward") == 0:
			Model.camove.append("backward")
		elif key == 'w' and Model.camove.count("sleft") == 0:
			Model.camove.append("sleft")
		elif key == 'r' and Model.camove.count("sright") == 0:
			Model.camove.append("sright")
		elif key == 'f' and Model.camove.count("tright") == 0:
			Model.camove.append("tright")
		elif key == 's' and Model.camove.count("tleft") == 0:
			Model.camove.append("tleft")
		elif key == '5' and Model.camove.count("cameraright") == 0:
			Model.camove.append("cameraright")
		elif key == '1' and Model.camove.count("cameraleft") == 0:
			Model.camove.append("cameraleft")
		elif key == 'x' and Model.camove.count("zoomout") == 0:
			Model.camove.append("zoomout")
		elif key == 'v' and Model.camove.count("zoomin") == 0:
			Model.camove.append("zoomin")
		elif key == '4' and Model.camove.count("cameradown") == 0:
			Model.camove.append("cameradown")
		elif key == '2' and Model.camove.count("cameraup") == 0:
			Model.camove.append("cameraup")
Example #7
0
def StartRaceNow():
	global undoResetTimer
	if undoResetTimer and undoResetTimer.IsRunning():
		undoResetTimer.Stop()
	undoResetTimer = None
	JChip.reset()
	
	undo.clear()
	undo.pushState()
	with Model.LockRace() as race:
		if race is None:
			return
		
		if not getattr(race, 'enableJChipIntegration', False):
			race.resetStartClockOnFirstTag = False
		Model.resetCache()
		race.startRaceNow()
		
	OutputStreamer.writeRaceStart()
	VideoBuffer.ModelStartCamera()
	
	# Refresh the main window and switch to the Record pane.
	mainWin = Utils.getMainWin()
	if mainWin is not None:
		mainWin.showPageName( _('Record') )
		mainWin.refresh()
	
	# For safety, clear the undo stack after 8 seconds.
	undoResetTimer = wx.CallLater( 8000, undo.clear )
	
	if getattr(race, 'ftpUploadDuringRace', False):
		realTimeFtpPublish.publishEntry( True )
Example #8
0
    def _combine_hosts(self):
        ct = ConfiguredTest()
        ct.resources = Resources.resources.resources()
        ct.hosts = {}
        ct.end_policy = Schedule.get_schedule().test_end_policy()
        ct.setup_phase_delay = Schedule.get_schedule().setup_phase_delay()
        ct.triggers = Schedule.get_schedule().triggers()

        for h in Model.get_model().hosts():
            host = ConfiguredHost()
            host.model = h
            host.device = h.bound()
            host.schedule = Schedule.get_schedule().host_schedule(h['name'])

            resources = set(h.needed_resources())
            for event in host.schedule:
                resources.update(event.command().needed_resources())

            def resolve_resource(r):
                if isinstance(r, str):
                    return Utils.resolve_resource_name(r)
                return r

            host.resources = set(map(resolve_resource, resources))

            ct.hosts[h['name']] = host

        ct.sanity_check()

        ct.model = Model.get_model()
        ct.laboratory = Laboratory.get_laboratory()
        ct.schedule = Schedule.get_schedule()
        ct.mapping = Mapping.get_mapping()

        self._configured_test = ct
Example #9
0
def move(forward,left):
	if forward != 0 and left != 0:
		forward = forward/math.sqrt(2)
		left = left/math.sqrt(2)

	player = Model.getGame().getPlayer()
	camera = Model.getCamera()

	fx = -math.cos(player.horizon)
	fy = math.sin(player.horizon)
	lx = math.cos(player.horizon+(math.pi/2))
	ly = -math.sin(player.horizon+(math.pi/2))

	player.dX(-forward*fx)
	camera.location[0]-=forward*fx
	camera.lookAt[0]-=forward*fx
	player.dY(-forward*fy)
	camera.location[2]-=forward*fy
	camera.lookAt[2]-=forward*fy

	player.dX(left*lx)
	camera.location[0]+=left*lx
	camera.lookAt[0]+=left*lx
	player.dY(left*ly)
	camera.location[2]+=left*ly
	camera.lookAt[2]+=left*ly
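A minimal standalone check (no Model/camera objects involved) of the diagonal normalization at the top of move() above: dividing both components by sqrt(2) keeps the combined displacement the same length as a single-axis move.

import math

forward, left = 1.0, 1.0          # both movement keys held: diagonal movement
forward /= math.sqrt(2)
left /= math.sqrt(2)
# the combined displacement has magnitude 1.0, the same speed as moving along one axis
print(math.hypot(forward, left))  # -> 1.0 (up to floating point)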
Example #10
0
 def addPersonalityTypes(self, agents):
     """
     Stores all of the needed data for each personality.
     
     agents: list of Person objects
     """
     
     if len(self.acceptedTypes) == 0 or (VType.personalityGraphs in self.acceptedTypes):
         for agent in agents:
             self.personalityPostsSent[agent.p_type - 1] += agent.posts_sent
             self.personalityConnections[agent.p_type - 1] += len(agent.affinity_map)
             self.personalityFriends[agent.p_type - 1] += len(agent.friends)
             self.personalityEnemies[agent.p_type - 1] += len(agent.enemies)
             
             friendsDistance = 0.0
             for friend in agent.friends:
                 friendsDistance += M.find_distance(agent, friend)
                 
             enemiesDistance = 0.0
             for enemy in agent.enemies:
                 enemiesDistance += M.find_distance(agent, enemy)
             
             if len(agent.friends) != 0:
                 self.personalityFriendsDistance[agent.p_type - 1] += friendsDistance / len(agent.friends)
             if len(agent.enemies) != 0:
                 self.personalityEnemiesDistance[agent.p_type - 1] += enemiesDistance / len(agent.enemies)
             
         self.personalityConnections = (N.array(self.personalityConnections) / N.array(P.categoryNumOfPeople(agents))).tolist()
         self.personalityFriends = (N.array(self.personalityFriends) / N.array(P.categoryNumOfPeople(agents))).tolist()
         self.personalityEnemies = (N.array(self.personalityEnemies) / N.array(P.categoryNumOfPeople(agents))).tolist()
         self.personalityFriendsDistance = (N.array(self.personalityFriendsDistance) / N.array(P.categoryNumOfPeople(agents))).tolist()
         self.personalityEnemiesDistance = (N.array(self.personalityEnemiesDistance) / N.array(P.categoryNumOfPeople(agents))).tolist()
         self.updatePersonalityGraphs()
Example #11
0
def findFreeRoom(building,floor=None,stime=None,etime=None):
  print "Got free room request in: ",building, " ", floor, " from: ",stime, " to: ", etime

  if stime is None:
    stime = float(datetime.datetime.now().timetuple()[3])
  if etime is None:
    etime = 19.0

  if stime >= 19.0:
    etime = 24.0

  if floor is None:
    try:
      b = Model.findBuilding(building)
      r = b.getAllRooms()
    except:
      raise
  else:
    try:
      f = Model.findFloor(building,floor)
      r = f.getAllRooms()
    except:
      raise
  rooms = filter(lambda x: isAllocateableRoom(x),r)
  ret = []
  for room in rooms:
#    print "Reading location from", room.building, " ", room.floor, " ", room.number 
    if ETHReadRoomAllocation.isRoomFree(room,stime,etime):
      ret.append(room.getDetailedInfo())

  return ret
Example #12
0
	def handleUpKey(self,key,x,y):
		if key == 'e' and Model.camove.count("forward") > 0:
			Model.getGame().getPlayer().standing()
			Model.camove.remove("forward")
		elif key == 'd' and Model.camove.count("backward") > 0:
			Model.camove.remove("backward")
		elif key == 'w' and Model.camove.count("sleft") > 0:
			Model.camove.remove("sleft")
		elif key == 'r' and Model.camove.count("sright") > 0:
			Model.camove.remove("sright")
		elif key == 'f' and Model.camove.count("tright") > 0:
			Model.camove.remove("tright")
		elif key == 's' and Model.camove.count("tleft") > 0:
			Model.camove.remove("tleft")
		elif key == '5' and Model.camove.count("cameraright") > 0:
			Model.camove.remove("cameraright")
		elif key == '1' and Model.camove.count("cameraleft") > 0:
			Model.camove.remove("cameraleft")
		elif key == 'x' and Model.camove.count("zoomout") > 0:
			Model.camove.remove("zoomout")
		elif key == 'v' and Model.camove.count("zoomin") > 0:
			Model.camove.remove("zoomin")
		elif key == '4' and Model.camove.count("cameradown") > 0:
			Model.camove.remove("cameradown")
		elif key == '2' and Model.camove.count("cameraup") > 0:
			Model.camove.remove("cameraup")
Example #13
0
	def setCategory( self, category ):
		for i, c in enumerate(Model.race.getCategories( startWaveOnly=False ) if Model.race else [], 1):
			if c == category:
				SetCategory( self.categoryChoice, c )
				Model.setCategoryChoice( i, 'resultsCategory' )
				return
		SetCategory( self.categoryChoice, None )
		Model.setCategoryChoice( 0, 'resultsCategory' )
Example #14
0
 def POST(self):
     SName=self.SName
     kw=self.getAttrDict('TFalg','TUid','TTitle','TContents','TScore','TTime')
     kw['TId']=self.getId()
     kw['TRecommend']=False
     kw['TClickCount']=0
     Model.perInfoCreditEdit(self.TUid,int("-"+self.TScore))
     rowNum=Model.TAdd(SName,**kw)
     return self.resAjax(rowNum,kw['TId'])
Example #15
0
def draw():
	Util.setup2D()

	glColor(.1,.1,.5)
	glRectf(0,Model.getWinfo().h/2,Model.getWinfo().w,Model.getWinfo().h)
	glColor(.1,.5,.1)
	glRectf(0,0,Model.getWinfo().w,Model.getWinfo().h/2)

	Util.finish2D()
Example #16
0
 def GET(self,category):
     if category=='baseInfor':
         res=Model.admin_meeting_Qurery(category=category)
     elif category=='participants':
         res=Model.admin_meeting_Qurery(meetingID=web.input()['_id']) #participants of the same meeting share a meetingID equal to the meeting record's _id
     if res:
         count=len(res)
         t = '{' + 'data:' + json.dumps(res, cls=object_encoder) + ',total:'+str(count)+'}'
         return t
Example #17
0
 def GET(self):
     _id=web.input(_id='-1')['_id']
     record=Model.wiki_article_get(_id=_id)
     if getNav(record[0]['postion'])[1]: #there must be no articles under this entry
         return response(False,'删除失败')
     if Model.wiki_del(_id):
         return response(True,'删除成功')
     else:
         return response(False,'删除失败')
Example #18
0
	def handleKey(self,key,x,y):
		if key == 'q':
			sys.exit(1)	
		elif key == 'v':
			pass
		elif key == 'c':
			pass
		elif key == 'g':
			Model.changeView("maingame")
Example #19
0
def delete_all(request):
    if str(request.path).__contains__('apps'):
        result = Model.delete_app()
    elif str(request.path).__contains__('policies'):
        result = Model.delete_policy()
    else:
        result = Model.delete_cron()
    if result:
        return web.Response(status=200,body='OK'.encode('utf-8'))
    return web.Response(status=200,body='Error!'.encode('utf-8'))
Example #20
0
def circle(horizontal,vertical,distance,x,y):
	r = 100
	height = Model.getGame().getPlayer().z + Model.getGame().getPlayer().physical.staticHeight
	lx = -math.cos(horizontal)*distance+x
	ly = math.sin(horizontal)*distance+y
	lz = math.sin(vertical)*distance*2+height
	cx = math.cos(horizontal)*r+x
	cy = -math.sin(horizontal)*r+y
	cz = -math.sin(vertical)*r*2+height
	return [lx,lz,ly,cx,cz,cy]
Example #21
0
def delete(request):
    if str(request.path).__contains__('apps'):
        result = Model.delete_app(request.match_info.get('name'))
    elif str(request.path).__contains__('policies'):
        result = Model.delete_policy(request.match_info.get('policy_uuid'))
    else:
        result = Model.delete_cron(request.match_info.get('cron_uuid'))
    if result:
        return web.Response(status=200,body='OK'.encode('utf-8'))
    return web.Response(status=200,body='Error!'.encode('utf-8'))
Example #22
0
	def commit( self ):
		success = SetNewFilename( self, self )
		self.doCommit()
		Model.resetCache()
		mainWin = Utils.getMainWin()
		if mainWin:
			wx.CallAfter( mainWin.writeRace, False )
		wx.CallAfter( Utils.refreshForecastHistory )
		if not success and mainWin:
			wx.CallAfter( mainWin.showPageName, _("Properties") )
Example #23
0
def updateCamera():
	camera = Model.getCamera()
	player = Model.getGame().getPlayer()

	locs = circle(camera.horizon,camera.vertical,camera.distance,player.getX(),player.getY())
	camera.location[0] = locs[0]
	camera.location[1] = locs[1]
	camera.location[2] = locs[2]
	camera.lookAt[0] = locs[3]
	camera.lookAt[1] = locs[4]
	camera.lookAt[2] = locs[5]
Example #24
0
def closeday():
	#Close the program and aggregate values in the list. Call get functions from model to do this.
	ReportCall1()
	CRM()
	printCloseSales()
	custID = view.inputCustID()
	model.setCustID(custID)
	model.setCustomerNAME(custID, name)  # 'name' is assumed to be collected elsewhere (not defined in this snippet)
	model.setSKU(custID, view.inputSKU())
	model.setCC(custID, view.inputCC())
	model.setSales(custID, view.inputSales())
Example #25
0
def GetResults( category ):
	# If the spreadsheet changed, clear the cache to update the results with new data.
	try:
		excelLink = Model.race.excelLink
		externalInfo = excelLink.read()
		if excelLink.readFromFile:
			Model.resetCache()
	except Exception as e:
		pass
		
	return GetResultsWithData( category )
Example #26
0
def ExtractRaceResultsCrossMgr( raceInSeries ):
	fileName = raceInSeries.fileName
	try:
		with open(fileName, 'rb') as fp, Model.LockRace() as race:
			race = pickle.load( fp )
			isFinished = race.isFinished()
			race.tagNums = None
			race.resetAllCaches()
			Model.setRace( race )
		
		ResetExcelLinkCache()
		Model.resetCache()

	except IOError as e:
		return False, e, []
		
	raceURL = getattr( race, 'urlFull', None )
	raceResults = []
	for category in race.getCategories( startWaveOnly = False ):
		if not category.seriesFlag:
			continue
		
		results = GetResults( category, True )
		for rr in results:
			if rr.status != Model.Rider.Finisher:
				continue
			info = {
				'raceURL':		raceURL,
				'raceInSeries':	raceInSeries,
			}
			for fTo, fFrom in [('firstName', 'FirstName'), ('lastName', 'LastName'), ('license', 'License'), ('team', 'Team')]:
				info[fTo] = getattr(rr, fFrom, '')
			info['categoryName'] = category.fullname
			
			for fTo, fFrom in [('raceName', 'name'), ('raceOrganizer', 'organizer')]:
				info[fTo] = getattr(race, fFrom, '')
			info['raceFileName'] = fileName
			if race.startTime:
				info['raceDate'] = race.startTime
			else:
				try:
					d = race.date.replace('-', ' ').replace('/', ' ')
					fields = [int(v) for v in d.split()] + [int(v) for v in race.scheduledStart.split(':')]
					info['raceDate'] = datetime.datetime( *fields )
				except:
					info['raceDate'] = None
			
			info['bib'] = int(rr.num)
			info['rank'] = int(rr.pos)
			raceResults.append( RaceResult(**info) )
		
	Model.race = None
	return True, 'success', raceResults
Example #27
0
def setup2D():
	glMatrixMode(GL_PROJECTION)
	glPushMatrix()
	glLoadIdentity()

	gluOrtho2D(0,Model.getWinfo().w,0,Model.getWinfo().h)
	glMatrixMode(GL_MODELVIEW)
	glPushMatrix()
	glLoadIdentity()

	glDisable(GL_DEPTH_TEST)
	glDisable(GL_LIGHTING)
Example #28
0
 def GET(self):
     _id=web.input(_id=None)['_id']
     res=Model.admin_meeting_Qurery(category='baseInfor')
     re=Model.admin_meeting_Qurery(_id=_id)
     if not res:
         kw={'category':'baseInfo'}
         ll=['_id','title','editor','content','holder','city','cyc','date','addr','start','deadline','linkman','phone']
         for i in ll:
             kw[i]=''*14
     if not re:
         re=(not res) and [kw] or [res[0]]
     return  render_base.meeting(res,re[0])
Example #29
0
 def POST(self,category):
     data=web.input()
     yes=0
     if category=='baseIfor':
         yes=Model.admin_meeting_add(data)
     elif category=='participants':
         data['category']='participants'
         yes=Model.admin_meeting_add(data)
     if yes:
         return response(True,'Ok')
     else:
         return response(False,'数据库忙')
Example #30
0
def draw():
	Util.setup2D()

	glColor(.15,.21,.41)
	barwidth = 400
	x1 = (Model.getWinfo().w/2)-(barwidth/2)
	x2 = (Model.getWinfo().w/2)+(barwidth/2)
	y1 = 20
	y2 = 50
	glRectf(x1,y1,x2,y2)

	Util.finish2D()
Example #31
0
def getCurrentHtml():
    return Model.getCurrentHtml()
Example #32
0
    def initialize_model(self, paper_size):

        width = paper_size[0]
        height = paper_size[1]
        self.model = model.Model(self, width, height)
Example #33
0
def process(imagename,
            img_arr=None,
            batch_size=10,
            model=None,
            weight_loc='../YNet/stage2/pretrained_model_st2/ynet_c1.pth',
            output_dir="out",
            output_prefix="unknown"):
    """ Run the CNN Segmentation
    
    The intermediate results will be saved into _seg_label.png (segmentation label), 
    _seg_viz.png (segmentation visualization), and CSV (features for all tiles) files.
    These files can be used for future diagnosis prediction.
    
    Note: we assume the input image is at least 384 x 384 pixels.
    Here, we use uint8 [0-255] to represent images for consistency. If the 
    result of some function is not uint8, cast it to uint8.
    
    Args:
        imagename (str): Location of the input image
        img_arr (numpy.array): if the image array is given, then we do not read
                            from the image name
        batch_size (int): batch size for the CNN. It depends on the hardware.
                         With larger batch size, the CNN can finish running
                         faster.
        model (torch.Module): the pyTorch model. If the model is given, then
                   this function would not load the model from hard drive.
        weight_loc (str): if the model is not specified, load the weights of 
                        model from this location.
        output_dir (str): output directory
        output_prefix (str): the prefix for the output files (e.g. subject id,
                      image id, roi id, etc.)
        
    Returns:
        output (np.array): The predicted segmentation with [h, w] shape
    """

    # %% Load Model
    if model is None:
        model = Net.ResNetC1_YNet(8, 5)
        model.load_state_dict(torch.load(weight_loc, map_location="cpu"))

    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()

    output_log_name = os.path.join(output_dir, output_prefix + "_seg.log")
    output_mask_name = os.path.join(output_dir,
                                    output_prefix + "_seg_label.png")
    output_rgb_name = os.path.join(output_dir, output_prefix + "_seg_viz.png")
    output_sp_name = os.path.join(output_dir,
                                  output_prefix + "_seg_sp_viz.png")
    of_freq = os.path.join(output_dir,
                           output_prefix + "_SuperpixelFrequency.csv")
    of_cooc = os.path.join(output_dir,
                           output_prefix + "_SuperpixelCooccurrence.csv")

    # %% Setup constants
    aoi_width = 384
    aoi_height = 384

    aoi_center_w = 128
    aoi_center_h = 128

    border_w = (aoi_width - aoi_center_w) // 2
    border_h = (aoi_height - aoi_center_h) // 2

    # %%
    if img_arr is None:
        whole_img = cv2.imread(imagename).astype(np.float32)
        whole_img /= 255
    else:
        whole_img = img_arr.copy()

    # assumption: the whole image is at least as large as one patch in both dimensions
    assert (whole_img.shape[0] > aoi_height and whole_img.shape[1] > aoi_width)

    image_dict = {}

    nrows = math.ceil((whole_img.shape[0] - 2 * border_h) / aoi_center_h)
    ncols = math.ceil((whole_img.shape[1] - 2 * border_w) / aoi_center_w)

    for row_id in range(nrows):
        for col_id in range(ncols):
            idx = (row_id, col_id)

            h0 = aoi_center_h * row_id
            h1 = h0 + aoi_height
            if h1 > whole_img.shape[0]:  # the aoi exceeds the whole image
                h1 = whole_img.shape[0]
                h0 = h1 - aoi_height

            w0 = aoi_center_w * col_id
            w1 = w0 + aoi_width
            if w1 > whole_img.shape[1]:  # the aoi exceeds the whole image
                w1 = whole_img.shape[1]
                w0 = w1 - aoi_width

            img_aoi = whole_img[h0:h1, w0:w1, :]

            assert (img_aoi.shape == (aoi_height, aoi_width, 3))

            image_dict[idx] = img_aoi

    # %%
    fstream = open(output_log_name, "w")

    tic = time.time()
    output = np.zeros(shape=[whole_img.shape[0], whole_img.shape[1]])
    #    output_dx_prob = np.zeros(shape=[whole_img.shape[0], whole_img.shape[1], 5])

    # The dx should be (5, 1) for each tile/instance, but it is reshaped/expanded to (h, w, 5) so that
    # it can be viewed in an image format. Note that there are only 4 dx labels, but we use
    # 5 here because the legacy code uses 1-4 to represent classes, so 0 is redundant.

    # We use batch here, so that the code can run slightly faster
    aoi_locs = list(image_dict.keys())  # the (row_id, col_id) pairs
    aoi_idx = 0  # an iterator for the aois

    while aoi_idx < len(aoi_locs):
        # Note: we cannot use a plain for loop over fixed batches here because we are not
        # sure about the batch size; the inner for loop below fills each batch instead.
        imgs_this_batch = []

        if aoi_idx % (batch_size * 100) == 0:
            print(aoi_idx, "out of", len(aoi_locs), "locations done")
            fstream.flush()

        # Setup the batch for processing
        for batch_i in range(batch_size):
            if aoi_idx >= len(aoi_locs):
                break
            img = image_dict[aoi_locs[aoi_idx]]  # get the aoi image section
            img = img.transpose((2, 0, 1))
            img = img.reshape(1, 3, aoi_width, aoi_height)
            imgs_this_batch.append(img)
            aoi_idx += 1

        # Cast numpy batch to PyTorch batch
        img_tensor = torch.from_numpy(np.concatenate(imgs_this_batch))
        img_variable = Variable(img_tensor)
        if torch.cuda.is_available():
            img_variable = img_variable.cuda()

        # Process
        img_out, sal_out = model(img_variable)
        # diagnostic_lvls = torch.argmax(sal_out, dim=1)
        sal_out = torch.softmax(sal_out, dim=1)
        sal_out = sal_out.detach().cpu().numpy()

        # Store the result batch to "all_outputs"
        num_imgs_in_batch = img_variable.shape[0]
        for batch_i, img_idx in enumerate(
                range(aoi_idx - num_imgs_in_batch, aoi_idx)):

            # Clean and organize the output from CNN
            img_out_norm = img_out[batch_i]
            prob, classMap = torch.max(img_out_norm, 0)
            #            classMap_numpy = classMap.data.cpu().numpy()
            segClassMap_np = classMap.data.cpu().numpy()[border_h:-border_h,
                                                         border_w:-border_w]
            #            im_pil = Image.fromarray(np.uint8(classMap_numpy))
            #            im_pil.putpalette(pallete)
            #            all_outputs[aoi_locs[img_idx]] = im_pil

            # reshape to the same size of the image tile
            expanded_dx_prob = np.ones(
                (img_out.shape[2], img_out.shape[3], 5)) * sal_out[batch_i]
            expanded_dx_prob = expanded_dx_prob[border_h:-border_h, border_w:
                                                -border_w]  # cut the border

            # Put the result into the numpy
            row_id, col_id = aoi_locs[img_idx]

            h0 = aoi_center_h * row_id
            h1 = h0 + aoi_height
            if h1 > whole_img.shape[0]:  # the aoi exceeds the whole image
                h1 = whole_img.shape[0]
                h0 = h1 - aoi_height

            w0 = aoi_center_w * col_id
            w1 = w0 + aoi_width
            if w1 > whole_img.shape[1]:  # the aoi exceeds the whole image
                w1 = whole_img.shape[1]
                w0 = w1 - aoi_width

            output[h0 + border_h:h1 - border_h,
                   w0 + border_w:w1 - border_w] = segClassMap_np
            #            output_dx_prob[h0+border_h:h1-border_h, w0+border_w:w1-border_w, :] = np.array(expanded_dx_prob)

            # Write into fstream
            out_data = [
                row_id, col_id, w0 + border_w, w1 - border_w, h0 + border_h,
                h1 - border_h
            ]
            out_data += list(sal_out[batch_i].reshape(-1))
            out_data += get_seg_features(segClassMap_np)
            fstream.write(",".join([str(_) for _ in out_data]))
            fstream.write("\n")

#            del segClassMap_np, im_pil, expanded_dx_prob # release some memory
        del img_variable, img_out, sal_out, prob, classMap  # release some memory (not sure whether this helps)

    toc = time.time()
    print("it takes %.2f seconds to run the CNN" % (toc - tic))

    fstream.close()
    # %% Save all results
    # Save to image
    cv2.imwrite(output_mask_name, output)

    outpil = Image.fromarray(np.uint8(output))
    outpil.putpalette(pallete)
    outpil.save(output_rgb_name)

    # %% Get the superpixel features
    #    pdb.set_trace()
    whole_img = (whole_img * 255).astype(np.uint8)
    output = output.astype(np.uint8)
    freq, cooc = mask_to_superpixel_co_occurence(whole_img,
                                                 output,
                                                 tile_size=5000,
                                                 viz_fname=output_sp_name)

    open(of_freq, "w").write(",".join([str(_) for _ in freq]))
    open(of_cooc, "w").write(",".join([str(_) for _ in cooc]))

    print(time.ctime())
    print("*" * 30, "Done saving", imagename, "*" * 30)

    return output
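A toy, self-contained sketch (hypothetical image height of 1000 px, no CNN) of the tiling arithmetic used in process() above: 384x384 AOIs whose 128x128 centres cover the image, with the AOI shifted back at the bottom/right edges so it stays inside the image.

import math

aoi, center = 384, 128
border = (aoi - center) // 2              # 128 pixels of context on each side
img_h = 1000                              # hypothetical image height

nrows = math.ceil((img_h - 2 * border) / center)
for row_id in range(nrows):
    h0 = center * row_id
    h1 = h0 + aoi
    if h1 > img_h:                        # the AOI exceeds the image: shift it back
        h1 = img_h
        h0 = h1 - aoi
    # each AOI spans [h0, h1); only its centre [h0+border, h1-border) is written to the output
    print(row_id, (h0, h1), (h0 + border, h1 - border))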
Example #34
0
    file1 = "../topicclass/topicclass_valid.txt"
    devx, devy = PreProcess(file1, cl)
    testset = TxtCNNDataset(devx, devy)
    test_loader = DataLoader(testset, batch_size=64, shuffle=True,
                             collate_fn=collate_fn)

    file2 = "./topicclass/topicclass_test.txt"
    testx, testy = PreProcess(file2, cl,word_vec)
    finalset = TxtCNNDataset(testx, testy)
    final_loader = DataLoader(finalset, batch_size=64, shuffle=False,
                              collate_fn=collate_fn)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    learningRate = 1e-2
    weightDecay = 5e-3
    model = model.TextCNN(16, 300, 100, 0.5)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learningRate, weight_decay=weightDecay)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

    train(model,args.epoch,train_loader, test_loader,args.savedpath)
    labels = predict(model,test_loader)
    list2 = []
    for i in labels:
        for j in i:
            list2.append(j)
    result = [cl[i] for i in list2]
    index = [i for i in range(len(list2))]
    data = pd.DataFrame(result, index=index)
    data.sort_index(inplace=True)
    data.to_csv("./testresult1.csv")
Example #35
0
def trainValidateSegmentation(args):
    '''
    Main function for training and validation
    :param args: global arguments
    :return: None
    '''
    # check if processed data file exists or not
    if not os.path.isfile(args.cached_data_file):
        dataLoad = ld.LoadData(args.data_dir, args.classes,
                               args.cached_data_file)
        data = dataLoad.processData()
        if data is None:
            print('Error while pickling data. Please check.')
            exit(-1)
    else:
        data = pickle.load(open(args.cached_data_file, "rb"))

    q = args.q
    p = args.p
    # load the model
    if not args.decoder:
        model = net.ESPNet_Encoder(args.classes, p=p, q=q)
        args.savedir = args.savedir + '_enc_' + str(p) + '_' + str(q) + '/'
    else:
        model = net.ESPNet(args.classes, p=p, q=q, encoderFile=args.pretrained)
        args.savedir = args.savedir + '_dec_' + str(p) + '_' + str(q) + '/'

    if args.onGPU:
        model = model.cuda()

    # create the directory if not exist
    if not os.path.exists(args.savedir):
        os.mkdir(args.savedir)

    if args.visualizeNet:
        x = Variable(torch.randn(1, 3, args.inWidth, args.inHeight))

        if args.onGPU:
            x = x.cuda()

        y = model.forward(x)
        g = viz.make_dot(y)
        g.render(args.savedir + 'model.png', view=False)

    total_paramters = netParams(model)
    print('Total network parameters: ' + str(total_paramters))

    # define optimization criteria
    weight = torch.from_numpy(
        data['classWeights'])  # convert the numpy array to torch
    if args.onGPU:
        weight = weight.cuda()

    criteria = CrossEntropyLoss2d(weight)  #weight

    if args.onGPU:
        criteria = criteria.cuda()

    print('Data statistics')
    print(data['mean'], data['std'])
    print(data['classWeights'])

    #compose the data with transforms
    trainDataset_main = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1024, 512),
        myTransforms.RandomCropResize(32),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64).
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale1 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1536, 768),  # 1536, 768
        myTransforms.RandomCropResize(100),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale2 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1280, 720),  # 1536, 768
        myTransforms.RandomCropResize(100),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale3 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(768, 384),
        myTransforms.RandomCropResize(32),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale4 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(512, 256),
        #myTransforms.RandomCropResize(20),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64).
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    valDataset = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1024, 512),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    # since we are training from scratch, we create data loaders at different scales
    # so that we can generate more augmented data and prevent the network from overfitting

    trainLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        data['trainIm'], data['trainAnnot'], transform=trainDataset_main),
                                              batch_size=args.batch_size + 2,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    trainLoader_scale1 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'],
                               data['trainAnnot'],
                               transform=trainDataset_scale1),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)

    trainLoader_scale2 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'],
                               data['trainAnnot'],
                               transform=trainDataset_scale2),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)

    trainLoader_scale3 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'],
                               data['trainAnnot'],
                               transform=trainDataset_scale3),
        batch_size=args.batch_size + 4,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)

    trainLoader_scale4 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'],
                               data['trainAnnot'],
                               transform=trainDataset_scale4),
        batch_size=args.batch_size + 4,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)

    valLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        data['valIm'], data['valAnnot'], transform=valDataset),
                                            batch_size=args.batch_size + 4,
                                            shuffle=False,
                                            num_workers=args.num_workers,
                                            pin_memory=True)

    if args.onGPU:
        cudnn.benchmark = True

    start_epoch = 0

    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            #args.lr = checkpoint['lr']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    logFileLoc = args.savedir + args.logFile
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
    else:
        logger = open(logFileLoc, 'w')
        logger.write("Parameters: %s" % (str(total_paramters)))
        logger.write(
            "\n%s\t%s\t%s\t%s\t%s\t" %
            ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val)'))
    logger.flush()

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr, (0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=5e-4)
    # halve the learning rate (gamma=0.5) each time step_size epochs are reached
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=args.step_loss,
                                                gamma=0.5)

    for epoch in range(start_epoch, args.max_epochs):

        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        print("Learning rate: " + str(lr))

        # train for one epoch
        # We consider 1 epoch with all the training data (at different scales)
        train(args, trainLoader_scale1, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale2, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale4, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale3, model, criteria, optimizer, epoch)
        lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr = train(
            args, trainLoader, model, criteria, optimizer, epoch)

        # evaluate on validation set
        lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val = val(
            args, valLoader, model, criteria)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': str(model),
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lossTr': lossTr,
                'lossVal': lossVal,
                'iouTr': mIOU_tr,
                'iouVal': mIOU_val,
                'lr': lr
            }, args.savedir + 'checkpoint.pth.tar')

        #save the model also
        model_file_name = args.savedir + '/model_' + str(epoch + 1) + '.pth'
        torch.save(model.state_dict(), model_file_name)

        with open(args.savedir + 'acc_' + str(epoch) + '.txt', 'w') as log:
            log.write(
                "\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f"
                % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val))
            log.write('\n')
            log.write('Per Class Training Acc: ' + str(per_class_acc_tr))
            log.write('\n')
            log.write('Per Class Validation Acc: ' + str(per_class_acc_val))
            log.write('\n')
            log.write('Per Class Training mIOU: ' + str(per_class_iu_tr))
            log.write('\n')
            log.write('Per Class Validation mIOU: ' + str(per_class_iu_val))

        logger.write("\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.7f" %
                     (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr))
        logger.flush()
        print("Epoch : " + str(epoch) + ' Details')
        print(
            "\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f"
            % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val))
    logger.close()
Example #36
0
 def walk_init_trajectory(self, tf=2, dt=0.01):
     hip = Model.get_traj(0.0, 0.3234, 0.0, 0.0, tf, dt)
     knee = Model.get_traj(0.0, 0.815, 0.0, 0., tf, dt)
     ankle = Model.get_traj(-0.349, 0.07, 0.0, 0.0, tf, dt)
     return hip, knee, ankle
Example #37
0
def save_model(model, path):
    torch.save(model.state_dict(), path)
Example #38
0
class DemoController:
    def __init__(self, root):
        self.root = root

        #creates an instance of demo viewer: a blank canvas on top
        #and control panel at bottom
        self.__viewer = DemoViewer(root)

        #set callbacks in the viewer, so that viewer can communicate back
        self.__viewer.setReturnCallBack(self.mainMenu)
        self.__viewer.setStartCallBack(self.startDemo)
        self.__viewer.setStopCallBack(self.stopDemo)

    #--------------------HANDLERS------------------
    # These functions respond to GUI events

    #Narrative: prepare for demo animation, create a model, set up viewer
    # and create a thread that runs the animation
    #Precondition: "Start Demo" is clicked,
    #disc number is received from viewer
    #Postcondition: everything set up and the animation thread begins
    def startDemo(self, discNum):
        self.__model = Model(discNum)

        #tells viewer to draw poles and discs
        self.__viewer.setupView(discNum,self.__model.getState(), \
                                self.__model.getDiscDict())

        #create a different thread to do animation
        self.__thrd = WorkingThread(self.solveHanoi,
                                    (discNum, "P1", "P2", "P3"))

        #begins animation
        self.__thrd.run()

    #Narrative: stop the animation by calling the thread's stop method, which
    #changes the condition flag
    #Precondition: "Stop Demo" is clicked in viewer
    #Postcondition: animation thread is stopped
    def stopDemo(self):
        self.__thrd.stop()

    #call the return callback
    def mainMenu(self):
        self.returnCallBack()

    #---------------- CALLBACK SETTER-------------------
    #used by main controller to create a callback

    #returns to the main menu
    def addReturnCallBack(self, func):
        self.returnCallBack = func

    #---------------- THE RECURSIVE SOLVER --------------------
    # solves the hanoi puzzle by recursion with the condition that
    #the thread is not stopped
    # after move is calculated, the controller calls viewer and model to update

    #NOTE: solveHanoi runs in a separate thread so that GUI events can still
    #get through

    def solveHanoi(self, n, start, temp, end):

        # check if the thread is stopped
        if not self.__thrd.isStopped():
            if n > 0:
                self.solveHanoi(n - 1, start, end, temp)

                #again, check if the thread is stopped
                if not self.__thrd.isStopped():

                    #call the function to display; the try/except handles
                    #when user abruptly closes the window
                    try:
                        self.move(n, start, end)
                    except:
                        return

                    self.solveHanoi(n - 1, temp, start, end)

    #called by the recursive solver.
    #Tells demo viewer to display each move on canvas
    #Tells the model to update
    def move(self, n, start, end):

        #reformat the tower number into a tag so that the viewer canvas can
        #find it
        n = "D" + str(n)

        #get the number of discs on the destination pole so that the viewer
        #will know how high to put it
        endNum = self.__model.getPoleDiscNum(end)

        #calls viewer's animation function
        self.__viewer.animate(n, start, end, endNum)

        #updates model
        self.__model.update(n, start, end)
Example #39
0
def train():
    # initialize the log and model paths
    OtherUtils.initPaths(FLAGS.model_path, FLAGS.log_path)

    # initialize the input files
    train_data_helper = dh.TrainDataHelper(FLAGS.embedding_size)
    train_data_helper.initialize(FLAGS.pretrained_embedding_file)

    train_datas = train_data_helper.read_input_file(FLAGS.train_file,
                                                    type="train")
    train_data_size = len(train_datas)
    valid_datas = train_data_helper.read_input_file(FLAGS.valid_file,
                                                    type="valid")
    valid_data_size = len(valid_datas)

    # Build a graph and rnn object
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            gpu_options=gpu_options,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            model = Model.Model(d_emb=FLAGS.embedding_size,
                                d_hiddens=dims_hidden,
                                d_fc=dims_fc)
            model.build()

            # build the train_operator
            train_op = ModelUtils.train_step(model.loss,
                                             FLAGS.learning_rate,
                                             model.global_step,
                                             decay=False)

            saver = tf.train.Saver(tf.global_variables(),
                                   max_to_keep=FLAGS.num_checkpoints)

            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            def train_step(batch_ids_a, batch_ids_b, batch_labels):
                batch_labels = np.array(batch_labels)
                feed_dict = {
                    model.inputs_a: batch_ids_a,
                    model.inputs_b: batch_ids_b,
                    model.input_y: batch_labels,
                    model.dropout_keep_rate: FLAGS.dropout_keep_rate,
                }
                _, loss, scores = sess.run(
                    [train_op, model.loss, model.scores], feed_dict=feed_dict)
                tp, fp, tn, fn = Metrix.get_accu(scores[:], batch_labels[:],
                                                 FLAGS.accu_threshold)
                return loss, tp, fp, tn, fn

            def validation_step(batch_ids_a, batch_ids_b, batch_labels):
                batch_labels = np.array(batch_labels)
                feed_dict = {
                    model.inputs_a: batch_ids_a,
                    model.inputs_b: batch_ids_b,
                    model.input_y: batch_labels,
                    model.dropout_keep_rate: 1.0,
                }
                loss, scores = sess.run([model.loss, model.scores],
                                        feed_dict=feed_dict)
                tp, fp, tn, fn = Metrix.get_accu(scores[:], batch_labels[:],
                                                 FLAGS.accu_threshold)
                return loss, tp, fp, tn, fn

            with tf.device("/gpu:0"):
                batch_per_epoch = train_data_size / FLAGS.batch_size
                if train_data_size % FLAGS.batch_size != 0:
                    batch_per_epoch += 1
                valid_batch_sum = valid_data_size / FLAGS.batch_size
                if valid_data_size % FLAGS.batch_size != 0:
                    valid_batch_sum += 1

                best_val_loss = 1000
                best_val_accu = 0.0
                best_val_recall = 0.0
                best_val_prec = 0.0
                best_val_f1 = -1
                best_epoch = -1
                for epoch in range(FLAGS.epochs):
                    total_loss = 0.0
                    tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0
                    batches = train_data_helper.batch_iter(train_datas,
                                                           FLAGS.batch_size,
                                                           shuffle=True)
                    for idx, batch in enumerate(batches):
                        batch_ids_a, batch_ids_b, batch_labels = train_data_helper.trans_batch_to_inputs(
                            batch)
                        _loss, _tp, _fp, _tn, _fn = train_step(
                            batch_ids_a, batch_ids_b, batch_labels)
                        total_loss += _loss
                        tp += _tp
                        tn += _tn
                        fp += _fp
                        fn += _fn
                        if idx != 0 and idx % (batch_per_epoch / 10) == 0:
                            tmp_loss = total_loss / idx
                            tmp_accu = (tp + tn) / (tp + tn + fp + fn)
                            per = idx / (batch_per_epoch / 10)
                            mess = "Epoch: %d, percent: %d0%%, loss: %f, accu: %f" % (
                                epoch, per, tmp_loss, tmp_accu)
                            logging.info(mess)
                            logging.info(
                                "Epoch: %d, percent: %d0%%, tp=%d, tn=%d, fp=%d, fn=%d"
                                % (epoch, per, int(tp), int(tn), int(fp),
                                   int(fn)))

                    total_loss = total_loss / batch_per_epoch
                    accu = (tp + tn) / (tp + tn + fp + fn)
                    mess = "Epoch %d: train result - loss %f, accu %f" % (
                        epoch, total_loss, accu)
                    logging.info(mess)

                    total_loss = 0.0
                    tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0
                    batches = train_data_helper.batch_iter(valid_datas,
                                                           FLAGS.batch_size,
                                                           shuffle=False)
                    for batch in batches:
                        batch_ids_a, batch_ids_b, batch_labels = train_data_helper.trans_batch_to_inputs(
                            batch)
                        loss_, _tp, _fp, _tn, _fn = validation_step(
                            batch_ids_a, batch_ids_b, batch_labels)
                        total_loss += loss_
                        tp += _tp
                        tn += _tn
                        fp += _fp
                        fn += _fn
                    total_loss = total_loss / valid_batch_sum
                    accu, recall, f1, prec = Metrix.eva(tp, tn, fp, fn)
                    mess = "Evaluation: loss %f, acc %f, recall %f, precision %f, f1 %f" % \
                           (total_loss, accu, recall, prec, f1)
                    logging.info(mess)
                    logging.info("Evaluation: tp=%d, tn=%d, fp=%d, fn=%d" %
                                 (int(tp), int(tn), int(fp), int(fn)))

                    # checkpoint_prefix = "%s/model" % FLAGS.model_path
                    # path = saver.save(sess, checkpoint_prefix, global_step=epoch)
                    # print("Saved model checkpoint to {0}".format(path))
                    if best_val_loss > total_loss:
                        best_val_loss = total_loss
                        best_val_accu = accu
                        best_val_recall = recall
                        best_val_prec = prec
                        best_val_f1 = f1
                        best_epoch = epoch
                        checkpoint_prefix = "%s/model" % FLAGS.model_path
                        path = saver.save(sess,
                                          checkpoint_prefix,
                                          global_step=epoch)
                        print("Saved model checkpoint to {0}".format(path))
                logging.info(
                    "Best epoch=%d, loss=%f, accu=%.4f, recall=%.4f, prec=%.4f, f1=%.4f",
                    best_epoch, best_val_loss, best_val_accu, best_val_recall,
                    best_val_prec, best_val_f1)
        logging.info("Training done")
Example #40
0
    def refresh(self):
        self.clock.Start()
        self.button.Enable(False)
        self.startRaceTimeCheckBox.Enable(False)
        self.settingsButton.Enable(False)
        self.button.SetLabel(StartText)
        self.button.SetForegroundColour(wx.Colour(100, 100, 100))
        self.chipTimingOptions.SetSelection(0)
        self.chipTimingOptions.Enable(False)

        with Model.LockRace() as race:
            if race:
                self.settingsButton.Enable(True)

                # Adjust the chip recording options for TT.
                if getattr(race, 'isTimeTrial', False):
                    race.resetStartClockOnFirstTag = False
                    race.skipFirstTagRead = False

                if getattr(race, 'resetStartClockOnFirstTag', True):
                    self.chipTimingOptions.SetSelection(
                        self.iResetStartClockOnFirstTag)
                elif getattr(race, 'skipFirstTagRead', False):
                    self.chipTimingOptions.SetSelection(self.iSkipFirstTagRead)

                if race.startTime is None:
                    self.button.Enable(True)
                    self.button.SetLabel(StartText)
                    self.button.SetForegroundColour(wx.Colour(0, 128, 0))

                    self.startRaceTimeCheckBox.Enable(True)
                    self.startRaceTimeCheckBox.Show(True)

                    self.chipTimingOptions.Enable(
                        getattr(race, 'enableJChipIntegration', False))
                    self.chipTimingOptions.Show(
                        getattr(race, 'enableJChipIntegration', False))
                elif race.isRunning():
                    self.button.Enable(True)
                    self.button.SetLabel(FinishText)
                    self.button.SetForegroundColour(wx.Colour(128, 0, 0))

                    self.startRaceTimeCheckBox.Enable(False)
                    self.startRaceTimeCheckBox.Show(False)

                    self.chipTimingOptions.Enable(False)
                    self.chipTimingOptions.Show(False)

                # Adjust the time trial display options.
                if getattr(race, 'isTimeTrial', False):
                    self.chipTimingOptions.Enable(False)
                    self.chipTimingOptions.Show(False)

            self.GetSizer().Layout()

        self.setWrappedRaceInfo()
        self.checklist.refresh()

        mainWin = Utils.getMainWin()
        if mainWin is not None:
            mainWin.updateRaceClock()
Example #41
0
    def __init__(self, parent, id=wx.ID_ANY):
        wx.Dialog.__init__(self,
                           parent,
                           id,
                           _("Start Race at Time:"),
                           style=wx.DEFAULT_DIALOG_STYLE | wx.TAB_TRAVERSAL)

        font = wx.Font(24, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
                       wx.FONTWEIGHT_NORMAL)

        self.startSeconds = None
        self.timer = None
        self.futureRaceTime = None

        race = Model.getRace()
        autoStartLabel = wx.StaticText(self,
                                       label=_('Automatically Start Race at:'))

        # Make sure we don't suggest a start time in the past.
        value = race.scheduledStart
        startSeconds = Utils.StrToSeconds(value) * 60
        nowSeconds = GetNowSeconds()
        if startSeconds < nowSeconds:
            startOffset = 3 * 60
            startSeconds = nowSeconds - nowSeconds % startOffset
            startSeconds += startOffset
            value = u'{:02d}:{:02d}'.format(startSeconds / (60 * 60),
                                            (startSeconds / 60) % 60)

        self.autoStartTime = masked.TimeCtrl(self,
                                             fmt24hr=True,
                                             display_seconds=False,
                                             value=value,
                                             size=wx.Size(60, -1))

        self.pagesLabel = wx.StaticText(self,
                                        label=_('After Start, Switch to:'))
        mainWin = Utils.getMainWin()
        if mainWin:
            pageNames = [name for a, b, name in mainWin.attrClassName]
        else:
            pageNames = [
                _('Actions'),
                _('Record'),
                _('Results'),
                _('Passings'),
                _('RiderDetail'),
                _('Chart'),
                _('Animation'),
                _('Recommendations'),
                _('Categories'),
                _('Properties'),
                _('Primes'),
                _('Situation'),
                _('LapCounter'),
            ]
        pageNames = pageNames[1:]  # Skip the Actions screen.
        self.pages = wx.Choice(self, choices=pageNames)
        self.pages.SetSelection(0)  # Record screen.

        self.countdown = CountdownClock(self, size=(400, 400), tFuture=None)
        self.Bind(EVT_COUNTDOWN, self.onCountdown)

        self.okBtn = wx.Button(self, wx.ID_OK, label=_('Start at Above Time'))
        self.Bind(wx.EVT_BUTTON, self.onOK, self.okBtn)

        self.start30 = wx.Button(self, label=_('Start in 30s'))
        self.start30.Bind(wx.EVT_BUTTON,
                          lambda event: self.startInFuture(event, 30))
        self.start60 = wx.Button(self, label=_('Start in 60s'))
        self.start60.Bind(wx.EVT_BUTTON,
                          lambda event: self.startInFuture(event, 60))

        self.cancelBtn = wx.Button(self, wx.ID_CANCEL)
        self.Bind(wx.EVT_BUTTON, self.onCancel, self.cancelBtn)

        vs = wx.BoxSizer(wx.VERTICAL)

        border = 8
        hs = wx.BoxSizer(wx.HORIZONTAL)
        hs.Add(autoStartLabel,
               border=border,
               flag=wx.LEFT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)
        hs.Add(self.autoStartTime,
               border=border,
               flag=wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)
        hs.Add(self.pagesLabel,
               border=border,
               flag=wx.LEFT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)
        hs.Add(self.pages,
               border=border,
               flag=wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_BOTTOM
               | wx.ALIGN_CENTER_VERTICAL)
        vs.Add(hs)

        hs = wx.BoxSizer(wx.HORIZONTAL)
        hs.Add(self.okBtn, border=border, flag=wx.ALL)
        hs.Add(self.start30, border=border, flag=wx.TOP | wx.BOTTOM | wx.RIGHT)
        hs.Add(self.start60, border=border, flag=wx.TOP | wx.BOTTOM | wx.RIGHT)
        self.okBtn.SetDefault()
        hs.AddStretchSpacer()
        hs.Add(self.cancelBtn, border=border, flag=wx.ALL)
        vs.Add(hs, flag=wx.EXPAND)

        vs.Add(self.countdown,
               1,
               border=border,
               flag=wx.ALL | wx.ALIGN_CENTRE | wx.EXPAND)

        self.SetSizerAndFit(vs)

        self.CentreOnParent(wx.BOTH)
        wx.CallAfter(self.SetFocus)

        wx.CallLater(100, self.autoStartTime.SetSize, (48, -1))
Example #42
0
                    self.startRaceTimeCheckBox.Show(False)

                    self.chipTimingOptions.Enable(False)
                    self.chipTimingOptions.Show(False)

                # Adjust the time trial display options.
                if getattr(race, 'isTimeTrial', False):
                    self.chipTimingOptions.Enable(False)
                    self.chipTimingOptions.Show(False)

            self.GetSizer().Layout()

        self.setWrappedRaceInfo()
        self.checklist.refresh()

        mainWin = Utils.getMainWin()
        if mainWin is not None:
            mainWin.updateRaceClock()


if __name__ == '__main__':
    app = wx.App(False)
    mainWin = wx.Frame(None, title="CrossMan", size=(1024, 600))
    actions = Actions(mainWin)
    Model.newRace()
    Model.race.enableJChipIntegration = False
    Model.race.isTimeTrial = False
    actions.refresh()
    mainWin.Show()
    app.MainLoop()
Example #43
0
def getCurrentTTStartListHtml():
    return Model.getCurrentTTStartListHtml()
Example #44
0
def getCurrentTTCountdownHtml():
    return Model.getCurrentTTCountdownHtml()
Example #45
0
    def doChooseCategory(self, event):
        Model.setCategoryChoice(self.categoryChoice.GetSelection(),
                                'raceAnimationCategory')
        self.refresh()
Example #46
0
def build_model(fname, filter_cp = lambda chan, p: True, filter_uncertainty = lambda unc: True, include_mc_uncertainties = False, variables = {}, rmorph_method = 'renormalize-lognormal'):
    """
    Build a Model from a text-based datacard as used in LHC Higgs analyses

    See https://twiki.cern.ch/twiki/bin/viewauth/CMS/SWGuideHiggsAnalysisCombinedLimit

    Note that not the complete set of features is supported, in particular no unbinned fits.
    Supported uncertainties are: lnN (symmetric and asymmetric), lnU, gmN, shape

    The 'shape' uncertainty uses a slightly different interpolation: the Higgs tool uses a quadratic interpolation with linear extrapolation
    whereas theta uses a cubic interpolation and linear extrapolation. It is expected that this has negligible impact
    on the final result, but it might play a role in extreme cases.
    
    Parameters:
    
    * ``fname`` is the filename of the datacard to process
    * ``filter_cp`` is a function which, for a given pair of a channel name and process name (as given in the model configuration file), returns ``True`` if this channel/process should be kept and ``False`` otherwise. The default is to keep all channel/process combinations.
    * ``filter_uncertainty`` is a filter function for the uncertainties. The default is to keep all uncertainties
    * ``include_mc_uncertainties`` if ``True`` use the histogram uncertainties of shapes given in root files for Barlow-Beeston light treatment of MC stat. uncertainties
    * ``variables`` is a dictionary for replacing strings in the datacards. For example, use ``variables = {'MASS': '125'}`` to replace each appearance of '$MASS' in the datacard with '125'. Both key and value should be strings.
    * ``rmorph_method`` controls how the rate part of a shape uncertainty is handled: "renormalize-lognormal" will re-scale the plus and minus histogram to the nominal one,
      perform the morphing on those histograms, re-scale the morphed histogram to the nominal one and add an exponential (=log-normal) rate factor using the same parameter as is used
      for the interpolation. Instead "morph" will simply interpolate between the nominal, plus and minus histograms as they are.
    """
    model = Model()
    lines = [l.strip() for l in file(fname)]
    lines = [(lines[i], i+1) for i in range(len(lines)) if not lines[i].startswith('#') and lines[i]!='' and not lines[i].startswith('--')]
    
    cmds = get_cmds(lines[0])
    while cmds[0] != 'imax':
        print 'WARNING: ignoring line %d ("%s") at beginning of file as first token is "%s", not "imax", although not marked as comment' % (lines[0][1], lines[0][0], cmds[0])
        lines = lines[1:]
        cmds = get_cmds(lines[0])
    assert cmds[0]=='imax', "Line %d: Expected imax statement as first statement in the file" % lines[0][1]
    imax = cmds[1]
    if imax !='*': imax = int(imax)
    lines = lines[1:]

    cmds = get_cmds(lines[0])
    assert cmds[0]=='jmax', "Line %d: Expected 'jmax' statement directly after 'imax' statement" % lines[0][1]
    #jmax = int(cmds[1])
    lines = lines[1:]

    cmds = get_cmds(lines[0])
    assert cmds[0]=='kmax', "Line %d: Expected 'kmax' statement directly after 'jmax' statement" % lines[0][1]
    if cmds[1] == '*': kmax = -1
    else: kmax = int(cmds[1])
    lines = lines[1:]

    shape_lines = []
    shape_observables = set()
    cmds = get_cmds(lines[0])
    while cmds[0].lower() == 'shapes':
        assert len(cmds) in (5,6)
        if len(cmds) == 5: cmds.append('')
        shape_lines.append(cmds[1:]) # (process, channel, file, histogram, histogram_with_systematics)
        obs = cmds[2]
        shape_observables.add(obs)
        lines =lines[1:]
        cmds = get_cmds(lines[0])
        
    pshape_lines = []
    while cmds[0].lower() == 'pshape':
        assert len(cmds) >= 4 # pshape proc channel command
        pshape_lines.append([cmds[1], cmds[2], ' '.join(cmds[3:])]) # (process, channel, command)
        if _debug: print "Line %d: found pshape line %s" % (lines[0][1], str(pshape_lines[-1]))
        lines =lines[1:]
        cmds = get_cmds(lines[0])

    assert cmds[0].lower() in ('bin', 'observation'), "Line %d: Expected 'bin' or 'observation' statement" % lines[0][1]
    if cmds[0].lower() == 'bin':
        # prepend a 'c' so we can use numbers as channel names:
        channel_labels = cmds[1:]
        if imax=='*': imax = len(channel_labels)
        assert len(channel_labels) == imax, "Line %d: Number of channels from 'imax' and number of labels given in 'bin' line (%s) mismatch" % (lines[0][1], str(channel_labels))
        lines = lines[1:]
        cmds = get_cmds(lines[0])
    else:
        channel_labels = [ '%d' % i for i in range(1, imax + 1)]
    assert cmds[0].lower()=='observation', "Line %d: Expected 'observation' statement directly after first 'bin' statement" % lines[0][1]
    observed_flt = [float(o) for o in cmds[1:]]
    observed_int = map(lambda f: int(f), observed_flt)
    if observed_flt != observed_int: raise RuntimeError, "Line %d: non-integer events given in 'observed' statement!" % lines[0][1]
    if imax=='*': imax = len(observed_int)
    assert len(observed_int) == imax, "Line %d: Number of channels from 'imax' and number of bins given in 'observed' mismatch: imax=%d, given in observed: %d" % (lines[0][1], imax, len(observed_int))
    for i in range(len(channel_labels)):
        theta_obs = transform_name_to_theta(channel_labels[i])
        model.set_data_histogram(theta_obs, Histogram(0.0, 1.0, [observed_flt[i]]))
    lines = lines[1:]

    cmds = get_cmds(lines[0])
    assert cmds[0] == 'bin', "Line %d: Expected 'bin' statement"% lines[0][1]
    # save the channel 'headers', to be used for parsing the next line:
    channels_for_table = cmds[1:]
    for c in channels_for_table:
        if c not in channel_labels: raise RuntimeError, "Line % d: unknown channel '%s'" % (lines[0][1], c)
    lines = lines[1:]
    n_cols = len(channels_for_table)

    cmds = get_cmds(lines[0])
    assert cmds[0]=='process'
    processes1 = cmds[1:]
    if len(processes1) != n_cols:
        raise RuntimeError, "Line %d: 'bin' statement and 'process' statement have different number of elements" % lines[0][1]
    lines = lines[1:]

    cmds = get_cmds(lines[0])
    assert cmds[0]=='process', "Line %d: Expected second 'process' line directly after first" % lines[0][1]
    processes2 = cmds[1:]
    if n_cols != len(processes2):
        raise RuntimeError, "Line %d: 'process' statements have different number of elements" % lines[0][1]
    lines = lines[1:]
    
    # get process names and numeric process ids:
    if(all(map(is_int, processes1))):
        process_ids_for_table = [int(s) for s in processes1]
        processes_for_table = processes2
    else:
        if not all(map(is_int, processes2)): raise RuntimeError("just before line %d: one of these 'process' lines should contain only numbers!" % lines[0][1])
        process_ids_for_table = [int(s) for s in processes2]
        processes_for_table = processes1
    
    # build a list of columns to keep ( = not filtered by filter_cp):
    column_indices = []
    for i in range(n_cols):
        if filter_cp(channels_for_table[i], processes_for_table[i]): column_indices.append(i)

    # check process label / id consistency:
    p_l2i = {}
    p_i2l = {}
    for i in range(n_cols):
        p_l2i[processes_for_table[i]] = process_ids_for_table[i]
        p_i2l[process_ids_for_table[i]] = processes_for_table[i]
    # go through again to make check, also save signal processes:
    signal_processes = set()
    for i in range(n_cols):
        if p_l2i[processes_for_table[i]] != process_ids_for_table[i] or p_i2l[process_ids_for_table[i]] != processes_for_table[i]:
            raise RuntimeError, "Line %d: mapping process id <-> process label (defined via the two 'process' lines) is not one-to-one as expected!" % lines[0][1]
        if p_l2i[processes_for_table[i]] <= 0:
            signal_processes.add(processes_for_table[i])

    cmds = get_cmds(lines[0])
    assert cmds[0]=='rate', "Line %d: Expected 'rate' statement after the two 'process' statements" % lines[0][1]
    if n_cols != len(cmds)-1:
        raise RuntimeError, "Line %d: 'rate' statement specifies the wrong number of elements" % lines[0][1]
    for i in column_indices:
        theta_obs, theta_proc = transform_name_to_theta(channels_for_table[i]), transform_name_to_theta(processes_for_table[i])
        n_exp = float(cmds[i+1])
        #print o,p,n_exp
        hf = HistogramFunction()
        hf.set_nominal_histo(Histogram(0.0, 1.0, [n_exp]))
        #print "setting prediction for (theta) channel '%s', (theta) process '%s'" % (theta_obs, theta_proc)
        model.set_histogram_function(theta_obs, theta_proc, hf)
        assert model.get_histogram_function(theta_obs, theta_proc) is not None
    lines = lines[1:]
    
    if kmax == -1: kmax = len(lines)

    if kmax != len(lines):
        raise RuntimeError, "Line %d--end: wrong number of lines for systematics (expected kmax=%d, got %d)" % (lines[0][1], kmax, len(lines))
    
    # save uncertainty names to avoid duplicates:
    uncertainty_names = set()
    # shape systematics is a dictionary (channel) --> (process) --> (parameter) --> (factor)
    # factors of 0 are omitted.
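    # e.g. (illustrative names only): shape_systematics['ch1']['signal']['jes'] = 1.0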
    shape_systematics = {}
    for i in range(kmax):
        if _debug: print "processing line %d" % lines[i][1]
        cmds = get_cmds(lines[i])
        assert len(cmds) >= len(processes_for_table) + 2, "Line %d: wrong number of entries for uncertainty '%s'" % (lines[i][1], cmds[0])
        if not filter_uncertainty(cmds[0]): continue
        if cmds[0] in uncertainty_names:
            raise RuntimeError, "Uncertainty '%s' specified more than once; this is not supported." % cmds[0]
        uncertainty_names.add(cmds[0])
        uncertainty = transform_name_to_theta(cmds[0])
        if cmds[1] == 'gmN':
            values = cmds[3:]
            n_affected = 0
            k = float(cmds[2])
            for icol in column_indices:
                if values[icol]=='-': continue
                val = float(values[icol])
                if val==0.0: continue
                obsname = transform_name_to_theta(channels_for_table[icol])
                procname = transform_name_to_theta(processes_for_table[icol])
                # add the same parameter (+the factor in the table) as coefficient:
                model.get_coeff(obsname, procname).add_factor('id', parameter = uncertainty)
                n_affected += 1
                n_exp = model.get_histogram_function(obsname, procname).get_nominal_histo()[2][0]
                if max(n_exp, val*k) != 0:
                    if abs(n_exp - val*k)/max(n_exp, val*k) > 0.03:
                        raise RuntimeError, "gmN uncertainty %s for process %s is inconsistent: the rate expectation should match k*theta but N_exp=%f, k*theta=%f!" % (cmds[0], procname, n_exp, val*k)
            if n_affected > 0:
                n_obs_sb = float(cmds[2]) # the number of observed events in the sideband
                n_model_sb = n_obs_sb     # the number of events in the model template in the sideband. This is pretty arbitrary, as a scale factor is used to fit this anyway.
                if n_model_sb == 0.0: n_model_sb = 1.0
                hf = HistogramFunction()
                hf.set_nominal_histo(Histogram(0.0, 1.0, [n_model_sb]))
                obs_sb = '%s_sideband' % uncertainty
                model.set_histogram_function(obs_sb, 'proc_%s_sideband' % uncertainty, hf)
                model.set_data_histogram(obs_sb, Histogram(0.0, 1.0, [n_obs_sb]))
                model.get_coeff(obs_sb, 'proc_%s_sideband' % uncertainty).add_factor('id', parameter = uncertainty)
                # as mean, use the value at the observation such that toys reproduce this value ...
                model.distribution.set_distribution(uncertainty, 'gauss', mean = n_obs_sb / n_model_sb, width = float("inf"), range = (0.0, float("inf")))
        elif cmds[1] in ('lnN', 'lnU'):
            n_affected = 0
            values = cmds[2:]
            for icol in column_indices:
                if values[icol]=='-': continue
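                # e.g. an asymmetric entry such as '0.9/1.1' yields lambda_minus = -log(0.9), lambda_plus = log(1.1);
                # a symmetric entry such as '1.1' yields lambda_minus = lambda_plus = log(1.1).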
                if '/' in values[icol]:
                    p = values[icol].find('/')
                    lambda_minus = -math.log(float(values[icol][0:p]))
                    lambda_plus = math.log(float(values[icol][p+1:]))
                else:
                    lambda_minus = math.log(float(values[icol]))
                    lambda_plus = lambda_minus
                if lambda_plus == 0.0 and lambda_minus == 0.0: continue
                obsname = transform_name_to_theta(channels_for_table[icol])
                procname = transform_name_to_theta(processes_for_table[icol])
                n_affected += 1
                model.get_coeff(obsname, procname).add_factor('exp', parameter = uncertainty, lambda_minus = lambda_minus, lambda_plus = lambda_plus)
            if n_affected > 0:
                if cmds[1] == 'lnN':  model.distribution.set_distribution(uncertainty, 'gauss', mean = 0.0, width = 1.0, range = default_rate_range)
                else:  model.distribution.set_distribution(uncertainty, 'gauss', mean = 0.0, width = float("inf"), range = [-1.0, 1.0])
        elif cmds[1] == 'gmM':
            values = cmds[2:]
            values_f = set([float(s) for s in values if s!='-' and float(s)!=0.0])
            if len(values_f)>1: raise RuntimeError, "gmM does not support different uncertainties"
            if len(values_f)==0: continue
            gmM_width = values_f.pop()
            n_affected = 0
            for icol in column_indices:
                if values[icol]=='-' or float(values[icol])==0.0: continue
                obsname = transform_name_to_theta(channels_for_table[icol])
                procname = transform_name_to_theta(processes_for_table[icol])
                model.get_coeff(obsname, procname).add_factor('id', parameter = uncertainty)
                n_affected += 1
            if n_affected > 0:
                model.distribution.set_distribution(uncertainty, 'gamma', mean = 1.0, width = gmM_width, range = (0.0, float("inf")))
        elif cmds[1] == 'shape':
            factors = cmds[2:]
            n_affected = 0
            for icol in column_indices:
                if factors[icol] == '-' or float(factors[icol]) == 0.0: continue
                factor = float(factors[icol])
                obsname = transform_name_to_theta(channels_for_table[icol])
                procname = transform_name_to_theta(processes_for_table[icol])
                n_affected += 1
                add_entry(shape_systematics, channels_for_table[icol], processes_for_table[icol], cmds[0], factor)
            if n_affected > 0:
                model.distribution.set_distribution(uncertainty, 'gauss', mean = 0.0, width = 1.0, range = default_shape_range)
        else: raise RuntimeError, "Line %d: unknown uncertainty type %s" % (lines[i][1], cmds[1])
    # add shape systematics:
    if '*' in shape_observables: shape_observables = set(channel_labels)
    data_done = set()
    searchpaths = ['.', os.path.dirname(fname)]
    if _debug: print "adding shapes now ..."
    psb = ParametricShapeBuilder(model)
    # loop over processes and observables:
    for icol in column_indices:
        obs = channels_for_table[icol]
        if _debug: print "adding shape for channel '%s'" % obs
        if obs not in shape_observables: continue
        proc = processes_for_table[icol]
        found_matching_shapeline = False
        # try all lines in turn, until adding the shapes from that file succeeds:
        for l in shape_lines: # l = (process, channel, file, histogram, histogram_with_systematics)
            try:
                if _debug: print "   shape line: %s" % str(l)
                if l[1]!='*' and l[1]!=obs: continue
                if obs not in data_done and l[0] in ('*', 'data_obs', 'DATA'):
                    try:
                        add_shapes(model, obs, 'DATA', {}, l[2], l[3], '', include_mc_uncertainties, searchpaths = searchpaths, variables = variables, rhandling = rmorph_method)
                        data_done.add(obs)
                    except RuntimeError: pass # ignore missing data
                if l[0]!='*' and l[0]!=proc: continue
                uncs = {}
                if obs in shape_systematics: uncs = shape_systematics[obs].get(proc, {})
                if _debug: print "   adding shapes for channel %s, process %s, trying file %s, line %s" % (obs, proc, l[2], ' '.join(l))
                add_shapes(model, obs, proc, uncs, l[2], l[3], l[4], include_mc_uncertainties, searchpaths = searchpaths, variables = variables, rhandling = rmorph_method)
                found_matching_shapeline = True
                break
            except NotFoundException: pass # ignore the case that some histo has not been found for now; raise a RuntimeError later if no line matched
        if found_matching_shapeline: continue
        # now that nothing is found, also try the pshapelines
        for l in pshape_lines:
            if l[0] != proc or l[1] != obs: continue
            print "Trying to apply pshape line %s" % str(l)
            theta_obs, theta_proc = transform_name_to_theta(obs), transform_name_to_theta(proc)
            psb.apply_(theta_obs, theta_proc, l[2])
            found_matching_shapeline = True
        if not found_matching_shapeline:
            raise RuntimeError, "Did not find all the (nominal / systematics) histograms for channel '%s', process '%s'" % (obs, proc)
    model.set_signal_processes([transform_name_to_theta(proc) for proc in signal_processes])
    if include_mc_uncertainties: model.bb_uncertainties = True
    return model
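
# A minimal usage sketch (not part of the parser above). The datacard path, the
# channel/process names and the '$MASS' value are hypothetical placeholders; only
# keyword arguments that appear in the build_model() signature are used.
#
# Illustrative datacard ('datacard.txt'), consistent with what the parser expects:
#
#   imax 1
#   jmax 1
#   kmax 2
#   bin          ch1
#   observation  100
#   bin          ch1    ch1
#   process      sig    bkg
#   process      0      1
#   rate         10.0   90.0
#   lumi   lnN   1.025  1.025
#   bkg_n  lnN   -      1.10
#
# model = build_model('datacard.txt',
#                     filter_cp = lambda chan, proc: proc != 'fakes',  # keep all but a hypothetical 'fakes' process
#                     include_mc_uncertainties = False,
#                     variables = {'MASS': '125'},                     # replaces '$MASS' in the datacard
#                     rmorph_method = 'renormalize-lognormal')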
Example #47
0
            self.hbs.Layout()
            raceTime = race.lastRaceTime() if race.isRunning() else self.animation.t
            raceIsRunning = race.isRunning()

        self.finishTop.SetValue(getattr(race, 'finishTop', False))

        animationData = GetAnimationData(category, True)
        self.animation.SetData(
            animationData, raceTime,
            dict((cd['name'], cd) for cd in GetCategoryDetails()))
        self.animation.SetOptions(getattr(race, 'reverseDirection', False),
                                  getattr(race, 'finishTop', False))
        if raceIsRunning:
            if not self.animation.IsAnimating():
                self.animation.StartAnimateRealtime()


if __name__ == '__main__':
    app = wx.App(False)
    mainWin = wx.Frame(None, title="CrossMan", size=(600, 400))
    Model.setRace(Model.Race())
    Model.getRace()._populate()
    Model.getRace().finishRaceNow()
    raceAnimation = RaceAnimation(mainWin)
    raceAnimation.refresh()
    raceAnimation.animation.Animate(2 * 60)
    raceAnimation.animation.SuspendAnimate()
    mainWin.Show()
    app.MainLoop()
Example #48
0
    testname = args.t
    modlname = args.m
    outputfile = args.o
    nn = args.type

    # load the data
    test = Loading.LoadTst(testname)
    numb = len(test)

    test = Loading.DataSet(test, 1)
    test = DataLoader(test, batch_size=64, shuffle=False)
    print("[Done] Segmenting and vectorizing all data!")

    # load done-trained model
    model = None
    if nn == "lstm": model = Model.RNN("lstm", "lstm")
    if nn == "gru": model = Model.RNN("gru", "gru")
    if nn == "dnn": model = Model.DNN()
    check = torch.load(modlname)
    model.load_state_dict(check)
    if gpu: model.cuda()
    print("[Done] Initializing model!")

    # set to evaluation mode
    model.eval()

    predt = torch.LongTensor()
    if gpu:
        predt = predt.cuda()

    for ind, tst in enumerate(test):
Example #49
0
    def stance_trajectory(self, tf=2, dt=0.01):
        hip = Model.get_traj(0.0, -0.5, 0.0, 0.0, tf, dt)
        knee = Model.get_traj(0.0, 0.50, 0.0, 0., tf, dt)
        ankle = Model.get_traj(-0.349, -0.2, 0.0, 0.0, tf, dt)
        return hip, knee, ankle
Example #50
0
# load image
image_path1 = im_dir + args.image1
I = Image.open(image_path1).resize((256, 256))
loaded_image_array1 = np.array(I, dtype=np.float64) / 255

img_nrows, img_ncols, _ = loaded_image_array1.shape

AVE = np.average(loaded_image_array1, axis=(0, 1))
image_processed = loaded_image_array1 - AVE

ref = tf.expand_dims(tf.constant(image_processed, dtype=tf.float32), axis=0)

x = tf.Variable(np.random.randn(3, img_nrows, img_ncols, 3), dtype=tf.float32)

# model construction
model = Model.model1(args.layer_S)

out, h_dict, var_list, _ = model.run(x)
out_ref, h_dict_ref, _, _ = model.run(ref)

layer_name_ = [
    'relu1', 'relu2', 'block1_pool', 'relu3', 'relu4', 'block2_pool', 'relu5',
    'relu6', 'block3_pool', 'relu7', 'relu8', 'block4_pool', 'relu9', 'relu10',
    'block5_pool'
]
layer_name = layer_name_[0:args.layer_D]
layer_c = [
    'composite1', 'composite2', 'composite3', 'composite4', 'composite5'
]
layer_name += layer_c[0:args.layer_S]
Example #51
0
def load_model(model,path):
    pretrained = torch.load(path)
    model.load_state_dict(pretrained)
    return model
Example #52
0
# symptoms_train, symptoms_test, result_train, result_test = train_test_split(symptoms, result, random_state = 69, shuffle = True)

# model = RandomForestClassifier(n_estimators = 200, random_state = 42)
# model.fit(symptoms_train, result_train)

# user_proba = model.predict_proba(user_input)

# neg_prob = user_proba[0][0]*100
# pos_prob = user_proba[0][1]*100

# print("The prob that you have covid is: ", pos_prob, "% :D")

import Model
import Feedback

rand_forest, features = Model.train_data('Covid Dataset.csv')
# Feedback.feedback()
user_input = []

for i in features:
    state = False
    while state == False:
        option = input('Do you have/ have experienced: ' + i + '  ')
        if (option == "Y" or option == "y"):
            state = True
            user_input.append(1)
        elif (option == "N" or option == "n"):
            state = True
            user_input.append(0)
        else:
            print("Invalid")
Example #53
0
model_dir = './Model/%s_%dPhase_%dEpoch_%.5fLearnrate_%dRank' % (
    dataset, phase, epoch_num, learn_rate, rank)
result_dir = 'Result/%s_%dCkpt_%dPhase_%dEpoch_%.5fLearnrate_%dRank' % (
    dataset, cpkt_model_number, phase, epoch_num, learn_rate, rank)
test_data_dir = './Data/Test/%s%d' % (dataset, block_size)
if not os.path.exists(result_dir):
    os.makedirs(result_dir)

Cu = tf.placeholder(tf.float32, [None, block_size, block_size, channel])
X_output = tf.placeholder(tf.float32, [None, block_size, block_size, channel])
b = tf.zeros(shape=(tf.shape(X_output)[0], channel - 1, tf.shape(X_output)[2],
                    tf.shape(X_output)[3]))

y = Encode_CASSI(X_output, Cu)
x0 = Init_CASSI(y, Cu, channel)
Prediction = Model.Interface(x0, Cu, phase, rank, channel, reuse=False)

# Model
cost_all = tf.reduce_mean(tf.square(Prediction - X_output))
optm_all = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost_all)
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
sess = tf.Session(config=config)

saver.restore(sess, './%s/model_%d.cpkt' % (model_dir, cpkt_model_number))

filepaths = os.listdir(test_data_dir)
ImgNum = len(filepaths)
Example #54
0
def overlap(GRID, submodels=[""], all=False):

    t0 = time.time()
    folder = './Subgrids/'
    num = int(ltxt(os.popen("ls -1 %s*.dat| wc -l" % folder)))
    data = os.popen("ls -1 %s*.dat" % folder).read().split('\n', num)[:-1]

    if all:
        names = [name for name in data]  # if '_S.' in name or '_MSW' in name]
        files = [ltxt(name) for name in names]

    else:
        submodels = [folder + sub for sub in submodels]
        names = [name for name in submodels]
        files = [ltxt(name) for name in names]

    detected = [tmp.split(folder)[1] for tmp in data]
    read = [tmp.split(folder)[1] for tmp in names]
    print('Number of files detected:', num, '\nFiles detected:', detected,
          '\nNumber of files read:', len(files), '\nFiles read:', read)

    NTotal = GRID.NPoints
    Nx, Ny, Nz = GRID.Nodes

    cm3_to_m3 = 1e6
    dens_back = 5e5 * cm3_to_m3  #Background density
    temp_back = 150.  #Background temperature
    gamma = 7. / 5  #Gamma for diatomic molecules
    kb = 1.38064852e-23  #Boltzmann constant
    H_mass = 1.6733e-27  #kg

    DENS = np.ones(NTotal)  #, dtype='float64') * 0.5 # * dens_back
    TEMP = np.zeros(NTotal)  # * temp_back * dens_back

    ab0 = 5e-8
    ABUND = np.zeros(NTotal)  #np.ones(NTotal) * ab0

    gtd0 = 100.
    GTD = np.zeros(NTotal)  #np.ones(NTotal) * gtd0

    VEL = [np.zeros(NTotal), np.zeros(NTotal), np.ones(NTotal) * 1 * 70000]

    #----------------------
    #----------------------

    #-------------------
    #SUBGRIDS DEFINITION
    #-------------------

    NFiles = len(files)
    CountFiles = np.arange(NFiles)
    lenFiles = [len(file) for file in files]

    dens_tmp = [[{}, {}] for i in CountFiles]
    temp_tmp = [{} for i in CountFiles]
    vel_tmp = [[{} for i in CountFiles] for i in range(3)]
    abund_tmp = [{} for i in CountFiles]
    gtd_tmp = [{} for i in CountFiles]

    hg = 0
    IDList = [[] for i in CountFiles]

    Xgrid, Ygrid, Zgrid = GRID.XYZgrid

    for m in range(len(files)):
        for n in files[m]:

            x, y, z = n[1], n[2], n[3]

            i = mindistance(x, Xgrid, Nx)
            j = mindistance(y, Ygrid, Ny)
            k = mindistance(z, Zgrid, Nz)

            Num = i * (Ny) * (Nz) + j * (Nz) + k
            #ID for the Global Grid
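            # e.g. for Ny = Nz = 100, the cell (i=2, j=3, k=4) gets the flattened ID
            # Num = 2*100*100 + 3*100 + 4 = 20304 (row-major ordering over x, y, z).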

            #if Num in IDList[m]: #Really slow as the size of IDList increases
            try:
                dens_tmp[m][0][Num] += n[4]
                dens_tmp[m][1][Num] += 1
                temp_tmp[m][Num] += n[4] * n[5]
                vel_tmp[0][m][Num] += n[4] * n[6]
                vel_tmp[1][m][Num] += n[4] * n[7]
                vel_tmp[2][m][Num] += n[4] * n[8]
                abund_tmp[m][Num] += n[4] * n[9]
                gtd_tmp[m][Num] += n[4] * n[10]
            except KeyError:
                #else:
                dens_tmp[m][0][Num] = n[4]
                dens_tmp[m][1][Num] = 1
                temp_tmp[m][Num] = n[4] * n[5]
                vel_tmp[0][m][Num] = n[4] * n[6]
                vel_tmp[1][m][Num] = n[4] * n[7]
                vel_tmp[2][m][Num] = n[4] * n[8]
                abund_tmp[m][Num] = n[4] * n[9]
                gtd_tmp[m][Num] = n[4] * n[10]
                IDList[m].append(Num)

#        hg+=1
#        if hg%50000 == 0: print (hg)

        print('Finished with the file: %s' % names[m])

    print('Calculating combined densities, temperatures, etc....')
    for m in range(NFiles):
        for ind in IDList[m]:

            dens_tot = dens_tmp[m][0][ind]

            temp_tmp[m][ind] = temp_tmp[m][ind] / dens_tot
            abund_tmp[m][ind] = abund_tmp[m][ind] / dens_tot
            gtd_tmp[m][ind] = gtd_tmp[m][ind] / dens_tot
            vel_tmp[0][m][ind] = vel_tmp[0][m][ind] / dens_tot
            vel_tmp[1][m][ind] = vel_tmp[1][m][ind] / dens_tot
            vel_tmp[2][m][ind] = vel_tmp[2][m][ind] / dens_tot
            dens_tmp[m][0][ind] = dens_tot / dens_tmp[m][1][ind]

            #----------------
            #FOR GLOBAL GRID
            #----------------

            dens_dum = dens_tmp[m][0][ind]
            temp_dum = temp_tmp[m][ind]
            vel0_dum = vel_tmp[0][m][ind]
            vel1_dum = vel_tmp[1][m][ind]
            vel2_dum = vel_tmp[2][m][ind]
            abund_dum = abund_tmp[m][ind]
            gtd_dum = gtd_tmp[m][ind]

            DENS[ind] += dens_dum
            TEMP[ind] += dens_dum * temp_dum
            VEL[0][ind] += dens_dum * vel0_dum
            VEL[1][ind] += dens_dum * vel1_dum
            VEL[2][ind] += dens_dum * vel2_dum
            ABUND[ind] += dens_dum * abund_dum
            GTD[ind] += dens_dum * gtd_dum

    TEMP = TEMP / DENS
    ABUND = ABUND / DENS
    GTD = GTD / DENS

    VEL[0] = VEL[0] / DENS
    VEL[1] = VEL[1] / DENS
    VEL[2] = VEL[2] / DENS

    VEL = Model.Struct(**{'x': VEL[0], 'y': VEL[1], 'z': VEL[2]})

    ind = np.where(DENS == 1.0)
    DENS[ind] = 1.e9
    ABUND[ind] = ab0
    GTD[ind] = gtd0
    TEMP = np.where(TEMP == 0., 30, TEMP)

    Model.DataTab_LIME(DENS, TEMP, VEL, ABUND, GTD, GRID)
    AllProp = Model.Struct(
        **{
            'GRID': GRID,
            'density': DENS,
            'temperature': TEMP,
            'vel': VEL,
            'abundance': ABUND,
            'gtd': GTD
        })

    print('Elapsed time: %.3fs' % (time.time() - t0))

    return AllProp
Example #55
0
    print('Load trainset from save')
else:
    trainset = New_Dataset(train_file, splitter, batch_size)
    f = file('Input/trainset.save', 'wb')
    cPickle.dump(trainset, f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()
print('Load trainset successfully')
testset = Test_Dataset(test_file, splitter, batch_size, trainset)
print('Load testset successfully')
testDataset2 = ImpDataset.ImpDataset('')
print('Load testDataset2 successfully')
lr = 0.0001
reg = 0.01
topK = 10

model = Model(trainset, testset, testDataset2, trainset.num_user,
              trainset.num_item, dim, reg, lr, 'Model/')
print('Create model successfully')

# model.train(trainset.epoch)  #for test

t1 = time()
ahit, andcg = model.test(topK)
best_hr, best_ndcg, best_iter = ahit, andcg, -1
print('Init: HR = %.4f, NDCG = %.4f\t [%.1f s]' % (ahit, andcg, time() - t1))

for i in xrange(1, num_epoch):
    model.train(trainset.epoch)
    t2 = time()
    ahit, andcg = model.test(topK, i)
    print('Epoch %s: HR = %.4f, NDCG = %.4f\t [%.1f s]' %
          (i, ahit, andcg, t2 - t1))
Example #56
0
    def do_EOF(self, line):
        return True

    def do_print(self, line):
        for each in myModel.data:
            print(each)

    def do_quit(self, line):
        return True


#--- Set up Modbus Interface
timeout = .02
ser.flush()
interface = tkRtu.RtuMaster(ser)
interface.set_timeout(timeout)

#----- MAIN -----
mydevices = Device.MakeDevicesfromCfg("contherm.cfg", interface.execute)
mydevices['test'] = Device.Dummy()
logger.info("Created devices: %s" % mydevices)

myModel = Model.FileWriterModel(devices=mydevices, targetfile="test.csv")
logger.debug("Model initiated with devices: %s" % myModel.clock.callbacks)

console = logger_console()

if __name__ == '__main__':
    myModel.start()
    console.cmdloop()
Example #57
0
    #     transforms.RandomCrop(size=448),
    #     transforms.ToTensor(),  # turn a PIL.Image [0,255] shape(H,W,C) into [0,1.0] shape(C,H,W) torch.FloatTensor
    #     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    # ])
    test_set = CUB200.CUB(root='./DATA/CUB_200_2011',
                          is_train=False,
                          transform=test_transform,
                          data_len=None)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=8,
                                              drop_last=False,
                                              pin_memory=True)

    net = Model.MyNet()
    if cuda_flag:
        net = net.cuda()
    ckpt = torch.load(load_ckpt)
    net.load_state_dict(ckpt['net_state_dict'])
    net.eval()
    with torch.no_grad():
        total = 0
        test_correct1 = 0
        test_correct2 = 0
        test_correct3 = 0
        test_correct4 = 0
        test_correct5 = 0
        test_bar = tqdm(test_loader)
        for data in test_bar:
            test_bar.set_description("Testing eval")
Example #58
0
    config["model"]["recurrent_type"] = "LSTM"
    config["model"]["bidirectional"] = True
    config["model"]["stride_mode"] = stride_mode

    if device is None:
        device = 'cuda'
    else:
        device = 'cuda' if device == 'gpu' else 'cpu'

    os.makedirs(output_path, exist_ok=True)

    logger = logging.getLogger("logger")
    handler = setHandler(filename=os.path.join(output_path, "inference.log"))
    logger.addHandler(handler)

    model = Model.UNet_recurrent(config)
    #print(model)
    #pdb.set_trace()

    if checkpoint_load_path is None:
        checkpoint_load_path = config["inference"]["checkpoint"]

    load_checkpoint(checkpoint_load_path, model)

    if rir_file is None:
        rir_file = "/user/HS228/jz00677/PYTHON_project/RIR_Generator/rir_5R_stepT60_test.csv"
    model = model.to(device)
    inference_step_T60(device,
                       output_path=output_path,
                       model=model,
                       rir_file=rir_file,
Example #59
0
def main(args, tokenize):
    args.log_path = args.log_path + args.data_name + '_' + args.model_name + '.log'
    data_path = args.data_path + args.data_name + '.pt'
    standard_data_path = args.data_path + args.data_name + '_standard.pt'

    # init logger
    logger = utils.get_logger(args.log_path)

    # load data
    logger.info('loading data......')
    total_data = torch.load(data_path)
    standard_data = torch.load(standard_data_path)
    train_data = total_data['train']
    dev_data = total_data['dev']
    test_data = total_data['test']
    dev_standard = standard_data['dev']
    test_standard = standard_data['test']

    # init model
    logger.info('initial model......')
    model = Model.BERTModel(args)
    if args.ifgpu:
        model = model.cuda()

    # print args
    logger.info(args)

    if args.mode == 'test':
        logger.info('start testing......')
        test_dataset = Data.ReviewDataset(train_data, dev_data, test_data,
                                          'test')
        # load checkpoint
        logger.info('loading checkpoint......')
        checkpoint = torch.load(args.checkpoint_path)
        model.load_state_dict(checkpoint['net'])
        model.eval()

        batch_generator_test = Data.generate_fi_batches(dataset=test_dataset,
                                                        batch_size=1,
                                                        shuffle=False,
                                                        ifgpu=args.ifgpu)
        # eval
        logger.info('evaluating......')
        f1 = test(model, tokenize, batch_generator_test, test_standard,
                  args.beta, logger)

    elif args.mode == 'train':
        args.save_model_path = args.save_model_path + args.data_name + '_' + args.model_name + '.pth'
        train_dataset = Data.ReviewDataset(train_data, dev_data, test_data,
                                           'train')
        dev_dataset = Data.ReviewDataset(train_data, dev_data, test_data,
                                         'dev')
        test_dataset = Data.ReviewDataset(train_data, dev_data, test_data,
                                          'test')
        batch_num_train = train_dataset.get_batch_num(args.batch_size)

        # optimizer
        logger.info('initial optimizer......')
        param_optimizer = list(model.named_parameters())
        optimizer_grouped_parameters = [{
            'params': [p for n, p in param_optimizer if "_bert" in n],
            'weight_decay':
            0.01
        }, {
            'params': [p for n, p in param_optimizer if "_bert" not in n],
            'lr':
            args.learning_rate,
            'weight_decay':
            0.01
        }]
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args.tuning_bert_rate,
                          correct_bias=False)

        # load saved model, optimizer and epoch num
        if args.reload and os.path.exists(args.checkpoint_path):
            checkpoint = torch.load(args.checkpoint_path)
            model.load_state_dict(checkpoint['net'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            start_epoch = checkpoint['epoch'] + 1
            logger.info(
                'Reload model and optimizer after training epoch {}'.format(
                    checkpoint['epoch']))
        else:
            start_epoch = 1
            logger.info('New model and optimizer from epoch 0')

        # scheduler
        training_steps = args.epoch_num * batch_num_train
        warmup_steps = int(training_steps * args.warm_up)
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=training_steps)

        # training
        logger.info('begin training......')
        best_dev_f1 = 0.
        for epoch in range(start_epoch, args.epoch_num + 1):
            model.train()
            model.zero_grad()

            batch_generator = Data.generate_fi_batches(
                dataset=train_dataset,
                batch_size=args.batch_size,
                ifgpu=args.ifgpu)

            for batch_index, batch_dict in enumerate(batch_generator):

                optimizer.zero_grad()

                # q1_a
                f_aspect_start_scores, f_aspect_end_scores = model(
                    batch_dict['forward_asp_query'],
                    batch_dict['forward_asp_query_mask'],
                    batch_dict['forward_asp_query_seg'], 0)
                f_asp_loss = utils.calculate_entity_loss(
                    f_aspect_start_scores, f_aspect_end_scores,
                    batch_dict['forward_asp_answer_start'],
                    batch_dict['forward_asp_answer_end'])
                # q1_b
                b_opi_start_scores, b_opi_end_scores = model(
                    batch_dict['backward_opi_query'],
                    batch_dict['backward_opi_query_mask'],
                    batch_dict['backward_opi_query_seg'], 0)
                b_opi_loss = utils.calculate_entity_loss(
                    b_opi_start_scores, b_opi_end_scores,
                    batch_dict['backward_opi_answer_start'],
                    batch_dict['backward_opi_answer_end'])
                # q2_a
                f_opi_start_scores, f_opi_end_scores = model(
                    batch_dict['forward_opi_query'].view(
                        -1, batch_dict['forward_opi_query'].size(-1)),
                    batch_dict['forward_opi_query_mask'].view(
                        -1, batch_dict['forward_opi_query_mask'].size(-1)),
                    batch_dict['forward_opi_query_seg'].view(
                        -1, batch_dict['forward_opi_query_seg'].size(-1)), 0)
                f_opi_loss = utils.calculate_entity_loss(
                    f_opi_start_scores, f_opi_end_scores,
                    batch_dict['forward_opi_answer_start'].view(
                        -1, batch_dict['forward_opi_answer_start'].size(-1)),
                    batch_dict['forward_opi_answer_end'].view(
                        -1, batch_dict['forward_opi_answer_end'].size(-1)))
                # q2_b
                b_asp_start_scores, b_asp_end_scores = model(
                    batch_dict['backward_asp_query'].view(
                        -1, batch_dict['backward_asp_query'].size(-1)),
                    batch_dict['backward_asp_query_mask'].view(
                        -1, batch_dict['backward_asp_query_mask'].size(-1)),
                    batch_dict['backward_asp_query_seg'].view(
                        -1, batch_dict['backward_asp_query_seg'].size(-1)), 0)
                b_asp_loss = utils.calculate_entity_loss(
                    b_asp_start_scores, b_asp_end_scores,
                    batch_dict['backward_asp_answer_start'].view(
                        -1, batch_dict['backward_asp_answer_start'].size(-1)),
                    batch_dict['backward_asp_answer_end'].view(
                        -1, batch_dict['backward_asp_answer_end'].size(-1)))
                # q_3
                sentiment_scores = model(
                    batch_dict['sentiment_query'].view(
                        -1, batch_dict['sentiment_query'].size(-1)),
                    batch_dict['sentiment_query_mask'].view(
                        -1, batch_dict['sentiment_query_mask'].size(-1)),
                    batch_dict['sentiment_query_seg'].view(
                        -1, batch_dict['sentiment_query_seg'].size(-1)), 1)
                sentiment_loss = utils.calculate_sentiment_loss(
                    sentiment_scores, batch_dict['sentiment_answer'].view(-1))

                # loss
                loss_sum = f_asp_loss + f_opi_loss + b_opi_loss + b_asp_loss + args.beta * sentiment_loss
                loss_sum.backward()
                optimizer.step()
                scheduler.step()

                # train logger
                if batch_index % 10 == 0:
                    logger.info(
                        'Epoch:[{}/{}]\t Batch:[{}/{}]\t Loss Sum:{}\t '
                        'forward Loss:{};{}\t backward Loss:{};{}\t Sentiment Loss:{}'
                        .format(epoch, args.epoch_num, batch_index,
                                batch_num_train, round(loss_sum.item(), 4),
                                round(f_asp_loss.item(), 4),
                                round(f_opi_loss.item(), 4),
                                round(b_asp_loss.item(), 4),
                                round(b_opi_loss.item(), 4),
                                round(sentiment_loss.item(), 4)))

            # validation
            batch_generator_dev = Data.generate_fi_batches(dataset=dev_dataset,
                                                           batch_size=1,
                                                           shuffle=False,
                                                           ifgpu=args.ifgpu)
            f1 = test(model, tokenize, batch_generator_dev, dev_standard,
                      args.inference_beta, logger)
            # save model and optimizer
            if f1 > best_dev_f1:
                best_dev_f1 = f1
                logger.info('Model saved after epoch {}'.format(epoch))
                state = {
                    'net': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'epoch': epoch
                }
                torch.save(state, args.save_model_path)

            # test
            batch_generator_test = Data.generate_fi_batches(
                dataset=test_dataset,
                batch_size=1,
                shuffle=False,
                ifgpu=args.ifgpu)
            f1 = test(model, tokenize, batch_generator_test, test_standard,
                      args.inference_beta, logger)

    else:
        logger.info('Error mode!')
        exit(1)
Example #60
0
def bbox_correct(bbox, example_bboxes):
    # Return True if the candidate bbox overlaps any of the example (ground-truth) bboxes.
    for output_bbox in example_bboxes:
        if bbox.overlaps(output_bbox):
            return True
    return False


if __name__ == '__main__':
    combined_dataset = load_inria(
        '/mnt/data/Datasets/pedestrians/INRIA/INRIAPerson')
    nn_im_w = 64
    nn_im_h = 160
    with tf.Session() as sess:
        model = Model.BooleanModel(sess)
        model.load('saved_model/', nn_im_w, nn_im_h)

        image_count = 0
        HOG_TP_count = 0
        HOG_FP_count = 0

        NN_TP_count = 0
        NN_FP_count = 0
        NN_FN_count = 0
        for image, example_bboxes in basic_dataset_iterator(
                combined_dataset.test, 320, 240):
            for test_bbox, confidence in generate_bboxes(image):
                nn_confidence = nn_eval_image(model, image, nn_im_w, nn_im_h)
                reject = (nn_confidence + confidence) / 2 < 0.5