Example #1
 def update(self, itemid, data):
     assert(self.table_name != '')
     if data.has_key('imagefile') and len(data.imagefile['value']) > 10:
         data['itemtype'] = self.table_name
         data.Imageid = new_image_id = Image().insert(data) # set Imageid for update
         Image().setItemID(new_image_id, itemid)
     Model.update(self,itemid,data)
Example #2
	def __init__(self):
		Model.__init__(self)
		self.source = vtk.vtkSphereSource()
		self.source.SetCenter(0.0, 0.0, 0.0)
		self.source.SetRadius(0.5)
		self.source.Update()
		self.Update()
Example #3
 def insert(self,data):
     if data.has_key('imagefile'):
         assert(data.imagefile.has_key('filename') and data.imagefile.has_key('value'))
         data.imagefile['filetype'] = data.imagefile['filename'].rpartition('.')[2].lower()
         validated_msg = self._insertValidate(data)
         # if validated_msg is not None, the posted image data is invalid
         if validated_msg is not None:
             raise Exception(validated_msg)
         # insert the record
         new_id = Model.insert(self,data)
         file_path = '%s%d.%s' % (self._getSaveDir(data), new_id, data.imagefile['filetype'])
         # update the uri column in the database
         self._getDB().update('update '+self.table_name+' set uri=%s where '+self.table_name+'id=%s' ,('%s%d.%s' % (self._getUriBase(data), new_id, data.imagefile['filetype']) ,new_id))
         # create the target directory if it does not exist
         if not os.path.exists(file_path.rpartition('/')[0]):
             os.mkdir(file_path.rpartition('/')[0])
         # save the image (binary mode)
         with open(file_path,'wb') as f:
             f.write(data.imagefile['value'])
         # resize the image unless the caller set ifResize
         if not data.has_key('ifResize'):
             self.resizeImage(file_path)
     else:
         new_id = Model.insert(self, data)
     return new_id
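For context, callers of insert() supply the upload as a mapping with 'filename' and 'value' keys. A hypothetical usage sketch in the same Python 2 / web.py style as the snippet (ImageModel is an assumed name standing in for the concrete Model subclass, not something from the project):

import web

# Hypothetical caller of the insert() above.
raw_bytes = open('photo.jpg', 'rb').read()
data = web.Storage(title='demo',
                   imagefile={'filename': 'photo.jpg', 'value': raw_bytes})
new_id = ImageModel().insert(data)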
Example #4
File: h2o_DL.py Project: Aakash282/1ia
	def __init__(self, ID, params):
		Model.__init__(self, ID, params)
		h2o.init()

		datadir = os.path.expanduser('~') +'/FSA/data/'
		trainingFile = datadir + params[1][0]
		valFile = datadir + params[1][1]
		testingFile = datadir + params[1][2]


		self.trainData = h2o.import_file(path=trainingFile)
		self.valData = h2o.import_file(path=valFile)
		#self.valData = self.trainData
		self.testData = h2o.import_file(path=testingFile)

		# print self.trainData.col_names()
		# drop the invalid columns
		self.trainData = self.trainData.drop("away_score").drop("home_score")
		self.valData = self.valData.drop("away_score").drop("home_score")
		self.testData = self.testData.drop("away_score").drop("home_score")

		self.params = params

		if self.params[0] == False:
			self.trainData = self.trainData.drop('spread')
			# self.valData   = self.valData.drop('spread')
			self.testData  = self.testData.drop('spread')

		# for h2o, creating the model is the same as training the model, so
		# we need to hold off here
		self.model = None
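Since H2O builds the model at training time, the class presumably constructs and fits the estimator in a later step. A minimal sketch of what that might look like; the method name train, the response column 'spread', and the hyperparameters are assumptions, not the author's code:

	def train(self):
		# Hypothetical sketch completing the "hold off" in __init__ above.
		from h2o.estimators.deeplearning import H2ODeepLearningEstimator
		response = 'spread'  # assumed response column, per the params[0] logic
		predictors = [c for c in self.trainData.col_names if c != response]
		self.model = H2ODeepLearningEstimator(epochs=10, hidden=[64, 64])
		self.model.train(x=predictors, y=response,
		                 training_frame=self.trainData,
		                 validation_frame=self.valData)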
Example #5
def runFull():
    nbrUserids = KNNSearch.Search([ 1.0 , -1.2,  1.0, 7.79 ], 2000)
    
#     print "Neighbours\n"
#     pprint.pprint(nbrUserids)
    
    split = FeatureSet.featureExtract(nbrUserids)
    
#     print "split\n"
#     pprint.pprint(split)
    
#     testData = getTestData()
#     print "Test Data\n"
#     pprint.pprint(testData);
#     
#     sys.exit(0)
    
    featureSet = split[0][0]
    interested = split[0][1]
    notinterested = split[0][2]
    
    z = [True] * len(featureSet[0])
    w = [True] * len(featureSet[0])
    
    C = 0.03
    #C = 0.3
    model = Model(compress=z, has_none=w, C=C)
    model.fit(featureSet, interested)
    
    testData = getTestData()
    
    result = runModel(model, testData)
    
    print result
Example #6
 def __init__(self, bactDensity, chemDensity, dt, lamda, d, e):
     self.motility = d
     self.chemSens = lamda * bactDensity/(1+e*bactDensity)
     self.liveCycle = 0
     self.chemProd = bactDensity
     self.chemDegr = 1
     Model.__init__(self, bactDensity, chemDensity, dt)
Example #7
	def __init__(self, amt, radius):
		Model.__init__(self)
		self.source = vtk.vtkAppendPolyData()
		for i in range(amt):
			opX = 1.0
			opY = 1.0
			opZ = 1.0
			if random() > 0.5:
				opX *= -1.0
			if random() > 0.5:
				opY *= -1.0
			if random() > 0.5:
				opZ *= -1.0
			sRad = 0.25 + ( random() * 0.25 )
			x = float(random() * radius) * opX
			y = float(random() * radius) * opY
			z = float(random() * radius) * opZ
			s = vtk.vtkSphereSource()
			s.SetCenter(x,y,z)
			s.SetRadius(float(sRad))
			s.Update()
			self.source.AddInput(s.GetOutput())
		#add center
		s = vtk.vtkSphereSource()
		s.SetCenter(0.0, 0.0, 0.0)
		s.SetRadius(0.5)
		s.Update()
		self.source.AddInput(s.GetOutput())
		self.Update()
Example #8
class Controller():
    '''Main class for controlling the operations of the application'''
    def __init__(self):
        #Create a new Tkinter interface
        self.root = Tk()
        #Set an exit protocol
        self.root.protocol("WM_DELETE_WINDOW", self.exitRoot)

        #Create a model
        self.model = Model()
        self.model.loadConfig() #Load default configuration parameters
        #self.view  = View()

        #Start timer thread
        self.txTimer = TimerThread(self, "tmr")
        #Create joystick interface
        self.jsFrame = JoystickFrame(self.root)
        self.joystick = self.jsFrame.getJSHandle()
        self.statusBox = self.jsFrame.getTextHandle()

        #Initialise a telnet rxtx thread for wireless communication
        self.rxThread = RxTxThread(self,"rxtxthread", self.model.getHost(), self.model.getPort())
        if (self.rxThread.getTN() == 0):
            self.statusBox.setText('Could not establish a connection. Terminating...')
            return
        #Start Threads
        self.rxThread.start()
        self.txTimer.start()

        self.statusBox.setText('Connected\n')

        print self.rxThread.getRXText()

        self.rxThread.checkConnection()
        self.root.mainloop()

    def processMessages(self, messages):
        '''Displays received messages in a window box'''
        for msg in messages:
            self.statusBox.setText(msg + '\n')

    def transmitControl(self):
        '''Transmits the coordinates of the joystick if it is being actuated.
        Not complete in interfacing.'''
        if not self.joystick.isReleased():         #Joystick in use
            spdL,spdR =  self.joystick.getSpeeds() #Retrieve position as speeds
            print spdL, spdR
        if self.jsFrame.keyReady():                # WASD Control
            keyChar = self.jsFrame.getJSKey()      # Retrieve valid keypresses
            self.statusBox.setText("Pressed: "+keyChar+"\n")

            self.rxThread.txCmd(keyChar)           #Transmit typed character

    def exitRoot(self):
        '''Protocol for exiting main application'''
        self.rxThread.txCmd('!') #Stop robot
        self.txTimer.pause(True) #Stop timer
        self.rxThread.stop()     #Stop thread
        self.root.destroy()     
Example #9
 def __init__(self, controller):
   from collections import OrderedDict
   Model.__init__(self, controller)
   self._currentPlotSet = None
   self._plotSets = OrderedDict()    # key: plotSet ID, value: instance of XYPlotSetModel. We use an OrderedDict so that
                                     # when removing elements, we can easily re-select the last-but-one.
   self._lockRepaint = False  # if True, repaint routines are blocked.
   self._plotSetsRepaint = set() # plots waiting for repaint/update while repaint is locked
Example #10
	def __init__(self):
		Model.__init__(self)
		self.source = vtk.vtkCubeSource()
		self.source.SetCenter(0.0, 0.0, 0.0)
		self.source.SetXLength(1.0)
		self.source.SetYLength(1.0)
		self.source.SetZLength(1.0)
		self.source.Update()
		self.Update()
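Examples #2, #7 and #10 all assume a Model base class that turns self.source into something renderable when Update() is called. A minimal sketch of what such a base might look like, inferred from the calls above and the VTK 5 style API implied by AddInput() in Example #7 (an assumption, not the project's actual base class):

import vtk

class Model(object):
	"""Hypothetical base: wires self.source into a mapper/actor pipeline."""
	def __init__(self):
		self.source = None
		self.mapper = vtk.vtkPolyDataMapper()
		self.actor = vtk.vtkActor()
		self.actor.SetMapper(self.mapper)

	def Update(self):
		# VTK 5 style connection, matching source.AddInput() in Example #7
		self.mapper.SetInput(self.source.GetOutput())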
Example #11
class Controller:
    """ a 'middleman' between the View (visual aspects) and the Model (information) of the application.
        It ensures decoupling between the two.
    """

    def __init__(self, app):
        # initialize the model and view
        # * The model handles all the data, and signal-related operations
        # * The view handles all the data visualization
        self.model = Model()
        self.view = View()

        # subscribe to messages sent by the view
        pub.subscribe(self.parse_file, "FILE PATH CHANGED")
        pub.subscribe(self.reprocess_fft, "FFT CONTROLS CHANGED")

        # subscribe to messages sent by the model
        pub.subscribe(self.signal_changed, "SIGNAL CHANGED")
        pub.subscribe(self.signal_changed, "FFT CHANGED")

        self.view.Show()

    def parse_file(self, message):
        """
        Handles "FILE PATH CHANGED" messages, sent by the View. It tells the model to parse a new file.
        message.data should contain the path of the new file
        """
        try:
            self.model.parse_file(message.data)

        except Exception as exception:
            self.view.show_exception(
                "Error reading file", "The following error happened while reading the file:\n%s" % str(exception)
            )

    def reprocess_fft(self, message):
        """
        Handles "FFT CONTROLS CHANGED" messages from the View. It tells the model to re-process the FFT.
        message.data should contain the array [window, slices, max_peaks]
        """
        self.model.reprocess_fft(*message.data)

    def signal_changed(self, message):
        """
        Handles "SIGNAL CHANGED" messages sent by the model. Tells the view to update itself.
        message is ignored
        """
        self.view.signal_changed(self.model)

    def fft_changed(self, message):
        """
        Handles "FFT CHANGED" messages sent by the model. Tells the view to update itself.
        message is ignored
        """
        self.view.fft_changed(self.model)
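For context, the Model presumably notifies this controller through the same pubsub topics. A minimal sketch of the sending side, assuming the legacy wx.lib.pubsub API in which listeners receive a message object with a .data attribute; the _read_signal and _compute_fft helpers are hypothetical:

from wx.lib.pubsub import Publisher as pub

class Model:
    def parse_file(self, path):
        # assumed helper; reads the signal, then notifies subscribers
        self.signal = self._read_signal(path)
        pub.sendMessage("SIGNAL CHANGED", self.signal)

    def reprocess_fft(self, window, slices, max_peaks):
        # assumed helper; recomputes the FFT, then notifies subscribers
        self.fft = self._compute_fft(window, slices, max_peaks)
        pub.sendMessage("FFT CHANGED", self.fft)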
Example #12
 def __init__(self,root='',database_path='data/',database_name='mydatabase.db'):
     Model.__init__(self,root,database_path,database_name)
     self.name = 'courses'
     self.columns["name"] = 'TEXT'
     self.columns["semester"] = 'TEXT'
     self.columns["type"] = 'TEXT'
     self.columns["lecture_group"] = 'TEXT'
     self.columns["day"] = 'TEXT'
     self.columns["start_time"] = 'TEXT'
     self.columns["end_time"] = 'TEXT'
     self.columns["venue"] = 'TEXT'
Example #13
	def generateJson(self,outputDictionary,analyzeData,key):
		inner_json_list = []
		questions = analyzeData[key]
		for question in questions:
			answers = outputDictionary[question]
			dict_of_answers =  dict(Counter(answers))
			DataModel = Model(key,question,dict_of_answers) 	
			DataModelJson = DataModel.toJson()
			inner_json_list.append(DataModelJson)
		inner_json = json.dumps({'data':inner_json_list})	
		return inner_json
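For instance, if the collected answers to a question are ['yes', 'yes', 'no'], the Counter step collapses them into per-answer counts before they are wrapped in a Model and serialized; a small illustration of just that step:

from collections import Counter

# Illustration of the dict(Counter(...)) step used in generateJson above.
answers = ['yes', 'yes', 'no']
dict_of_answers = dict(Counter(answers))  # {'yes': 2, 'no': 1}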
Example #14
class Controller(object):
  def __init__(self):
    self.model = Model()
    self.view = View()

  def run(self):
    name = self.view.welcome()
    if name == 'Ken':
      print(self.model.say_hello(name))
    else:
      print(self.model.say_bye())
Example #15
 def __reBuildViewTree(self):
     """Creates a new Model using the current folder"""
     self.view.filesTree.buttons.set_sensitive(False)
     if len(self.folders) == 1:
         self.model = Model([self.folders[0]], self.view.progressBar)
     else:
         self.model = Model(self.folders, self.view.progressBar, group=True)
     self.saveCache()
     self.__refreshViewTree()
     self.view.vbox.remove(self.view.progressBar)
     self.model.lastUpdate = time.time()
     self.view.filesTree.buttons.set_sensitive(True)
Example #16
class Controller:
    def __init__(self):
        self.model = Model()
        self.view = View()
        self.firstTime = True
        self.result = 0.0
        self.a = 0.0
        self.b = 0.0
        self.usePrev = 0
    
    def run(self):
        while True:
            self.view.printMenu()
            selection = self.view.inputSelection()
            if selection <= 0:
                continue
            elif selection == 5:
                self.view.terminate()
                return
            elif not self.firstTime:
                if self.model.isNumber(self.result):
                    self.usePrev = self.view.usePrevious(self.result)
                else:
                    self.usePrev = 0
            else:
                self.firstTime = False
            if self.usePrev == 0:
                # Enter both operands
                self.a = self.view.oneOp(1)
                self.b = self.view.oneOp(2)
            elif self.usePrev == 1:
                # Enter second operand
                self.a = self.result
                self.view.printOp(1, self.a)
                self.b = self.view.oneOp(2)
            elif self.usePrev == 2:
                # Enter first operand
                self.a = self.view.oneOp(1)
                self.b = self.result
                self.view.printOp(2, self.b)
            else:
                # ERROR: Should never reach this block
                self.view.printInvalidArg()
                continue
            self.view.printCalc(selection, self.a, self.b)
            self.result = self.model.calculate(selection, self.a, self.b)
            self.view.printResult(self.result)
            if self.view.anotherOp():
                continue
            else:
                return
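The Model this controller drives only needs two operations. A minimal sketch, assuming menu selections 1-4 map to addition, subtraction, multiplication and division (the real Model is not shown in the example):

class Model:
    # Hypothetical Model matching the calls above: calculate() dispatches on
    # the menu selection, isNumber() guards the "reuse previous result" path.
    def calculate(self, selection, a, b):
        if selection == 1:
            return a + b
        elif selection == 2:
            return a - b
        elif selection == 3:
            return a * b
        elif selection == 4:
            return a / b if b != 0 else float('nan')

    def isNumber(self, value):
        return isinstance(value, (int, float))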
Example #17
def test1(transition_samples):
    def momentum(current, previous, decay):
        new = current + decay * previous
        return new

    w_init = [-1.3, -1.2, -1, -0.8, -0.8, -1.4, -1.5, -3.0, -2.0, -1.0, -0.3, -0.5, -8.0, -3.0]

    # w_init /=np.linalg.norm(w_init)
    steps = 10
    diff = []
    m = DiscModel()
    model = Model(m, w_init)
    initial_transition = model.transition_f
    policy = caus_ent_backward(model.transition, model.reward_f, 3, steps, conv=0.1, z_states=None)
    start_states = [400, 45, 65, 67, 87, 98, 12, 34, 54, 67, 54, 32, 34, 56, 80, 200, 100, 150]
    # statistics = [generate_test_statistic(policy,model,start_state,steps) for start_state in start_states]
    statistics, dt_states_base = generate_test_statistic(policy, model, start_states, steps)

    model.w = [-1, -1.2, -1, -0.8, -0.8, -4.4, -2, -2.0, -3.0, -1.0, -2.3, -1.5, -4.0, -3.0]
    # model.w =[-2.,-0.6,-4.,-4.,-3.,-5.,-2.,-0.5,-4.,-0.8,-4.,-3.,-5.]
    # model.w /=np.linalg.norm(model.w)
    model.buildRewardFunction()
    if transition_samples != 1:
        model.buildTransitionFunction(transition_samples, learn=False)
    transition_diff = np.sum(np.absolute(initial_transition - model.transition_f))
    initial_transition = 0
    gamma = 0.04
    iterations = 110
    for i in range(iterations):
        policy2 = caus_ent_backward(model.transition, model.reward_f, 1, steps, conv=0.1, z_states=None)
        # gradients = np.array([(statistics[j] - generate_test_statistic(policy,model,start_state,steps)) for j,start_state in enumerate(start_states)])
        state_freq, dt_states_train = generate_test_statistic(policy2, model, start_states, steps)
        gradients = statistics - state_freq
        if i == 0:
            image = np.absolute(dt_states_train - dt_states_base)
            gradient = gradients
        else:
            gradient = momentum(gradients, prev, 0.8)
            image = np.append(image, np.absolute(dt_states_train - dt_states_base), axis=1)
        model.w = model.w * np.exp(-gamma * gradient)
        # model.w /=np.linalg.norm(model.w)
        prev = gradient
        gamma = gamma * 1.04

        model.buildRewardFunction()
        print "Iteration", i
        print "Gradient", gradient
        print "New Weights", model.w
        print "Real weights", w_init
        print "Policy Difference", np.sum(np.sum(np.absolute(policy - policy2)))
        diff.append(np.sum(np.sum(np.absolute(policy - policy2))))
    policy_diff = np.sum(np.sum(np.absolute(policy - policy2)))
    w_diff = np.absolute(w_init - model.w)
    grad = np.sum(np.absolute(gradient))
    return image, diff, grad, w_diff, transition_diff
Example #18
 def __init__(self,configModel,utils,config,strTrial):
     Model.__init__(self,configModel,utils,strTrial)
     self.configPath = utils.MODEL_CONFIG_PATH   + self.tag + \
                                           '_t' + strTrial
     self.numIter              = config.SVD_NUM_ITER
     self.SVDBufferPath        = utils.SVDFEATURE_BUFFER_BINARY
     self.learningRate         = config.SVD_LEARNING_RATE
     self.regularizationItem   = config.SVD_REGULARIZATION_ITEM
     self.regularizationUser   = config.SVD_REGULARIZATION_USER
     self.regularizationGlobal = config.SVD_REGULARIZATION_GLOBAL
     self.numFactor            = config.SVD_NUM_FACTOR
     self.activeType           = config.SVD_ACTIVE_TYPE
     self.modelOutPath         = utils.SVDFEATURE_MODEL_OUT_PATH
     self.SVDFeatureBinary     = utils.SVDFEATURE_BINARY
     self.SVDFeatureInferBinary= utils.SVDFEATURE_INFER_BINARY
Example #19
def run_trial(size=11, graph_type='paper', pi=.5, phi=.5, delta=.5):

  model = Model()
  model.updatePi(pi)
  model.updatePhi(phi)
  model.updateDelta(delta)
  model.updateSize(size)
  typeFunctions = {'ER': model.graphErdosRenyi,
                   'BA': model.graphBarabasiAlbert,
                   'WS': model.graphWattsStrogatz,
                   'paper': model.graphPaperGraph,
                   'cool': model.graphCoolGraph}
  typeFunctions[graph_type]()  # build the selected graph

  return sum(model.calculateEquilibrium()[0])
Example #20
 def test_file_input(self):
     self.myFiler = Filer()
     self.myValidator = Validator()
     self.myModel = Model(self.myFiler,self.myValidator)
     self.myFiler.read("TestData.csv")
     self.myModel.toDataSet()
     self.failIfEqual(self.myModel.get_data_set(), None)
Example #21
 def getAll(self, env=None):
     imglinks = Model.getAll(self,env)
     image_model = Image()
     for imglink in imglinks:
         img = image_model.get(imglink.Imageid)
         imglink['uri'] = img.uri if img is not None else ''
     return imglinks
Example #22
File: User.py Project: ajiexw/old-zarkpy
    def insert(self, data):
        new_id = Model.insert(self, data)
        if data.has_key('imagefile'):
            self._savePortraitImg(new_id, data.imagefile)
            self._setPortraitDefault(new_id)

        return new_id
Example #23
File: User.py Project: ajiexw/old-zarkpy
    def update(self, item_id, data):
        from controller import ImageConvert  # imported here to avoid importing controller.* in the middle of importing model, which would create an import dependency
        # when a user becomes an "expert" (daren), sync to third-party sites; this is business logic, but it lives here
        old_item = self.get(item_id)
        daren_title = data.get('daren_title','').encode('utf-8','ignore').strip()
        if old_item and old_item.has_key('daren_title') and (old_item.daren_title.strip() != daren_title) and len(daren_title) > 0:
            user = self.get(item_id)
            if user and user.cover_url:
                pic = ImageConvert().getSmallPortrait(user.cover_url, user.small_portrait, 150)
            else:
                pic = None

            getController('Share').shareAll(old_item.Userid,
                    '【来凹凹啦, 找到最漂亮的自己】@凹凹啦 美妆前沿 我是凹凹啦美妆达人了, 我分享的美妆心得都在这里哦! 求围观, 求关注! 我的美妆主页',
                    url='/user/'+str(old_item.Userid), pic=pic )
            # set the user's "expert" medal
            if user:
                self._getDB().delete("delete from UserHasMedal where Userid = %s and Medalid in (select Medalid from Medal where name like 'daren_')", (user.Userid, ))
                daren_medal_id = self._getDB().fetchFirst("select Medalid from Medal where name=%s ", ('daren_' + daren_title,))
                if daren_medal_id:
                    self._getDB().insert('insert into UserHasMedal (Userid, Medalid ) values (%s, %s);', (user.Userid, daren_medal_id))
            

        # update the image
        ret = Model.update(self, item_id, data)
        if data.has_key('imagefile'):
            self._savePortraitImg(item_id, data.imagefile)
            self._setPortraitDefault(item_id)
        if data.has_key('imagefile') or data.has_key('small_portrait') or data.has_key('big_portrait'):
            self._removeSmallFiles(item_id)

        return ret
Example #24
 def test_get_gender(self):
     self.myModel = Model()
     self.myModel.data_handler.read_in("TestData.csv")
     self.expected = 2
     self.failUnlessEqual(len(self.myModel.get_gender()), self.expected)
     self.failUnlessEqual(self.myModel.get_gender()[0], 2)
     self.failUnlessEqual(self.myModel.get_gender()[1], 5)
Example #25
    def __init__(self, fit_configuration, comm=None, doParallelParticles = False, debug=False):


        self.model = Model( fit_configuration, fit_manager=self)
        self.num_waveforms = self.model.num_waveforms
        self.num_det_params = self.model.num_det_params

        if comm is None:
            self.comm = MPI.COMM_WORLD
        else:
            self.comm = comm
        self.rank = self.comm.Get_rank()
        self.num_workers = self.comm.Get_size() - 1

        self.doParallelParticles = doParallelParticles

        self.tags = self.enum('CALC_LIKE', 'CALC_WF', 'EXIT')

        self.numCalls = 0
        self.LastMem = memory_usage_psutil()

        self.debug = debug
        self.debug_mem_file = "memory_info.txt"

        self.num_wf_params = fit_configuration.num_wf_params
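The enum() helper used to build the MPI message tags is not shown in the example. A common minimal implementation under that assumption (not necessarily the author's):

    # Hypothetical enum() helper matching the self.tags usage above:
    # maps each name to a distinct integer usable as an MPI tag.
    def enum(self, *names):
        from collections import namedtuple
        TagEnum = namedtuple('TagEnum', names)
        return TagEnum(*range(len(names)))

With this, self.tags.CALC_LIKE is 0, self.tags.CALC_WF is 1, and self.tags.EXIT is 2.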
Example #26
class Kontroler(Thread):
    def __init__(self, kolejka_zdarzen, kolejka_makiet):
        Thread.__init__(self)
        self._kolejka_zdarzen = kolejka_zdarzen
        self.kolejka_makiet = kolejka_makiet
        self._model = Model()
        self._stworz_mape_dzialania()

    def _stworz_mape_dzialania(self):
        self._zdarzenie2strategia = {
            type( ZdarzenieKoniec() ) : StrategiaKoniec(self._model),
            type( ZdarzenieOblicz(None, None) ) : StrategiaOblicz(self._model),
        }

    def run(self):
        while True:
            zdarzenie = self._kolejka_zdarzen.get()
            strategia = self._zdarzenie2strategia[type(zdarzenie)]
            try:
                strategia.update(zdarzenie)
            except (Exception) as exc:
                print(exc)
                continue
            if self._model.koniec == True:
                return
            makieta = self._model.pobierz_makiete()
            print "created mock-up {0}".format(makieta.dane)
            self.kolejka_makiet.put(makieta)
Example #27
 def __init__(self, path):
     self.myModel = Model()
     if path != "" and path is not None:
         self.load_data(path)
     self.myView = View(self)
     self.myModel.data_handler.load_pickle_data()
     self.myView.cmdloop()
Example #29
File: area.py Project: hekevintran/Rpg
	def getNeighbourgFromDirection(idArea, direction):
		"""
		area.model.getNeighbourgFromDirection(idArea, direction) -> dict()

		Returns the neighbour of the area given in arguments from a given
		direction.

		@param idArea integer id of the reference area
		@param direction string direction (from the reference area) of the area to
		return, must be a value of area.directions.

		@return dict information about the found area, empty dict if not found.
		"""
		if direction not in directions:
			raise Exception('Unknown direction')

		query = "\
			SELECT\
				ad.id_area,\
				ad.id_region,\
				ad.id_next_area_north,\
				ad.id_next_area_east,\
				ad.id_next_area_south,\
				ad.id_next_area_west\
			FROM\
				area AS ad\
				JOIN area AS ap ON ad.id_area = ap.id_next_area_%s\
			WHERE\
				ap.id_area = ?\
		" % direction

		return Model.fetchOneRow(query, [idArea])
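Assuming directions is the module-level list of valid direction names, a hypothetical call looks like this:

# Hypothetical usage: fetch the area north of the area with id 7.
# 'northern' holds the selected columns of the matching row, or an
# empty dict when area 7 has no northern neighbour.
northern = getNeighbourgFromDirection(7, 'north')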
Example #30
 def __init__(self, path):
     self.myModel = Model(Filer(), Validator())
     if path != "" and path is not None:
         self.load_data(path)
     self.myView = View(self)
     self.myModel.pickle_data()
     self.myView.cmdloop()
Example #31
import torch
from dataset import Data
import Criterion
from RNN import RNN
from Model import Model
""" Currently implemented without momentum """
device = 'cpu'

trainData = Data(test=False, m_train=1184, D=154)
criterion = Criterion.Criterion()
layer = RNN(154, 128)
classifier = Model(layer)

batch_size = 1
epochs = 40
alpha = 0.005  # RNNs generally tolerate a higher learning rate, which helps offset vanishing gradients
# 0.01 gave good results when overfitting a small subset of the data
for epoch in range(epochs):
    correct = 0
    count = 0
    totloss = 0
    tot2loss = 0
    if (epoch >= 7):
        alpha = 0.002
    if (epoch >= 25):
        alpha = 0.001
    for i in range(0, trainData.m, batch_size):  # CHANGED
        # print(i)
        # print("Whh", layer.Whh)
        # print("Wxh", layer.Wxh)
        # print("Why", layer.Why)
Example #32
from Model import Model
import numpy as np
from Intermediary import Intermediary

transformation_matrix = [[0.00257904, 0.0, 0.0, 0.0],
                         [0.0, 0.00257904, 0.0, 0.0],
                         [0.0, 0.0, 0.00257904, 0.0], [0.0, 0.0, 0.0, 1.0]]

project_matrix = np.identity(4)
project_matrix[2][2] = 0
model_matrix = np.identity(4)
figure = Intermediary(Model('olen'), project_matrix, model_matrix)

new_verts = [
    *map(lambda vert: vert @ transformation_matrix, figure.model.vertices)
]

with open('obj/deer.obj', 'w', encoding='utf8') as file:
    for vert in new_verts:
        file.write(
            f'v {vert[0]:.8f} {vert[1]:.8f} {vert[2]:.8f} {vert[3]:.8f}\n')
    file.write('\n')
    for face in figure.model.faces:
        file.write(f'f {face[0]} {face[1]} {face[2]}\n')
    file.write('\n')
Example #33
from Dataset import Dataset
from Model import Model

dataset = Dataset('./data/headlines_train.json')

k_fold = KFold(n_splits=5)
k = 0

for train_index, test_index in k_fold.split(dataset.dataset['title']):
    x_train, x_test = np.array(
        dataset.dataset['title'])[train_index], np.array(
            dataset.dataset['title'])[test_index]
    y_train, y_test = np.array(
        dataset.dataset['sentiment'])[train_index], np.array(
            dataset.dataset['sentiment'])[test_index]

    model = Model(use_glove=True)
    val_size = 0.1
    x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                      y_train,
                                                      test_size=val_size)
    model.train(x_train, y_train, x_val, y_val, epochs=50)
    if val_size > 0.0:
        model.load_model(path=str(model.log_dir) + '/best_model.hdf5',
                         dict_path='./checkpoints/tokenizer.pickle')
    else:
        model.load_model(path='./checkpoints/model',
                         dict_path='./checkpoints/tokenizer.pickle')

    print(accuracy_score(y_true=y_test, y_pred=model.predict_classes(x_test)))
Example #34
    Xtrain = Xtrain[test[5000:], :, :, :]

    Ytrain = torchfile.load(args.ytrain)
    Ytrain = torch.from_numpy(Ytrain).long().unsqueeze(1)

    Ytest = Ytrain[test[0:5000], :]
    Ytrain = Ytrain[test[5000:], :]

    noTrain = Xtrain.shape[0]

    batchSize = args.b
    epochs = args.e
    alpha = args.a
    moment = 0.9

    myModel = Model(moment)
    myModel.addLayer(Flatten())
    myModel.addLayer(Linear(108 * 108, 80))
    myModel.addLayer(batchNorm())
    myModel.addLayer(sigactiv())
    myModel.addLayer(Dropout(0.7))
    myModel.addLayer(Linear(80, 20))
    myModel.addLayer(batchNorm())
    myModel.addLayer(sigactiv())
    myModel.addLayer(Linear(20, 10))
    myModel.addLayer(batchNorm())
    myModel.addLayer(sigactiv())
    myModel.addLayer(Linear(10, 6))
    criterion = Criterion()

    if args.loadModel:
Example #35
import pygame
from Model import Model

stickdummy_image = pygame.image.load("Pictures/Gjlotnok_All.png")
blue_stickdummy_image = pygame.image.load("Pictures/Shirokov_All.png")

StickDummy = Model(stickdummy_image, "StickDummy")
BlueStickDummy = Model(blue_stickdummy_image, "StickDummy")
Example #36
validationdata = btcdata[int(TOTAL * TRAIN_RATIO):int(TOTAL * TRAIN_RATIO) +
                         int(TOTAL * VALIDATION_RATIO)]
testdata = btcdata[int(TOTAL * TRAIN_RATIO) + int(TOTAL * VALIDATION_RATIO):]

# hyperparameters
PERIOD_LENGTH = 43200
EPISODES = (len(traindata) //
            PERIOD_LENGTH) * 100 if len(traindata) > PERIOD_LENGTH else 100

# set model, agent, environment
VALIDATION_KRW_SEED = 1000000
VALIDATION_BTC_SEED = 0.01
TEST_KRW_SEED = 1000000
TEST_BTC_SEED = 0.01

model = Model(input_size=7, hidden_size=128, output_size=1)
trainenv = DataEnvironment(data=traindata,
                           pick_random_period=True,
                           PERIOD_LENGTH=100)
validationenv = DataEnvironment(data=validationdata,
                                pick_random_period=False,
                                KRW_SEED=VALIDATION_KRW_SEED,
                                BTC_SEED=VALIDATION_BTC_SEED)
testenv = DataEnvironment(data=testdata,
                          pick_random_period=False,
                          KRW_SEED=TEST_KRW_SEED,
                          BTC_SEED=TEST_BTC_SEED)
agent = Agent(model, trainenv, validationenv, testenv)

model.to(device)
Example #37
def cozmo_program(robot: cozmo.robot.Robot):
    robot.set_lift_height(1.0).wait_for_completed()
    robot.camera.color_image_enabled = True
    robot.add_event_handler(cozmo.camera.EvtNewRawCameraImage, handle_image)
    print("Added event handler")
    #robot.say_text("purple").wait_for_completed()
    while True:
        global speak
        if speak == True:
            robot.say_text(prediction).wait_for_completed()
            speak = False
        time.sleep(0.1)


model = Model(path='../cs481-senior-design/f18/data/coco2014',
              jpegs='../cs481-senior-design/f18/train2014',
              bb_csv='../cs481-senior-design/f18/data/coco2014/tmp/bb.csv')

with open('../cs481-senior-design/s19/language-model.pickle', 'rb') as handle:
    predModel = pickle.load(handle)  #trying to load a LanguageModel type

imageQueue = queue.Queue(maxsize=1)
imageBuffer = collections.deque(maxlen=8)
speak = False
predicting = False
prediction = ''

threading.Thread(target=detectImages).start()
threading.Thread(target=key_listener).start()

cozmo.run_program(cozmo_program, use_viewer=True)
Example #38
                    alpha=ALPHA,
                    activation=Tanh(),
                    last_layer=False)
l7 = FeedbackFC(size=[14 * 14 * 64, 128],
                num_classes=10,
                sparse=sparse,
                rank=args.rank)

l8 = FullyConnected(size=[128, 10],
                    num_classes=10,
                    init_weights=args.init,
                    alpha=ALPHA,
                    activation=Linear(),
                    last_layer=True)

model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8])

##############################################

predict = model.predict(X=XTEST)

if args.dfa:
    grads_and_vars = model.dfa(X=XTRAIN, Y=YTRAIN)
else:
    grads_and_vars = model.train(X=XTRAIN, Y=YTRAIN)

if args.opt == "adam":
    optimizer = tf.train.AdamOptimizer(
        learning_rate=ALPHA, beta1=0.9, beta2=0.999,
        epsilon=1.0).apply_gradients(grads_and_vars=grads_and_vars)
elif args.opt == "rms":
Example #39
def main():
    global optimizer, lr_reducer, views, epoch, pipeline
    # Read configuration file
    parser = argparse.ArgumentParser()
    parser.add_argument("experiment_name")
    arguments = parser.parse_args()

    cfg_file_path = os.path.join("./experiments", arguments.experiment_name)
    args = configparser.ConfigParser()
    args.read(cfg_file_path)

    seed=args.getint('Training', 'RANDOM_SEED')
    if(seed is not None):
        torch.manual_seed(seed)
        #torch.use_deterministic_algorithms(True) # Requires pytorch>=1.8.0
        #torch.backends.cudnn.deterministic = True
        np.random.seed(seed=seed)
        ia.seed(seed)
        random.seed(seed)

    model_seed=args.getint('Training', 'MODEL_RANDOM_SEED', fallback=None)
    if(model_seed is not None):
        torch.manual_seed(model_seed)

    # Prepare rotation matrices for multi view loss function
    eulerViews = json.loads(args.get('Rendering', 'VIEWS'))
    views = prepareViews(eulerViews)

    # Set the cuda device
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Handle loading of multiple object paths
    try:
        model_path_loss = json.loads(args.get('Dataset', 'MODEL_PATH_LOSS'))
    except:
        model_path_loss = [args.get('Dataset', 'MODEL_PATH_LOSS')]

    # Set up batch renderer
    br = BatchRender(model_path_loss,
                     device,
                     batch_size=args.getint('Training', 'BATCH_SIZE'),
                     faces_per_pixel=args.getint('Rendering', 'FACES_PER_PIXEL'),
                     render_method=args.get('Rendering', 'SHADER'),
                     image_size=args.getint('Rendering', 'IMAGE_SIZE'),
                     norm_verts=args.getboolean('Rendering', 'NORMALIZE_VERTICES'))

    # Set size of model output depending on pose representation - deprecated?
    pose_rep = args.get('Training', 'POSE_REPRESENTATION')
    if(pose_rep == '6d-pose'):
        pose_dim = 6
    elif(pose_rep == 'quat'):
        pose_dim = 4
    elif(pose_rep == 'axis-angle'):
        pose_dim = 4
    elif(pose_rep == 'euler'):
        pose_dim = 3
    else:
        print("Unknown pose representation specified: ", pose_rep)
        pose_dim = -1

    # Initialize a model using the renderer, mesh and reference image
    model = Model(num_views=len(views),
                  weight_init_name=args.get('Training', 'WEIGHT_INIT_NAME', fallback=""))
    model.to(device)

    # Create an optimizer. Here we are using Adam and we pass in the parameters of the model
    low_lr = args.getfloat('Training', 'LEARNING_RATE_LOW')
    high_lr = args.getfloat('Training', 'LEARNING_RATE_HIGH')
    optimizer = torch.optim.Adam(model.parameters(), lr=low_lr)
    lr_reducer = OneCycleLR(optimizer, num_steps=args.getfloat('Training', 'NUM_ITER'), lr_range=(low_lr, high_lr))

    # Prepare output directories
    output_path = args.get('Training', 'OUTPUT_PATH')
    prepareDir(output_path)
    shutil.copy(cfg_file_path, os.path.join(output_path, cfg_file_path.split("/")[-1]))

    # Setup early stopping if enabled
    early_stopping = args.getboolean('Training', 'EARLY_STOPPING', fallback=False)
    if early_stopping:
        window = args.getint('Training', 'STOPPING_WINDOW', fallback=10)
        time_limit = args.getint('Training', 'STOPPING_TIME_LIMIT', fallback=10)
        window_means = []
        lowest_mean = np.inf
        lowest_x = 0
        timer = 0

    # Load checkpoint for last epoch if it exists
    model_path = latestCheckpoint(os.path.join(output_path, "models/"))
    if(model_path is not None):
        model, optimizer, epoch, lr_reducer = loadCheckpoint(model_path)

    if early_stopping:
        validation_csv=os.path.join(output_path, "validation-loss.csv")
        if os.path.exists(validation_csv):
            with open(validation_csv) as f:
                val_reader = csv.reader(f, delimiter='\n')
                val_loss = list(val_reader)
            val_losses = np.array(val_loss, dtype=np.float32).flatten()
            for epoch in range(window,len(val_loss)):
                timer += 1
                w_mean = np.mean(val_losses[epoch-window:epoch])
                window_means.append(w_mean)
                if w_mean < lowest_mean:
                    lowest_mean = w_mean
                    lowest_x = epoch
                    timer = 0


    # Prepare pipeline
    encoder = Encoder(args.get('Dataset', 'ENCODER_WEIGHTS')).to(device)
    encoder.eval()
    pipeline = Pipeline(encoder, model, device)

    # Handle loading of multiple object paths and translations
    try:
        model_path_data = json.loads(args.get('Dataset', 'MODEL_PATH_DATA'))
        translations = np.array(json.loads(args.get('Rendering', 'T')))
    except:
        model_path_data = [args.get('Dataset', 'MODEL_PATH_DATA')]
        translations = [np.array(json.loads(args.get('Rendering', 'T')))]

    # Prepare datasets
    bg_path = "../../autoencoder_ws/data/VOC2012/JPEGImages/"
    training_data = DatasetGenerator(args.get('Dataset', 'BACKGROUND_IMAGES'),
                                     model_path_data,
                                     translations,
                                     args.getint('Training', 'BATCH_SIZE'),
                                     "not_used",
                                     device,
                                     sampling_method = args.get('Training', 'VIEW_SAMPLING'),
                                     max_rel_offset = args.getfloat('Training', 'MAX_REL_OFFSET', fallback=0.2),
                                     augment_imgs = args.getboolean('Training', 'AUGMENT_IMGS', fallback=True),
                                     seed=args.getint('Training', 'RANDOM_SEED'))
    training_data.max_samples = args.getint('Training', 'NUM_SAMPLES')

    # Load the validationset
    validation_data = loadDataset(json.loads(args.get('Dataset', 'VALID_DATA_PATH')),
                                  args.getint('Training', 'BATCH_SIZE'))
    print("Loaded validation set!")

    # Start training
    while(epoch < args.getint('Training', 'NUM_ITER')):
        # Train on synthetic data
        model = model.train() # Set model to train mode
        loss = runEpoch(br, training_data, model, device, output_path,
                          t=translations, config=args)
        append2file([loss], os.path.join(output_path, "train-loss.csv"))
        append2file([lr_reducer.get_lr()], os.path.join(output_path, "learning-rate.csv"))

        # Test on validation data
        model = model.eval() # Set model to eval mode
        val_loss = runEpoch(br, validation_data, model, device, output_path,
                          t=translations, config=args)
        append2file([val_loss], os.path.join(output_path, "validation-loss.csv"))

        # Plot losses
        val_losses = plotLoss(os.path.join(output_path, "train-loss.csv"),
                 os.path.join(output_path, "train-loss.png"),
                 validation_csv=os.path.join(output_path, "validation-loss.csv"))
        print("-"*20)
        print("Epoch: {0} - train loss: {1} - validation loss: {2}".format(epoch,loss,val_loss))
        print("-"*20)
        if early_stopping and epoch >= window:
            timer += 1
            if timer > time_limit:
                # print stuff here
                print()
                print("-"*60)
                print("Validation loss seems to have plateaued, stopping early.")
                print("Best mean loss value over an epoch window of size {} was found at epoch {} ({:.8f} mean loss)".format(window, lowest_x, lowest_mean))
                print("-"*60)
                break
            w_mean = np.mean(val_losses[epoch-window:epoch])
            window_means.append(w_mean)
            if w_mean < lowest_mean:
                lowest_mean = w_mean
                lowest_x = epoch
                timer = 0
        epoch = epoch+1
Example #40
from Data import X_train, y_train
from Model import Model

epochs = 10
batch_size = 64
iterations = len(y_train) * epochs

tf.reset_default_graph()

dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
# Generate the complete Dataset required in the pipeline
dataset = dataset.repeat(epochs).batch(batch_size)
iterator = dataset.make_one_shot_iterator()

data_X, data_y = iterator.get_next()
data_y = tf.cast(data_y, tf.int32)
model = Model(data_X, data_y)

with tf.Session() as sess, tqdm(total=iterations) as pbar:
    sess.run(tf.global_variables_initializer())

    tot_accuracy = 0
    try:
        while True:
            accuracy, _ = sess.run([model.accuracy, model.optimizer])
            tot_accuracy += accuracy
            pbar.update(batch_size)
    except tf.errors.OutOfRangeError:
        pass

print('\nAverage training accuracy: {:.4f}'.format(tot_accuracy / iterations))
Example #41
        not_prbs=True)
    '''
    array=np.zeros(10000)
    i=0
    for (i,(u1,y1)) in enumerate(zip(uArray,yArray)):
        z1 = y1-u1
        for (j,(u2,y2)) in enumerate(zip(uArray,yArray)):
            z2 = y2-u2
            array[i*100+j] = sum(scipy.signal.correlate(z1,z2,mode='full'))
            
    '''
    print("--- %s seconds ---" % (time.time() - start_time))

    # These two lines are for training the model based on nstep and the sig data
    # Only uncomment if you want to train and not predict
    trainModel = Model()
    trainModel.load_and_train(sig,
                              epochs=1000,
                              batchSize=batchSize,
                              saveModel=False,
                              plotLoss=bool(plots != 0),
                              plotVal=bool(plots != 0))
    print("--- %s seconds ---" % (time.time() - start_time))
'''
# In this case, since we are only loading the model, not trying to train it,
# we can use function simulate and preprocess 
xData,yData = sig.MIMO_validation()

# Initialize the models that are saved using the parameters declared above
predictor = Model(nstep)
predictor.load_MIMO()
Example #42
from Model import Model
import Process
from numpy import random as rd
import numpy as np

n_params = 10

model = Model(n_params, (1, 2, 3),
              dense_activation='relu',
              n_upsample=5,
              n_conv=10,
              conv_kernels=[8, 16, 32, 64, 32, 32, 16, 16, 8, 8])

model.load('model.h5')
data, labels = Process.get_data_for_abstract('./abs_1',
                                             size=(736, 736),
                                             n_params=n_params)
print(labels)

data = Process.normalize(data)
for var in range(100):
    print(var)
    model.train(labels, data, n_iterations=1)
    if var % 50 == 0:
        i = model.generate(rd.rand(10, n_params)[0])
        i = Process.normalize(i, mode='back')
        Process.write_image_data(i[0], 'progress.jpg')

model.save('model.h5')

for variation in range(10):
Example #43
def main():
    "main function"
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', help='train the NN', action='store_true')
    parser.add_argument('--validate',
                        help='validate the NN',
                        action='store_true')
    parser.add_argument('--decoder',
                        choices=['bestpath', 'beamsearch', 'wordbeamsearch'],
                        default='bestpath',
                        help='CTC decoder')
    parser.add_argument('--batch_size',
                        help='batch size',
                        type=int,
                        default=100)
    parser.add_argument('--data_dir',
                        help='directory containing IAM dataset',
                        type=Path,
                        required=False)
    parser.add_argument('--fast',
                        help='use lmdb to load images',
                        action='store_true')
    parser.add_argument('--dump',
                        help='dump output of NN to CSV file(s)',
                        action='store_true')
    args = parser.parse_args()
    print("\n********")
    print(args)
    print("********\n")

    # set chosen CTC decoder
    if args.decoder == 'bestpath':
        decoderType = DecoderType.BestPath
    elif args.decoder == 'beamsearch':
        decoderType = DecoderType.BeamSearch
    elif args.decoder == 'wordbeamsearch':
        decoderType = DecoderType.WordBeamSearch

    # train or validate on IAM dataset
    if args.train or args.validate:
        # load training data, create TF model
        loader = DataLoaderIAM(args.data_dir, args.batch_size, Model.imgSize,
                               Model.maxTextLen, args.fast)

        # save characters of model for inference mode
        open(FilePaths.fnCharList, 'w').write(str().join(loader.charList))

        # save words contained in dataset into file
        open(FilePaths.fnCorpus, 'w').write(
            str(' ').join(loader.trainWords + loader.validationWords))

        # execute training or validation
        if args.train:
            model = Model(loader.charList, decoderType)
            train(model, loader)
        elif args.validate:
            model = Model(loader.charList, decoderType, mustRestore=True)
            validate(model, loader)

    # infer text on test image
    else:
        model = Model(open(FilePaths.fnCharList).read(),
                      decoderType,
                      mustRestore=True,
                      dump=args.dump)
        infer(model, FilePaths.fnInfer)
Example #44
#!/usr/local/bin/python
from Model import Model
from Executor import Executor


if __name__ == '__main__':
    model = Model()
    model.assemble_graph()

    silence_step = 0
    skip_step = 20

    exe = Executor(model, silence_step=silence_step, skip_step=skip_step)

    exe.train_and_dev()
    exe.restore_and_test()
Example #45


x_train_,y_train_ = TrainData.get_minibatch_tensors()
x_valdn_,y_valdn_ = ValdnData.get_minibatch_tensors()

phase_train_ = tf.placeholder(dtype = tf.bool,shape=[])

x_ = tf.where(phase_train_, x_train_, x_valdn_)
y_ = tf.where(phase_train_, y_train_, y_valdn_)



#x_ = tf.placeholder(tf.float32, [None,32,32,3])
#y_ = tf.placeholder(tf.int32,[None])
M = Model()
#optimizer = tf.train.GradientDescentOptimizer(args.lr)
optimizer = tf.train.AdamOptimizer(args.lr)
y_logits_ , _= M.inference(x_)
loss_ = Losses.cross_entropy(labels = y_, logits = y_logits_)
train_op_ = optimizer.minimize(loss_)

correct_prediction = tf.equal(y_, tf.cast(tf.argmax(y_logits_, 1),dtype=tf.int32))
accuracy_ = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

with tf.Session() as sess:
  init = tf.global_variables_initializer()
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
 
Example #46
import os, sys
sys.path = ['./utils/', './config/'] + sys.path

from collections import OrderedDict

from Model import Model
from Simulation import Simulation
from Atlas import Atlas

#import config_ARMCU as config
import config_RICO as config

dir_path = os.path.dirname(os.path.realpath(__file__))

model = Model(name='arp631.diag',
              binVersion='arp631.diag',
              levgrid='L91',
              tstep=300)

#simulations = [
#              Simulation(name='CMIP6',      model=model,case='ARMCU',subcase='REF',line='r'),
#              Simulation(name='CMIP6.LPBLE',model=model,case='ARMCU',subcase='REF',line='b'),
#              ]

#atlas = Atlas('test2',references=config.references,simulations=simulations,root_dir=dir_path)
#atlas.init_from_dict(config.diagnostics)
#atlas.info(references=True,simulations=True,groups=True)
#atlas.run()
#atlas.topdf()
#atlas.tohtml()

simulations = [
Example #47
def main():

    ###Parse arguments using docopt docstring
    arguments = docopt(__doc__)

    ###Remote directory location of train and test files
    remoteDir = "http://yann.lecun.com/exdb/mnist/"

    ###Extract absolute directory path of this script
    script_dir = os.path.dirname(__file__)  # <-- absolute dir the script is in

    ###Get output directory from user-specified argument
    dataDir = os.path.join(os.path.dirname(script_dir),
                           arguments['<dataset-dir>'])

    ###Get directory for model files
    modelDir = os.path.join(os.path.dirname(script_dir), "Models")

    ###Download dataset to the directory specified
    if (arguments['download']):

        ###Create data directory if doesn't exist
        try:
            os.mkdir(dataDir)
        except:
            print("Directory already exists, not creating...")

        urls = [
            "train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
            "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"
        ]
        for i in range(0, len(urls)):
            urls[i] = remoteDir + "/" + urls[i]

        ###Local directory location of each of the files
        filenames = [
            "Train.gz", "Train_Labels.gz", "Test.gz", "Test_Labels.gz"
        ]
        for i in range(0, len(filenames)):
            filenames[i] = dataDir + "/" + filenames[i]

        ###Run load data function from download
        load_data(urls, filenames)

    elif (arguments['train']):

        ###Create dataset object using directory and partition amount in arguments
        data = Dataset(os.path.join(dataDir, "Train.gz"),
                       os.path.join(dataDir, "Train_Labels.gz"),
                       partition=float(arguments['--split-percent'][0]))

        ###Create model object from training data
        mdl = Model(data, arguments['<model-name>'])

        ###Create model file path
        modelFile = os.path.join(modelDir,
                                 arguments['<model-description-file>'])

        ###Train the model based upon the specified model file
        h = mdl.train(modelFile)

        ###Apply the model to the dev set
        r = mdl.test()

        ###Print results to output file
        print("Dev Set Loss: " + str(r[0]) + ", Dev Set Accuracy: " +
              str(r[1]))

        ###Save the model to a serialized Keras file
        mdl.saveModel()

        ###Load each file and write test set results to file
        try:
            ###Create and open file for writing training and validation accuracies
            file = open(
                os.path.join(
                    os.path.join(os.path.dirname(script_dir), "Model_Output"),
                    arguments['<model-name>'] + ".txt"), "w")

            ###Write accuracies to file
            file.write("Training Loss\t" + str(h[0]) +
                       "\nTraining Accuracy\t" + str(h[1]) +
                       "\nValidation Loss\t" + str(r[0]) +
                       "\nValidation Accuracy\t" + str(r[1]))

            ###Flush output to the file
            file.flush()

            ###Close the file for writing
            file.close()
        except:
            print(
                "Unable to complete output of training results please ensure that your model name was properly specified without any slashes, dashes, or dots"
            )

    elif (arguments['test']):

        ###Load the testing dataset
        data = Dataset(os.path.join(dataDir, "Test.gz"),
                       os.path.join(dataDir, "Test_Labels.gz"), 0)

        modelNames = arguments['<model-names>']
        modelNames = modelNames.split(",")

        ###Load each file and write test set results to file
        try:
            file = open(arguments['<comparison-name>'] + ".txt", "w")
            file.write("Model_Name\tTest_Loss\tTest_Accuracy\n")
            ###Loop through each model
            for name in modelNames:

                file.write(name + "\t")
                ###Create the model object from the specified
                mdl = Model(data, name)

                ###Load model from serialized model file
                mdl.loadModel()

                ###Get Output statistics on the testing set
                result = mdl.test()

                file.write(str(result[0]) + "\t" + str(result[1]) + "\n")

            file.flush()

            file.close()
        except:
            print(
                "Unable to complete test results, please ensure that all of your model files exist in the Model_Output folder"
            )

    elif (arguments['explore']):

        ###Create dataset object using directory and partition amount in arguments
        data = Dataset(os.path.join(dataDir, "Train.gz"),
                       os.path.join(dataDir, "Train_Labels.gz"),
                       partition=float(arguments['--split-percent'][0]))

        ###Create model object from training data
        mdl = Model(data, arguments['<model-name>'])

        ###Convert the full arguments list to only those relevant to parameters
        hyperparams = convertParams(arguments)

        ###Explore hyperparameter space
        if len(arguments['--file']) > 0:
            mdl.exploreByFile(arguments['--file'][0])
        else:
            result = mdl.explore(hyperparams)
Example #48
from Model import Model
from Dataset import Dataset

np.random.seed(123)

N_train = 5000
lr = 1e-6
N_epochs = 50000
N_hidden = [64, 32, 16]
act_funcs = [tf.nn.relu, tf.nn.relu, tf.nn.relu, lambda x: x]
#act_funcs = [lambda x: x, lambda x: x]
sigma_1 = 1e-1
sigma_m = 1e-1

data = Dataset(N_train)
model = Model(lr, N_epochs, data, N_hidden, act_funcs, sigma_1, sigma_m)

#model.fig = plt.figure()
#ax = model.fig.add_subplot(1,1,1)
#model.fig.canvas.draw()
#model.h1, = ax.plot(np.arange(model.N_params), np.zeros(model.N_params), 'r')
#ax.set_ylim([-10, 10])
#ax.set_xlim([0,model.N_params])

loss_lst, val_loss_lst, log_post_list, log_prior_list, log_likelihood_list = model.train(
)

x_pred = np.linspace(-0.2, 1.2, 100)
x_pred_tf = x_pred[:, np.newaxis]

y_pred = model.predict(x_pred_tf, N_samples=100)
Example #49
'''Controller of the 'Insegnamenti' web application.
Line-saving formatting, for the slides!
@author: posenato'''

import logging
from flask import Flask, request
from flask.templating import render_template
from Model import Model

logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)  # Applicazione Flask!
app.jinja_env.line_statement_prefix = '#'  # enable line statements in Jinja

app.model = Model()
app.facolta = app.model.getFacolta("Scienze Matematiche Fisiche e Naturali")


@app.route('/')
def homePage():
	'''The home page must present a form for choosing the degree programme and
	academic year among the courses of the Faculty of Scienze MM FF NN.'''
	corsiStudi = app.model.getCorsiStudi(app.facolta['id'])
	aA = app.model.getAnniAccademici(app.facolta['id'])
	return render_template('homepage.html', facolta=app.facolta, corsiStudi=corsiStudi, aa=aA, prova="<b>prova</b>")


@app.route('/insegnamenti', methods=['POST', 'GET'])
def insegnamenti():
	'''List of the teachings of a degree programme in a given academic year.'''
	if request.method == 'POST':
		idCorsoStudi = request.form['idCorsoStudi']
Example #50
from Model import Model

import numpy as np
import matplotlib.pyplot as plt


def getBody(N):

    return np.random.uniform(size=N), np.random.uniform(size=N)


l = [2, 50, 50, 50, 2]
m = Model("cavity3", layers=l, penalty=2.0, num_steps=50000)

num_epoch = 0
batch_size = 2000
max_epoch = 3

while True:
    num_epoch = num_epoch + 1
    print("Epoch =", num_epoch)
    m.train({
        m.varAux: np.stack(getBody(batch_size), axis=1),
    },
            method="L-BFGS-B")
    c = m.convergence[-1]

    if c[0] < 0.005:
        print("Converged!")
        break
    elif num_epoch > max_epoch:
Example #51
for (i, label) in enumerate(mlb.classes_):
    print("{}. {}".format(i + 1, label))

# Partition the data into training and testing splits, using 70% of the data for training and the remaining 30%
# for testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.3,
                                                  random_state=42)

# Initialize the model using a sigmoid activation as the final layer in the network so we can perform multi-label
# classification
print("[INFO] compiling model for multi-label classification...")
model = Model.build(width=IMAGE_DIMS[1],
                    height=IMAGE_DIMS[0],
                    depth=IMAGE_DIMS[2],
                    classes=len(mlb.classes_),
                    finalAct="sigmoid")

# Show the properties of the model
model.summary()

# Initialize the Adam optimizer
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)

# Compile the model using binary cross-entropy rather than categorical cross-entropy, even though we are doing
# multi-label classification. Binary cross-entropy is the better fit here because the goal is to treat each
# output label as an independent Bernoulli distribution
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# Construct the image generator for data augmentation
Example #52
 def __init__(self, options=None):
     # Avoid a shared mutable default argument.
     options = options if options is not None else {}
     Model.__init__(self, options)
Example #53
0
        Parameters:
            - myCtrl (Controller): instance of the Controller class
            - nomRouteur (String): name of the router
            - destination (String): destination of the route to be created
            - masque (String): netmask of the route to be created
            - via (String): default gateway of the route to be created

        Purpose: calls the sshDelRoute function, closes the current window and
                 opens the interface home window.
        '''

        global nomFichier

        myCtrl.sshDelRoute(nomFichier, nomRouteur, destination, masque, via)

        self.close()
        self.accueilInterface(myCtrl, nomRouteur)


# Allows the script below to be executed first
if __name__ == '__main__':
    app = QApplication(sys.argv)
    global compteur
    compteur = 1
    global nomFichier
    nomFichier = 'database.csv'
    open(nomFichier, 'w').close()  # create (or empty) the database file
    model = Model()
    ctrl = Controller(model)
    view = View(ctrl)
    sys.exit(app.exec_())
Example #54
0
 def setUp(self):
     self.model = Model(0)
     self.model.parameters['p1'] = Parameter(2.)
     self.model.species['s1'] = Species(3.)
     self.model.species['s2'] = Species(5.)
     self.model.initialize()
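A hypothetical test that could follow this setUp; it relies only on the dict-style `parameters` and `species` registries used above:

 def test_setup_registers_entities(self):
     # setUp registered one parameter and two species before initialize().
     self.assertIn('p1', self.model.parameters)
     self.assertEqual(len(self.model.species), 2)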
Example #55
0
File: PP_sim.py Project: a127a127/PP_sim
def main():
    start_time = time.time()
    model      = sys.argv[1]
    mapping    = sys.argv[2]
    scheduling = sys.argv[3]
    partition_h = int(sys.argv[4])
    partition_w = int(sys.argv[5])
    mapping_str = mapping+sys.argv[4]+"_"+sys.argv[5]
    buffer_size_str = sys.argv[6]
    buffer_size = int(sys.argv[6])
    
    model_config = ModelConfig(model)
    model_info = Model(model_config)
    hw_config = HardwareConfig(buffer_size)
    hw_config.eDRAM_buffer_rd_wr_data_per_cycle = int(hw_config.eDRAM_buffer_bandwidth * 8 // model_info.input_bit * hw_config.cycle_time)
    hw_config.eDRAM_buffer_read_to_IR_cycles = math.ceil(hw_config.Xbar_h * hw_config.Xbar_num / hw_config.eDRAM_buffer_rd_wr_data_per_cycle)

    LoadOrder = True
    filename = './order_file/'+model_config.Model_type+'_'+mapping_str+'_'+scheduling+'_'+buffer_size_str+'.pkl'
    try:
        with open(filename, 'rb') as input:
            order_generator = pickle.load(input)
    except FileNotFoundError:
        print("Order file not found.")
        LoadOrder = False

    ### output path ###
    path = './statistics/'+model_config.Model_type+'/'+mapping_str+'/'+scheduling+'/'+buffer_size_str
    if not os.path.exists(path):
        os.makedirs(path)

    ### Mapping ###
    if not LoadOrder:
        cant_use_pe = (13, 12, 1, 1)  # so that different experiment settings use the same number of PEs
        # Used PE: Lenet: 6, Cifar10: 5, DeepID: 6, Caffenet: 321, Overfeat: 568, VGG16: 708
        if model == "Lenet":
            cant_use_pe = (0, 1, 1, 0)
        elif model == "Cifar10":
            cant_use_pe = (0, 1, 0, 1)
        elif model == "DeepID":
            cant_use_pe = (0, 1, 1, 0)
        elif model == "Caffenet":
            cant_use_pe = (6, 2, 0, 1)
        elif model == "Overfeat":
            cant_use_pe = (10, 12, 0, 0)
        elif model == "VGG16":
            cant_use_pe = (13, 4, 0, 0)

        cant_use_pe = (10000, 0, 1, 1, 3, 2)  # note: unconditionally overrides the per-model settings above
        
        start_mapping_time = time.time()
        print("--- Mapping ---")
        print("Mapping policy:  ", end="")
        if mapping == "LIDR":
            print("Low input data reuse mapping")
            mapping_information = LIDR(model_info, hw_config, partition_h, partition_w, cant_use_pe)
        elif mapping == "HIDR":
            print("High input data reuse mapping")
            mapping_information = HIDR(model_info, hw_config, partition_h, partition_w, cant_use_pe)

        end_mapping_time = time.time()
        print("--- Mapping is finished in %s seconds ---\n" % (end_mapping_time - start_mapping_time))

    ### Print layer info ###
    if True:
        for nlayer in range(model_info.layer_length):
            if model_info.layer_list[nlayer].layer_type == "convolution" or model_info.layer_list[nlayer].layer_type == "fully":
                strides = model_info.strides[nlayer]
                pad = model_info.pad[nlayer]
                o_height = model_info.input_h[nlayer+1]
                o_width = model_info.input_w[nlayer+1]

                print(f'  - {nlayer} {model_info.layer_list[nlayer].layer_type}: [{model_info.input_c[nlayer]}, {model_info.input_h[nlayer]}, {model_info.input_w[nlayer]}] x [{model_info.filter_n[nlayer]}, {model_info.filter_c[nlayer]}, {model_info.filter_h[nlayer]}, {model_info.filter_w[nlayer]}] s: {strides}, p: {pad} -> [{model_info.input_c[nlayer+1]}, {o_height}, {o_width}]')
            else:
                strides = model_info.strides[nlayer]  # set for this layer too, rather than reusing the previous conv layer's values
                pad = model_info.pad[nlayer]
                o_height = model_info.input_h[nlayer+1]
                o_width = model_info.input_w[nlayer+1]
                print(f'  - {nlayer} {model_info.layer_list[nlayer].layer_type}: [{model_info.input_c[nlayer]}, {model_info.input_h[nlayer]}, {model_info.input_w[nlayer]}] x [{model_info.pooling_h[nlayer]}, {model_info.pooling_w[nlayer]}] s: {strides}, p: {pad} -> [{model_info.input_c[nlayer+1]}, {o_height}, {o_width}]')


    ### Buffer Replacement ###
    # print("Buffer replacement policy: ", end="")
    # replacement = "LRU"
    # if replacement == "Ideal":
    #     print("Ideal")
    # elif replacement == "LRU":
    #     print("LRU")

    ### Trace ###
    isTrace_order      = False
    isTrace_controller = False

    ### Generate computation order graph ### 
    if not LoadOrder:
        start_order_time = time.time()
        print("--- Generate computation order ---")
        order_generator = OrderGenerator(model_info, hw_config, mapping_information, isTrace_order)
        end_order_time = time.time()
        print("--- Computation order graph is generated in %s seconds ---\n" % (end_order_time - start_order_time))
        
        # Save Order
        if not os.path.exists('./order_file/'):
            os.makedirs('./order_file/')
        with open(filename, 'wb') as output:
            pickle.dump(order_generator, output, pickle.HIGHEST_PROTOCOL)
    
    else:
        with open(filename, 'rb') as input:
            order_generator = pickle.load(input)

    ### Dump JSON ###
    if False:
        json_name = f"{model}-{mapping}-{scheduling}-{partition_h}-{partition_w}-{buffer_size}"
        print(f"Dumping JSON to {json_name}.json...")
        with open(f"{json_name}.json", "w") as outfile:
            opts = jsbeautifier.default_options()
            opts.indent_with_tabs = True
            opts.indent_level = 1
            model_config_json = jsons.dumps(model_config)
            hw_config_json = jsons.dumps(hw_config)

            #json = jsons.dumps({
            #    "order_generator.Computation_order": order_generator.Computation_order,
            #})

            model_config_json = jsbeautifier.beautify(model_config_json, opts)
            hw_config_json = jsbeautifier.beautify(hw_config_json, opts)
            outfile.write(f'{{\n\t"model_config": {model_config_json},\n\t"hw_config": {hw_config_json},\n\t"order_generator.Computation_order": [\n')
            for index, event in enumerate(tqdm(order_generator.Computation_order)):
                outfile.write(f'\t\t// {index}:\n')
                outfile.write(f'\t\t{jsons.dumps(event)},\n')
            outfile.write(f'\t]\n}}\n')
        print(f"Done")

    #Visualizer.weightMappingByCO(hw_config, model_config, order_generator.Computation_order, f"{model}")
    #return
    #Visualizer.visualizeGif(hw_config, model_config, order_generator.Computation_order, f"{model}")
    #return
    
    log = {}

    ### Power and performance simulation ###
    start_simulation_time = time.time()
    print("--- Power and performance simulation ---")
    controller = Controller(model_config, hw_config, order_generator, isTrace_controller, mapping_str, scheduling, path, log)
    end_simulation_time = time.time()
    print("--- Simulate in %s seconds ---\n" % (end_simulation_time - start_simulation_time))
    end_time = time.time()
    print("--- Run in %s seconds ---\n" % (end_time - start_time))

    Visualizer.visualizeSimulation2(hw_config, model_config, order_generator.Computation_order, log, f"{model}")
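From the sys.argv parsing at the top of main(), the script expects six positional arguments; an illustrative invocation (the scheduling name and buffer size are placeholders, not values confirmed by the original):

# python PP_sim.py Lenet LIDR <scheduling> 1 1 24
#   argv[1] model, argv[2] mapping (LIDR or HIDR), argv[3] scheduling,
#   argv[4] partition_h, argv[5] partition_w, argv[6] buffer_size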
Example #56
0
def train(train_gen, valid_gen, df_train, df_val, batch_size, target_size):
    epochs = 150
    Drawer.draw_data_samples(df_train)
    unet_model = Model.create_model(input_size=target_size + (3,))
    Train.run_train(unet_model, train_gen=train_gen, valid_gen=valid_gen, batch_size=batch_size,
                    df_train=df_train, df_val=df_val, epochs=epochs)
Example #57
0
from Model import Model
from View import View
from Controller import Controller

data = [{
    "Nimetus": "Leib",
    "Hind": 0.80,
    "Kogus": 20
}, {
    "Nimetus": "Piim",
    "Hind": 0.50,
    "Kogus": 15
}, {
    "Nimetus": "Vein",
    "Hind": 5.60,
    "Kogus": 5
}]

Maxima = Controller(Model(data), View())

Maxima.kuva_elemendid()

#Maxima.lisa_element("Lada", 399, 9)

#Maxima.kuva_element("Lada")

#Maxima.uuenda_elementi("Piim", 0.87, 20)

Maxima.kustuta_kõik()

Maxima.kuva_elemendid()
Example #58
0
class Simulator():
    def __init__(self, wereld_object):
        pygame.init()
        self.Wereld = wereld_object
        self.Model = Model(wereld_object)
        self.BREEDTE = 800
        self.HOOGTE = 800
        #self.screen = pygame.display.set_mode((self.BREEDTE, self.HOOGTE))
        self.clock = pygame.time.Clock()
        self.achtergrond = [0, 0, 0]
        self.tijdstap = 0
        self.screen = pygame.display.set_mode((self.BREEDTE, self.HOOGTE))
        self.screenTitle = pygame.display.set_caption("Vincent Simulator")
        self.lijst_Prey_agents_in_wereld = []
        self.lijst_Hunter_agents_in_wereld = []

    def display_all_agents_to_screen(self):
        for eenagent in self.Wereld.lijstVanAlleAgentsInDeWorld:
            if isinstance(eenagent, Hunter):
                pygame.draw.circle(self.screen, eenagent.kleur_hunter,
                                   (int(eenagent.x_pos), int(eenagent.y_pos)),
                                   eenagent.size)
                self.aantal_hunters += 1
            elif isinstance(eenagent, Prey):
                pygame.draw.circle(self.screen, eenagent.kleur_prooi,
                                   (int(eenagent.x_pos), int(eenagent.y_pos)),
                                   eenagent.size)
                self.aantal_prooien += 1
            eenagent.step()

    def preys_RL_step(self):
        for een_agent_object in self.Wereld.lijstVanAlleAgentsInDeWorld:
            if isinstance(een_agent_object, Prey):
                een_agent_object.step_RL()

    def predators_RL_step(self):
        for een_agent_object in self.Wereld.lijstVanAlleAgentsInDeWorld:
            if isinstance(een_agent_object, Hunter):
                een_agent_object.step_RL()

    def preys_step(self):
        for eenagent in self.Wereld.lijstVanAlleAgentsInDeWorld:
            if isinstance(eenagent, Prey):
                eenagent.step()

    def predator_step(self):
        for eenagent in self.Wereld.lijstVanAlleAgentsInDeWorld:
            if isinstance(eenagent, Hunter):
                eenagent.step()

    def runSimulationWorld(self):

        running = True
        plot = True

        while running:
            if self.Wereld.getaantal_prooien() > 0 and self.Wereld.getaantal_hunters() > 0:

                #self.Wereld.reset()
                self.aantal_hunters = 0
                self.aantal_prooien = 0
                self.Model.showAmountOfAgentsInWorld(self.tijdstap)
                self.display_all_agents_to_screen()

                if len(self.Wereld.lijstVanAlleAgentsInDeWorld) == 0 or (
                        self.Model.totaal_aantal_hunters_in_World() == 0
                        and self.Model.totaal_aantal_preys_in_World() >= 0
                ) and plot:
                    print("je bent hier geweest voor de plot")
                    plot = False
                    self.Model.showPlot()
                    running = False
            else:
                # we do not get here yet, because getaantal_prooien is not finished and is not updated yet
                if len(self.Wereld.lijstVanAlleAgentsInDeWorld) == 0 and not plot:
                    running = False

            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False

            self.tijdstap += 1
            pygame.display.update()
            self.screen.fill(self.achtergrond)
            time.sleep(1)

        print("The simulation is over")

    def voer_step_agent_uit(self, Agent):
        Agent.step()
print("There are {} validation tuples".format(len(val_tuples)))

#---- Preprocess tuples to be usable in Model ----#
train_set = DatasetFromTuples(train_tuples,
                              args.patch_size,
                              aug=True,
                              crop=True)
val_set = DatasetFromTuples(val_tuples, args.patch_size, aug=False, crop=True)

train_data_loader = DataLoader(dataset=train_set,
                               batch_size=args.batch_size,
                               shuffle=True)
val_data_loader = DataLoader(dataset=val_set, batch_size=1, shuffle=False)

print("\n\n--- Building model...")
model = Model()
if torch.cuda.device_count() > 1:
    print("There are %d GPUs" % (torch.cuda.device_count()))
    #model = nn.DataParallel (model)
model = model.to(device)

l1_loss = nn.L1Loss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)


def compute_psnr(sr_im_orig, hr_im_orig):
    # Note: this function only applies to a single image (not a batch)
    if len(sr_im_orig.shape) == 4 and len(hr_im_orig.shape) == 4:
        sr_im = sr_im_orig.squeeze(0)
        hr_im = hr_im_orig.squeeze(0)
    else:
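The function is cut off at the `else:` branch above; a self-contained PSNR sketch, assuming the tensors are scaled to [0, 1] (the original's value range is not shown):

import torch

def psnr(sr_im, hr_im, max_val=1.0):
    # Peak signal-to-noise ratio between two image tensors of the same shape.
    mse = torch.mean((sr_im - hr_im) ** 2)
    if mse == 0:
        return float('inf')
    return (10.0 * torch.log10(max_val ** 2 / mse)).item()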
Example #60
0
import editdistance
from DataLoader import DataLoader, Batch
from Model import Model
from SamplePreprocessor import preprocess


class FilePaths:
    "filenames and paths to data"
    fnCharList = '../model/charList.txt'
    fnAccuracy = '../model/accuracy.txt'
    fnTrain = '../data/'
    fnInfer = '../data/test.png'
    fnCorpus = '../data/corpus.txt'


model = Model(open(FilePaths.fnCharList).read(), mustRestore=True)


def infer(model, fnImg):
    "recognize text in image provided by file path"
    #image = cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE)
    # cv2.cvtColor(fnImg, cv2.COLOR_BGR2GRAY)
    # image = cv2.resize(image,(500,500))
    img = preprocess(fnImg, Model.imgSize)
    batch = Batch(None, [img] * Model.batchSize)
    recognized = model.inferBatch(batch)
    print('Recognized:', '"' + recognized[0] + '"')
    return recognized[0]
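A usage sketch with the paths defined in FilePaths above (assuming `preprocess` accepts a file path, as the call inside infer suggests):

recognized_text = infer(model, FilePaths.fnInfer)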


def line_sep(image):