Example #1
    def execute_cmd(key, value):
        """
        Executes the given command on the linuino side.
        """
        if key == "available_logs":
            logs = Logger.get_log_list()

            result = "All available logs with their ids:\n\n" + logs
            return result
        elif key == "read_log":
            log = Logger.get_log(value)

            result = "\n" + log
            return result
        elif key == "delete_log":
            Logger.remove_log(value)

            result = "Logfile was removed successfully."
        elif key == "help":
            result = "All available commands:\n\n"

            for cmd in valid_cmds:
                help_line = "{command} - {description}\n".format(
                    command=cmd,
                    description=valid_cmds[cmd]["desc"]
                )
                result += help_line

            return result
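The help branch iterates over a valid_cmds mapping that the snippet does not show. A minimal sketch of the structure it appears to expect (command descriptions are hypothetical):

# Hypothetical shape of the valid_cmds mapping used by execute_cmd;
# each entry maps a command name to metadata with at least a "desc" key.
valid_cmds = {
    "available_logs": {"desc": "List all available logs with their ids"},
    "read_log":       {"desc": "Read the log with the given id"},
    "delete_log":     {"desc": "Delete the log with the given id"},
    "help":           {"desc": "Show this help text"},
}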
Example #2
	def testLogger_init(self):
		try:
			logger = Logger('sqlite:///testDB.db')
		except:
			self.fail("Unexpected Exception!")
		self.assertIsInstance(logger, Logger)
		logger.stop()
Example #3
    def chk_job_result(self, status_file_path, job):
        """
    Read status file attributes and parse them.

    Args:

    Return:
      True/False
      status_attr: dictionary of status file attrs
    """
        try:
            Logger.info("ErcJobProcessor.chk_job_result", "Check log result")
            is_valid_file, status_attr = ErcFileHandler().init_chmm_file(status_file_path, "STATUS")
            if not is_valid_file:
                Logger.error("ErcJobProcessor.chk_job_result", "Invalid CHMM status file.")
                return False, {}
            if not int(status_attr["ErrorCount"]) == 0:
                error_msg = ""
                trap_list = []
                for key, value in status_attr.items():
                    if key.startswith("Error-"):
                        error_msg = error_msg + str(key) + ":" + str(value) + " "
                        trap_list.append(value)
                Logger.error("ErcJobProcessor.chk_job_result", "Error code: " + error_msg)
                ErcTrapGenerator.generate_crm_trap(trap_list, self.config_attr["trap_drop_dir"])
                return False, {}
            else:
                Logger.info(
                    "ErcJobProcessor.chk_job_result", "Generated Log: " + status_attr["Job1 Created Download File"]
                )
                return True, status_attr
        except Exception as e:
            Logger.error("ErcJobProcessor.chk_job_result", str(e))
            return False, {}
Example #4
class DeviceDetailsDBHelper(DBHelper):

    def __init__(self):
        DBHelper.__init__(self)
        self.log = Logger('DeviceDetailsDBHelper')

    def add_device_detail(self,detail):
        TAG = 'add_device_detail'

        if isinstance(detail, dict):
            if C.DEVICE_DETAILS_TABLE_EXTRAS not in detail:
                detail[C.DEVICE_DETAILS_TABLE_EXTRAS] = "{}"

            try:
                query = "INSERT INTO " + C.DEVICE_DETAILS_TABLE + "(" + C.DEVICE_DETAILS_TABLE_DEVICE + "," + C.DEVICE_DETAILS_TABLE_EXTRAS  +\
                                      ") VALUES( "+str(detail[C.DEVICE_DETAILS_TABLE_DEVICE])+", '"+json.dumps(detail[C.DEVICE_DETAILS_TABLE_EXTRAS])+"') RETURNING "+C.DEVICE_DETAILS_TABLE_DEVICE
                self.cursor.execute(query)
            except Exception as err:
                self.log.e(TAG,'Exception : ' + repr(err))
                return None
            if self.cursor.rowcount > 0:
                row = self.cursor.fetchone()
                return row[0]

        else:
            # Assumed completion: the snippet is truncated here in the original.
            self.log.e(TAG, 'detail must be a dict')
            return None
Example #5
def fix_recent_time_marker(control_attr, control_path):
    """
  When the log request start time was long ago.
  Fix it to be "current time" - "max log duration"

  Args:
    control_attr: dictionary which stores attributes in contorl file
    control_attr: path for the control file
  Return:
    True/False
  """
    try:
        new_marker = datetime.datetime.now() - datetime.timedelta(minutes=int(control_attr["MaxDuration"]))
        new_marker = new_marker.replace(second=0, microsecond=0)
        new_marker = new_marker.strftime("%m/%d/%Y %H:%M:%S")
        readLineBuf = ""

        with open(control_path, "r") as f:
            for line in f.readlines():
                if line.startswith("MostRecentTime: "):
                    readLineBuf += "MostRecentTime: " + str(new_marker) + "\n"
                else:
                    readLineBuf += line

        with open(control_path, "w") as f2:
            f2.write(readLineBuf)
        return True
    except Exception as e:
        Logger.error("ErcJobProcessor.fix_recent_time_marker", str(e))
        raise
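For illustration, a sketch of the rewrite this performs, assuming MaxDuration is 120 (minutes) and the current time is 06/01/2024 10:45:30:

# Hypothetical control file line, before and after fix_recent_time_marker:
#
#   before:  MostRecentTime: 01/01/2020 00:00:00
#   after:   MostRecentTime: 06/01/2024 08:45:00   (now - 120 min, seconds zeroed)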
Example #6
def exportJunctionsDictionary(junctionsDict):
    """
    Writes the junctions dictionary in an output file
    """
    Logger.info("{}Exporting junctions dictionary...".format(constants.PRINT_PREFIX_GRAPH))
    junctionsFile = open(constants.SUMO_JUNCTIONS_DICTIONARY_FILE, 'w')
    
    for pair in junctionsDict.items():
        junctionsFile.write(pair[0])
        junctionsFile.write(constants.END_OF_LINE)
        
        size = len(pair[1][0])
        i = 1
        for value in pair[1][0]:
            junctionsFile.write(value)
            if i != size: 
                junctionsFile.write(constants.SEPARATOR)
                i += 1
        junctionsFile.write(constants.END_OF_LINE)
        
        size = len(pair[1][1])
        i = 1
        for value in pair[1][1]:
            junctionsFile.write(value)
            if i != size: 
                junctionsFile.write(constants.SEPARATOR)
                i += 1
        junctionsFile.write(constants.END_OF_LINE)
        
    junctionsFile.close()
    Logger.info("{}Done".format(constants.PRINT_PREFIX_GRAPH))
Example #7
	def test_assertPathOk(self) :
		s = Logger("badpath")
		try :
			s._assertPathOk()
			self.fail("Exception expected")
		except BadServerPath, e:
			self.assertEqual(e.message, "badpath")
Example #8
def importJunctionsDictionary():
    """
    Reads the junctions dictionary from an input file
    """
    Logger.info("{}Importing junctions dictionary...".format(constants.PRINT_PREFIX_GRAPH))
    junctionsFile = open(constants.SUMO_JUNCTIONS_DICTIONARY_FILE, 'r')
    junctionsDict = dict()
    
    key = junctionsFile.readline()[0:-1]
    while key:
        value = [set(), set()]
        
        strvalues = junctionsFile.readline()[0:-1]
        lvalues = strvalues.split(constants.SEPARATOR)
        for v in lvalues:
            value[0].add(v)
            
        strvalues = junctionsFile.readline()[0:-1]
        lvalues = strvalues.split(constants.SEPARATOR)
        for v in lvalues:
            value[1].add(v)
            
        junctionsDict[key] = value
        key = junctionsFile.readline()[0:-1]
    
    junctionsFile.close()
    Logger.info("{}Done".format(constants.PRINT_PREFIX_GRAPH))
    return junctionsDict
Example #9
def importGraph():
    """
    Reads the network graph from an input graphFile
    """
    Logger.info("{}Importing graph...".format(constants.PRINT_PREFIX_GRAPH))
    graphFile = open(constants.SUMO_GRAPH_FILE, 'r')
    graphDict = dict()
    
    line = graphFile.readline()[0:-1]
    while line:
        lineArray = line.split(constants.SEPARATOR)
        junctionNode = lineArray[0]
        graphDict[junctionNode] = dict()
        
        i = 1
        while i < len(lineArray):
            junctionSuccessor = lineArray[i]
            edgeLength = lineArray[i + 1]
            graphDict[junctionNode][junctionSuccessor] = float(edgeLength)
            i += 2

        line = graphFile.readline()[0:-1]
    
    graphFile.close()
    Logger.info("{}Done".format(constants.PRINT_PREFIX_GRAPH))
    return graphDict
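Each graph-file line holds a junction id followed by alternating successor ids and edge lengths. A sketch of the parsing, assuming constants.SEPARATOR is a space:

# Hypothetical graph file line and the entry importGraph builds from it:
#   line:   "J1 J2 12.5 J3 40.0"
#   result: graphDict["J1"] == {"J2": 12.5, "J3": 40.0}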
Example #10
class LogsGetHandlerThreadWithPage(threading.Thread):
    final_dict = {}
    log = 'log'
    def __init__(self, request = None, data = None,callback =None, *args, **kwargs):
        super(LogsGetHandlerThreadWithPage, self).__init__(*args, **kwargs)
        self.request = request
        self.data = data
        self.callback =callback


    def run(self):
        #Return All the users in the User table
        self.log = Logger('LogsGetHandlerThreadWithPage')
        TAG = 'run'
        print self.data
        page = self.request.get_argument('page', None)
        count = self.request.get_argument('count', None)

        logs = LogsDBHelper()
        logs_list = logs.get_logs('1', ['error', 'warning', 'info'], count, page)

        if logs_list is None or len(logs_list) == 0:
            self.log.i(TAG,'No more logs to show')

        self.final_dict['pass'] = True
        self.final_dict['logs'] = logs_list
        opJson = json.dumps(self.final_dict)
        #self.request.add_header('Access-Control-Allow-Origin', '*')
        self.request.set_header ('Content-Type', 'application/json')
        self.request.write(opJson)
        tornado.ioloop.IOLoop.instance().add_callback(self.callback)
Example #11
def slicer(list_to_slice,slices):
    """
    *Generator* Slice a list into individual slices.

    Args:
        list_to_slice (list): a list of items
        slices (int): how many slices to cut the list into

    Yields:
        list: one slice of the given list
    """

    #FIXME: there must be something in the std lib..
    # implementing this because I am flying anyway
    # right now and have nothing to do..

    if slices == 0:
        slices = 1 # prevent ZeroDivisionError
    maxslice = len(list_to_slice) // slices  # integer division so the bounds stay valid slice indices
    if (maxslice * slices) < len(list_to_slice):
        maxslice += 1
    Logger.info("Sliced list in {} slices with a maximum slice index of {}".format(slices, maxslice))
    for index in range(0,slices):
        lower_bound = index*maxslice
        upper_bound = lower_bound + maxslice
        thisslice = list_to_slice[lower_bound:upper_bound]
        #if not thisslice: #Do not emit empty lists!
        yield thisslice
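A hypothetical usage sketch of the generator:

# Splitting 7 items into 3 slices: maxslice = 7 // 3 = 2, bumped to 3
# because 2 * 3 < 7, so the slices are [0, 1, 2], [3, 4, 5], [6].
chunks = list(slicer(list(range(7)), 3))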
Example #12
def backup_res_file(fpath):
    if os.path.exists(fpath):
        backup_path = fpath + '.bak'
        Logger.debug('backup: {}'.format(fpath))
        if os.path.exists(backup_path):
            os.remove(backup_path)
        os.rename(fpath, backup_path)
Example #13
    def delete(lease, task_result):
        """Remove a task from the queue
        
        Args:
            lease:
                a lease that was acquired from a prior call to taskqueue.lease
            task_result:
                indicates whether the task was completed successfully

                If the task execution failed and you want it to be re-tried automatically, then do NOT call delete,
                just come back later after the lease times out and the task will be available for lease again
                (unless the retry limit is reached).
            
        Returns:
            True if the task was deleted, false if the task was not found.
        """

        q = taskqueue.Queue('tasks')
        task=taskqueue.Task(name=lease['id'])
        q.delete_tasks(task)
        if task.was_deleted:
                
            #TODO: It looks like the task that comes back from delete_tasks_by_name() does not include the payload
            #
            # Ideally we could do this:
            #     lease = json.loads(deleted_task[0].payload)
            # but payload always come back as None
            #
            # So the workaround, though clumsy, is to require that the entire lease response to be sent back 
            # with the delete request

                
            Logger.log (op='delete', channel=lease['channel'], name=lease['name'], status='OK', task_result=task_result)
            return True
        else:
            return False
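The lease argument is only read through a few keys; a hypothetical sketch of the shape this implies (values are illustrative only):

lease = {
    "id": "task-123",        # used as the task name passed to taskqueue.Task
    "channel": "default",    # logged on successful delete
    "name": "resize-image",  # logged on successful delete
}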
Example #14
File: db.py Project: mflorin/ir
    def init(self):

        # config options
        self.config = {}

        self.loadConfig()
        
        self.running = False

        # initialize the event object used for sleeping
        self.event = threading.Event()

        if self.config['persistence'] and os.path.exists(self.config['file_name']):
            try:
                Logger.info('loading database from %s' % self.config['file_name'])
                self.load()
            except Exception as e:
                Logger.critical(str(e))

        self.setup()

        # register our 'save' commands
        Command.register(self.saveCmd, 'db.save', 0, 'db.save')
        
        # treat events
        Event.register('core.reload', self.reloadEvent)
        Event.register('core.shutdown', self.shutdownEvent)
Example #15
class ViolationsDBHelper(DBHelper):

    def __init__(self):
        DBHelper.__init__(self)
        self.log = Logger('ViolationsDBHelper')

    def add_violation(self, device):
        TAG = 'add_violation'
        if isinstance(device, str):
            try:
                self.cursor.execute("""INSERT INTO {0} ({1}, {2})
                                    VALUES (%s, %s) RETURNING id;""".format(
                                    C.VIOLATION_TABLE, C.VIOLATION_TABLE_DEVICE,
                                    C.VIOLATION_TABLE_TIMESTAMP), (str(device),
                                        datetime.datetime.now(),))
            except Exception as err:
                self.log.e(TAG, 'Exception : ' + repr(err))
                return None

            if self.cursor.rowcount > 0:
                row = self.cursor.fetchone()
                return row[0]
            else:
                self.log.e(TAG, 'Not able to insert in Violation table')
                return None
        else:
            # Assumed completion: the snippet is truncated here in the original.
            self.log.e(TAG, 'device must be a string')
            return None
Example #16
class TestLogger(unittest.TestCase):

    def setUp(self):
        self.logger = Logger()

    def tearDown(self):
        subprocess.call(["rm", "-f", "python_log"])

    def it_has_a_log_file(self):
        self.logger.file |should| equal_to('python_log')

    def it_allows_change_the_log_file(self):
        self.logger.file = 'testing_logger'
        self.logger.file |should| equal_to('testing_logger')

    def it_has_a_writing_method(self):
        self.logger.method |should| equal_to('a')

    def it_allows_change_the_writing_method(self):
        self.logger.method = 'w'
        self.logger.method |should| equal_to('w')

    def it_writes_stdout_to_the_log_file(self):
        self.logger.start_log()
        print "a message to stdout"
        self.logger.stop_log()
        log_file_content = open("python_log").read()
        log_file_content |should| include("a message to stdout")
Example #17
def main():
    '''Initialise the pipeline, then run it'''
    # Parse command line arguments
    options = parse_command_line()
    # Initialise the logger
    logger = Logger(__name__, options.log_file, options.verbose)
    # Log the command line used to run the pipeline
    logger.info(' '.join(sys.argv))
    drmaa_session = None
    try:
        # Set up the DRMAA session for running cluster jobs
        import drmaa
        drmaa_session = drmaa.Session()
        drmaa_session.initialize()
    except Exception as e:
        print("{progname} error using DRMAA library".format(progname=program_name), file=sys.stdout)
        print("Error message: {msg}".format(msg=e.message, file=sys.stdout))
        exit(error_codes.DRMAA_ERROR)
    # Parse the configuration file, and initialise global state
    config = Config(options.config)
    config.validate()
    state = State(options=options, config=config, logger=logger,
                  drmaa_session=drmaa_session)
    # Build the pipeline workflow
    pipeline = make_pipeline(state)
    # Run (or print) the pipeline
    cmdline.run(options)
    if drmaa_session is not None:
        # Shut down the DRMAA session
        drmaa_session.exit()
Example #18
	def _update_mark(self, marked):
		"""
		Handles the AJAX calls from the app mark actions
		"""
		post_args = parse_qs(self._env["wsgi.input"].read())
		if "id" not in post_args:
			Logger.warning("not in post args: %s" % str(post_args))
			return self.construct_response(json.dumps({
				"success": False,
				"error": "missing args",
				"id": None
			}), self._route_types.JSON_CONTENT_TYPE)

		post_ids = post_args["id"]
		_, id = post_ids[0].split("_")
		p = Photo.get_by_id(id)
		if p == None:
			Logger.warning("no photo retrieved")
			return self.construct_response(json.dumps({
				"success": False,
				"error": "invalid_id",
				"id": id
			}), self._route_types.JSON_CONTENT_TYPE)

		p.marked = marked
		p.store()
		a = self.construct_response(json.dumps({
				"success": True,
				"details": {
					"marked": p.marked,
					"id": id
				}
			}), self._route_types.JSON_CONTENT_TYPE)
		return a
Example #19
    def call_command(self, args):
        self._config = read_freeline_config()
        self._args = args
        self.debug('command line args: ' + str(args))
        Logger.info('[INFO] preparing for tasks...')

        if 'debug' in args and args.debug:
            self._logger.debuggable = True
        if is_windows_system():
            self._logger.debuggable = True

        self._check_logger_worker()

        if 'cleanBuild' in args and args.cleanBuild:
            is_build_all_projects = args.all
            self._setup_clean_build_command(is_build_all_projects)
        elif 'version' in args and args.version:
            version()
        elif 'clean' in args and args.clean:
            self._command = CleanAllCacheCommand(self._config['build_cache_dir'])
        else:
            from freeline_build import FreelineBuildCommand
            self._command = FreelineBuildCommand(self._config, task_engine=self._task_engine)

        if not isinstance(self._command, AbstractCommand):
            raise TypeError

        self._exec_command(self._command)
Example #20
	def get_dirs_from_date(self):
		"""
		Renders a list of all the year "folders" in the system.

		As we're not rendering any photos, we can assume this to be a separate
		function from the photo fetching and rendering; this is just reporting
		certain dates.
		"""
		path = self._env.get('PATH_INFO', '').lstrip('/')
		path = os.path.relpath(path, "photos")
		Logger.debug(path)
		path_parts = path.split(os.sep)
		if len(path_parts) == 1 and path_parts[0] == ".":
			path_parts = []

		year = None if len(path_parts) < 1 else path_parts[0]
		month = None if len(path_parts) < 2 else path_parts[1]
		list = Photo.get_all_dates(year=year, month=month)

		list = [("0%d" % f if f < 10 else str(f)) for f in list]
		list.sort()
		tokens = {
			"dirs": list,
			"year": year,
			"month": month
		}
		return self.construct_response(Template.render("photos/dirs.html", tokens))
Example #21
	def _get_image(self, size, action):
		"""
		Fetches the large image for lightboxing for the given photo id. Returns
		the image raw data.
		"""
		id = self._get_id_from_path(action)
		try:
			id = int(id)
			p = Photo.get_by_id(id)
		except Exception as e:
			p = None

		if p == None:
			fc = util.FileContainer(os.path.join(S.IMPORT_DIR, id), S.IMPORT_DIR)
			fc.time = util.get_time(fc)["time"]
			p = Photo.from_file_container(fc)

		if p == None:
			Logger.warning("could not find photo for %s" % id)
			image_path = S.BROKEN_IMG_PATH
		else:
			rel_thumb_path = p.get_or_create_thumb(size)
			image_path = os.path.join(S.THUMBNAIL_DIR, rel_thumb_path)

		f = open(image_path)
		raw_image = f.read()
		f.close()
		return self.construct_response(raw_image, self._route_types.JPEG_CONTENT_TYPE)
Example #22
def sendVehiclesSpeed(vehiclesId, outputSocket, mtraci, mVehicles):
    """
    Sends the speed of the given vehicles to the distant client
    """
    speedMsg = []
    speedMsg.append(constants.VEHICLE_SPEED_RESPONSE_HEADER)
    
    mVehicles.acquire()
    for vehicleId in vehiclesId:
        try:
            mtraci.acquire()
            speed = traci.vehicle.getSpeed(vehicleId)
            mtraci.release()
        
            speedMsg.append(constants.SEPARATOR)
            speedMsg.append(vehicleId)
            speedMsg.append(constants.SEPARATOR)
            speedMsg.append(str(speed))
        except:
            mtraci.release()
    mVehicles.release()
        
    speedMsg.append(constants.END_OF_MESSAGE)
        
    strmsg = ''.join(speedMsg)
    try:
        outputSocket.send(strmsg.encode())
    except:
        raise constants.ClosedSocketException("The listening socket has been closed")
    Logger.infoFile("{} Message sent: {}".format(constants.PRINT_PREFIX_VEHICLE, strmsg))
Example #23
def main(argv):
    """
    Main rosnode
    """
    rospy.init_node('rtk_recorder', anonymous=True)

    argv = FLAGS(argv)
    log_dir = os.path.dirname(os.path.abspath(__file__)) + "/../../../data/log/"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    Logger.config(
        log_file=log_dir + "rtk_recorder.log",
        use_stdout=True,
        log_level=logging.DEBUG)
    print("runtime log is in %s%s" % (log_dir, "rtk_recorder.log"))
    record_file = log_dir + "/garage.csv"
    recorder = RtkRecord(record_file)
    atexit.register(recorder.shutdown)
    rospy.Subscriber('/apollo/canbus/chassis', chassis_pb2.Chassis,
                     recorder.chassis_callback)

    rospy.Subscriber('/apollo/localization/pose',
                     localization_pb2.LocalizationEstimate,
                     recorder.localization_callback)

    rospy.spin()
Example #24
    def getUserLocation(self, user_uniqid):
        self.loadUserDb()

        if self._user_db is None:
            return None

        try:
            cursor = self._user_db.cursor()
        except:
            Logger.warning(self, "Unable to create user DB cursor in getUserLocation")
            self._user_db.close()
            return None

        ret = None
        try:
            cursor.execute("SELECT location FROM locations where user_uniqid=?", [user_uniqid])
            data = cursor.fetchall()
            if len(data) > 0:
                ret = data[0][0]
        except:
            pass

        try:
            cursor.close()
        except:
            pass

        self._user_db.close()

        return ret
Example #25
    def storeUserLocation(self, user_uniqid, location):
        self.loadUserDb()

        if self._user_db is None:
            return

        try:
            cursor = self._user_db.cursor()
        except:
            Logger.warning(self, "Unable to create user DB cursor in storeUserLocation")
            return

        try:
            cursor.execute("delete from locations where user_uniqid = ?", [user_uniqid])
            cursor.execute("insert into locations values (?,?)", [user_uniqid, location])
            self._user_db.commit()
        except:
            pass

        try:
            cursor.close()
        except:
            pass

        self._user_db.close()
Example #26
    def startModule(name):
        m = Module.modules[name]
        if m['started'] == False:
            try:
                # we're flagging it here to avoid infinite
                # dependency loops; we have to make sure we
                # set it to false on the except: branch
                m['started'] = True

                # solve dependencies
                if m['deps']:
                    for d in m['deps']:
                        if d in Module.modules:
                            if Module.modules[d]['started'] == False:
                                Module.startModule(d)
                        else:
                            Logger.error('`%s` is needed by `%s`' % (d, name))
                            raise MotherBeeException('dependency `%s\' not found' % d)
                
                # start the module
                Logger.info('starting the `%s\' module' % name)
                m['object'].init()

            except MotherBeeException as e:
                m['started'] = False
                Logger.error('error while starting module `%s\'' % name)

            except Exception as e:
                m['started'] = False
                Logger.error('error while starting module `%s\'' % name)
                Logger.exception()
Example #27
    def __init__(self, jobs_queue, config_attr):
        threading.Thread.__init__(self)
        self.jobs_queue = jobs_queue
        self.log_period = config_attr["log_period"]
        if not os.path.exists(config_attr["log_path"]):
            Logger.error("PeriodicJobsProducer", "Log repository not found: " + config_attr["log_path"])
            os.mkdir(config_attr["log_path"])
Example #28
    def extractContent(self, html, url):
        thread_name = current_thread().name
        t = time.strftime("%y-%m-%d %H:%M:%S", time.localtime())
        Logger.write("[%s][%s][finish = %s][fail = %s] : %s" % (t, thread_name, self.complate, self.fail.qsize(), url))
        parser = etree.XMLParser(ns_clean=True, recover=True)
        tree = etree.fromstring(html, parser)
        self.content.write(url, tree, self.contents)
Example #29
def play(track):
    time.sleep(1)
    print track.path
    logger = Logger("test_log.txt")
    logger.info(track.path)
    os.system("omxplayer -o local " + "\"" + track.path + "\"")
    track.plays -= 1
Example #30
 def __init__(self, cachedir, mmap_mode=None, compress=False, verbose=1):
     """
         Parameters
         ----------
         cachedir: string or None
             The path of the base directory to use as a data store
             or None. If None is given, no caching is done and
             the Memory object is completely transparent.
         mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
             The memmapping mode used when loading from cache
             numpy arrays. See numpy.load for the meaning of the
             arguments.
         compress: boolean
             Whether to zip the stored data on disk. Note that
             compressed arrays cannot be read by memmapping.
         verbose: int, optional
             Verbosity flag, controls the debug messages that are issued
             as functions are re-evaluated.
     """
     # XXX: Bad explanation of the None value of cachedir
     Logger.__init__(self)
     self._verbose = verbose
     self.mmap_mode = mmap_mode
     self.timestamp = time.time()
     self.compress = compress
     if compress and mmap_mode is not None:
         warnings.warn('Compressed results cannot be memmapped',
                       stacklevel=2)
     if cachedir is None:
         self.cachedir = None
     else:
         self.cachedir = os.path.join(cachedir, 'joblib')
         mkdirp(self.cachedir)
Example #31
def train_motion_embedding(config, generator, motion_generator, kp_detector,
                           checkpoint, log_dir, dataset, valid_dataset,
                           device_ids):

    png_dir = os.path.join(log_dir, 'train_motion_embedding/png')
    log_dir = os.path.join(log_dir, 'train_motion_embedding')

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    if not os.path.exists(png_dir):
        os.makedirs(png_dir)

    train_params = config['train_motion_embedding_params']
    optimizer_generator = torch.optim.Adam(motion_generator.parameters(),
                                           lr=train_params['lr'],
                                           betas=(0.5, 0.999))

    if checkpoint is not None:
        Logger.load_cpk(checkpoint,
                        generator=generator,
                        kp_detector=kp_detector)
    else:
        raise AttributeError(
            "kp_detector_checkpoint should be specified for mode='test'.")

    start_epoch = 0
    it = 0

    scheduler_generator = MultiStepLR(optimizer_generator,
                                      train_params['epoch_milestones'],
                                      gamma=0.1,
                                      last_epoch=start_epoch - 1)
    dataloader = DataLoader(valid_dataset,
                            batch_size=train_params['batch_size'],
                            shuffle=True,
                            num_workers=4)
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=1,
                                  shuffle=False,
                                  num_workers=1)

    loss_list = []
    motion_generator_full = MotionGeneratorFullModel(motion_generator,
                                                     train_params)
    motion_generator_full_par = DataParallelWithCallback(motion_generator_full,
                                                         device_ids=device_ids)

    kp_detector = DataParallelWithCallback(kp_detector)
    generator.eval()
    kp_detector.eval()
    cat_dict = lambda l, dim: {
        k: torch.cat([v[k] for v in l], dim=dim)
        for k in l[0]
    }

    with Logger(log_dir=log_dir,
                visualizer_params=config['visualizer_params'],
                **train_params['log_params']) as logger:
        #valid_motion_embedding(config, valid_dataloader, motion_generator, kp_detector, log_dir)
        for epoch in range(start_epoch, train_params['num_epochs']):
            print("Epoch {}".format(epoch))
            motion_generator.train()
            for it, x in tqdm(enumerate(dataloader)):

                with torch.no_grad():

                    # import ipdb; ipdb.set_trace()

                    # x['video']: [bz, ch, #frames, H, W]
                    # detect keypoint for first frame
                    kp_appearance = kp_detector(x['video'][:, :, :1])
                    # kp_appearance['mean']: [bz, frame idx, #kp, 2]
                    # kp_appearance['var']: [bz, frame idx, #kp, 2, 2]

                    d = x['video'].shape[2]
                    # kp_video['mean']: [bz, #frame, #kp, 2]
                    # kp_video['var']: [bz, #frame, #kp, 2, 2]
                    kp_video = cat_dict([
                        kp_detector(x['video'][:, :, i:(i + 1)])
                        for i in range(d)
                    ],
                                        dim=1)


                loss = motion_generator_full_par(d, kp_video, \
                    epoch, it==len(dataloader)-1)

                loss.backward()
                optimizer_generator.step()
                optimizer_generator.zero_grad()
                generator_loss_values = [loss.detach().cpu().numpy()]

                logger.log_iter(it,
                                names=generator_loss_names(
                                    train_params['loss_weights']),
                                values=generator_loss_values,
                                inp=x)

            valid_motion_embedding(config, valid_dataloader, motion_generator,
                                   kp_detector, log_dir)

            scheduler_generator.step()
            logger.log_epoch(epoch, {
                'generator': generator,
                'optimizer_generator': optimizer_generator
            })
Example #32
class EngineBase(object):
    """Base class for training engine."""
    def __init__(self, model, args, verbose=False):
        self.model = model
        self.args = args
        self.verbose = verbose
        self.opt = self.make_opt(self.args.lr)
        self.crit = Criterion(self.model.word_dict)
        self.sel_crit = nn.CrossEntropyLoss(reduction='mean')
        self.ref_crit = nn.BCEWithLogitsLoss(reduction='mean')
        if args.tensorboard_log:
            log_name = 'tensorboard_logs/{}'.format(args.model_type)
            if os.path.exists(log_name):
                print("remove old tensorboard log")
                shutil.rmtree(log_name)
            self.logger = Logger(log_name)

    def make_opt(self, lr):
        if self.args.optimizer == 'adam':
            return optim.Adam(self.model.parameters(), lr=lr)
        elif self.args.optimizer == 'rmsprop':
            return optim.RMSprop(self.model.parameters(),
                                 lr=lr,
                                 momentum=self.args.momentum)
        else:
            assert False

    def get_model(self):
        return self.model

    def train_batch(self, batch):
        pass

    def valid_batch(self, batch):
        pass

    def train_pass(self, trainset, trainset_stats):
        '''
        basic implementation of one training pass
        '''
        self.model.train()

        total_lang_loss, total_select_loss, total_num_correct, total_num_select = 0, 0, 0, 0
        start_time = time.time()

        for batch in trainset:
            lang_loss, select_loss, num_correct, num_select = self.train_batch(
                batch)
            total_lang_loss += lang_loss
            total_select_loss += select_loss
            total_num_correct += num_correct
            total_num_select += num_select

        total_lang_loss /= len(trainset)
        total_select_loss /= len(trainset)
        time_elapsed = time.time() - start_time
        return total_lang_loss, total_select_loss, total_num_correct / total_num_select, time_elapsed

    def valid_pass(self, validset, validset_stats):
        '''
        basic implementation of one validation pass
        '''
        self.model.eval()

        total_lang_loss, total_select_loss, total_num_correct, total_num_select = 0, 0, 0, 0
        for batch in validset:
            lang_loss, select_loss, num_correct, num_select = self.valid_batch(
                batch)
            total_lang_loss += lang_loss
            total_select_loss += select_loss
            total_num_correct += num_correct
            total_num_select += num_select

        total_lang_loss /= len(validset)
        total_select_loss /= len(validset)
        return total_lang_loss, total_select_loss, total_num_correct / total_num_select

    def iter(self, epoch, lr, traindata, validdata):
        trainset, trainset_stats = traindata
        validset, validset_stats = validdata

        train_lang_loss, train_select_loss, train_select_accuracy, train_time = self.train_pass(
            trainset, trainset_stats)
        valid_lang_loss, valid_select_loss, valid_select_accuracy = self.valid_pass(
            validset, validset_stats)

        if self.verbose:
            print(
                '| epoch %03d | trainlangloss %.6f | trainlangppl %.6f | s/epoch %.2f | lr %0.8f'
                % (epoch, train_lang_loss, np.exp(train_lang_loss), train_time,
                   lr))
            print(
                '| epoch %03d | trainselectloss(scaled) %.6f | trainselectaccuracy %.4f | s/epoch %.2f | lr %0.8f'
                % (epoch, train_select_loss * self.args.sel_weight,
                   train_select_accuracy, train_time, lr))
            print('| epoch %03d | validlangloss %.6f | validlangppl %.8f' %
                  (epoch, valid_lang_loss, np.exp(valid_lang_loss)))
            print(
                '| epoch %03d | validselectloss %.6f | validselectaccuracy %.4f'
                % (epoch, valid_select_loss, valid_select_accuracy))

        if self.args.tensorboard_log:
            info = {
                'Train_Lang_Loss': train_lang_loss,
                'Train_Select_Loss': train_select_loss,
                'Valid_Lang_Loss': valid_lang_loss,
                'Valid_Select_Loss': valid_select_loss,
                'Valid_Select_Accuracy': valid_select_accuracy
            }
            for tag, value in info.items():
                self.logger.scalar_summary(tag, value, epoch)

            for tag, value in self.model.named_parameters():
                if value.grad is None:
                    continue
                tag = tag.replace('.', '/')
                self.logger.histo_summary(tag, value.data.cpu().numpy(), epoch)
                self.logger.histo_summary(tag + '/grad',
                                          value.grad.data.cpu().numpy(), epoch)

        return valid_lang_loss, valid_select_loss

    def combine_loss(self, lang_loss, select_loss):
        return lang_loss + select_loss * self.args.sel_weight

    def train(self, corpus):
        best_model, best_combined_valid_loss = copy.deepcopy(self.model), 1e100
        validdata = corpus.valid_dataset(self.args.bsz)

        for epoch in range(1, self.args.max_epoch + 1):
            traindata = corpus.train_dataset(self.args.bsz)
            valid_lang_loss, valid_select_loss = self.iter(
                epoch, self.args.lr, traindata, validdata)

            combined_valid_loss = self.combine_loss(valid_lang_loss,
                                                    valid_select_loss)
            if combined_valid_loss < best_combined_valid_loss:
                print(
                    "update best model: validlangloss %.8f | validselectloss %.8f"
                    % (valid_lang_loss, valid_select_loss))
                best_combined_valid_loss = combined_valid_loss
                best_model = copy.deepcopy(self.model)
                best_model.flatten_parameters()

        return best_combined_valid_loss, best_model

    def train_scheduled(self, corpus):
        best_model, best_combined_valid_loss = copy.deepcopy(self.model), 1e100
        lr = self.args.lr
        last_decay_epoch = 0
        self.t = 0
        validdata = corpus.valid_dataset(self.args.bsz)

        for epoch in range(1, self.args.max_epoch + 1):
            traindata = corpus.train_dataset(self.args.bsz)
            valid_lang_loss, valid_select_loss = self.iter(
                epoch, lr, traindata, validdata)

            combined_valid_loss = self.combine_loss(valid_lang_loss,
                                                    valid_select_loss)
            if combined_valid_loss < best_combined_valid_loss:
                print(
                    "update best model: validlangloss %.8f | validselectloss %.8f"
                    % (valid_lang_loss, valid_select_loss))
                best_combined_valid_loss = combined_valid_loss
                best_model = copy.deepcopy(self.model)
                best_model.flatten_parameters()

        if self.verbose:
            print(
                '| start annealing | best combined loss %.3f | best combined ppl %.3f'
                % (best_combined_valid_loss, np.exp(best_combined_valid_loss)))

        self.model = best_model
        for epoch in range(self.args.max_epoch + 1, 100):
            if epoch - last_decay_epoch >= self.args.decay_every:
                last_decay_epoch = epoch
                lr /= self.args.decay_rate
                if lr < self.args.min_lr:
                    break
                self.opt = self.make_opt(lr)

            traindata = corpus.train_dataset(self.args.bsz)
            valid_lang_loss, valid_select_loss = self.iter(
                epoch, lr, traindata, validdata)

            combined_valid_loss = self.combine_loss(valid_lang_loss,
                                                    valid_select_loss)
            if combined_valid_loss < best_combined_valid_loss:
                print(
                    "update best model: validlangloss %.8f | validselectloss %.8f"
                    % (valid_lang_loss, valid_select_loss))
                best_combined_valid_loss = combined_valid_loss
                best_model = copy.deepcopy(self.model)
                best_model.flatten_parameters()

        return best_combined_valid_loss, best_model
Example #33
import telebot
from command_tree import CommandTree
from chat_manager import *
from fsa_serializer import FsaSerializer
from logger import Logger
import config


bot = telebot.TeleBot(config.token)
logger = Logger("log.txt")
command_tree = CommandTree("bot_tree.json")
fsa_serializer = FsaSerializer("fsa_data", logger)
chat_manager = ChatManager(bot, command_tree, BotMode.POLLING, fsa_serializer)

@bot.message_handler(content_types=['text', 'contact'])
def cmd_all(message):
    print('message: "{}"'.format(message.text))
    command = ''
    if message.content_type != 'contact':
        command = message.text.lower()
    handle_command(message, command)


def handle_command(message, command):
    chat_manager.handle_command(message, command, False)


if __name__ == '__main__':
    print("bot has started..")
    while True:
        try:
            # Assumed completion: the original snippet is truncated here;
            # the bot is configured for polling, so loop on bot.polling.
            bot.polling(none_stop=True)
        except Exception as err:
            print('polling error: {}'.format(err))
Example #34
def main():
    """Main."""
    args = parse_arguments()

    if not baseline_utils.validate_args(args):
        return

    print(f"Using all ({joblib.cpu_count()}) CPUs....")
    if use_cuda:
        print(f"Using all ({torch.cuda.device_count()}) GPUs...")

    model_utils = ModelUtils()

    # key for getting feature set
    # Get features
    if args.use_map and args.use_social:
        baseline_key = "map_social"
    elif args.use_map:
        baseline_key = "map"
    elif args.use_social:
        baseline_key = "social"
    else:
        baseline_key = "none"

    # Get data
    data_dict = baseline_utils.get_data(args, baseline_key)

    # Get model
    criterion = nn.MSELoss()
    encoder = EncoderRNN(
        input_size=len(baseline_utils.BASELINE_INPUT_FEATURES[baseline_key]))
    decoder = DecoderRNN(output_size=2)
    if use_cuda:
        encoder = nn.DataParallel(encoder)
        decoder = nn.DataParallel(decoder)
    encoder.to(device)
    decoder.to(device)

    encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=args.lr)
    decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=args.lr)

    # If model_path provided, resume from saved checkpoint
    if args.model_path is not None and os.path.isfile(args.model_path):
        epoch, rollout_len, _ = model_utils.load_checkpoint(
            args.model_path, encoder, decoder, encoder_optimizer,
            decoder_optimizer)
        start_epoch = epoch + 1
        start_rollout_idx = ROLLOUT_LENS.index(rollout_len) + 1

    else:
        start_epoch = 0
        start_rollout_idx = 0

    if not args.test:

        # Tensorboard logger
        log_dir = os.path.join(os.getcwd(), "lstm_logs", baseline_key)

        # Get PyTorch Dataset
        train_dataset = LSTMDataset(data_dict, args, "train")
        val_dataset = LSTMDataset(data_dict, args, "val")

        # Setting Dataloaders
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.train_batch_size,
            shuffle=True,
            drop_last=False,
            collate_fn=model_utils.my_collate_fn,
        )

        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=args.val_batch_size,
            drop_last=False,
            shuffle=False,
            collate_fn=model_utils.my_collate_fn,
        )

        print("Training begins ...")

        decrement_counter = 0

        epoch = start_epoch
        global_start_time = time.time()
        for i in range(start_rollout_idx, len(ROLLOUT_LENS)):
            rollout_len = ROLLOUT_LENS[i]
            logger = Logger(log_dir, name="{}".format(rollout_len))
            best_loss = float("inf")
            prev_loss = best_loss
            while epoch < args.end_epoch:
                start = time.time()
                train(
                    train_loader,
                    epoch,
                    criterion,
                    logger,
                    encoder,
                    decoder,
                    encoder_optimizer,
                    decoder_optimizer,
                    model_utils,
                    rollout_len,
                )
                end = time.time()

                print(
                    f"Training epoch completed in {(end - start) / 60.0} mins, Total time: {(end - global_start_time) / 60.0} mins"
                )

                epoch += 1
                if epoch % 5 == 0:
                    start = time.time()
                    prev_loss, decrement_counter = validate(
                        val_loader,
                        epoch,
                        criterion,
                        logger,
                        encoder,
                        decoder,
                        encoder_optimizer,
                        decoder_optimizer,
                        model_utils,
                        prev_loss,
                        decrement_counter,
                        rollout_len,
                    )
                    end = time.time()
                    print(
                        f"Validation completed in {(end - start) / 60.0} mins, Total time: {(end - global_start_time) / 60.0} mins"
                    )

                    # If val loss increased 3 times consecutively, go to next rollout length
                    if decrement_counter > 2:
                        break

    else:

        start_time = time.time()

        temp_save_dir = tempfile.mkdtemp()

        test_size = data_dict["test_input"].shape[0]
        test_data_subsets = baseline_utils.get_test_data_dict_subset(
            data_dict, args)

        # test_batch_size should be less than joblib_batch_size
        Parallel(n_jobs=-2, verbose=2)(
            delayed(infer_helper)(test_data_subsets[i], i, encoder, decoder,
                                  model_utils, temp_save_dir)
            for i in range(0, test_size, args.joblib_batch_size))

        baseline_utils.merge_saved_traj(temp_save_dir, args.traj_save_path)
        shutil.rmtree(temp_save_dir)

        end = time.time()
        print(f"Test completed in {(end - start_time) / 60.0} mins")
        print(f"Forecasted Trajectories saved at {args.traj_save_path}")
Example #35
def validate(
    val_loader: Any,
    epoch: int,
    criterion: Any,
    logger: Logger,
    encoder: Any,
    decoder: Any,
    encoder_optimizer: Any,
    decoder_optimizer: Any,
    model_utils: ModelUtils,
    prev_loss: float,
    decrement_counter: int,
    rollout_len: int = 30,
) -> Tuple[float, int]:
    """Validate the lstm network.

    Args:
        val_loader: DataLoader for the train set
        epoch: epoch number
        criterion: Loss criterion
        logger: Tensorboard logger
        encoder: Encoder network instance
        decoder: Decoder network instance
        encoder_optimizer: optimizer for the encoder network
        decoder_optimizer: optimizer for the decoder network
        model_utils: instance for ModelUtils class
        prev_loss: Loss in the previous validation run
        decrement_counter: keeping track of the number of consecutive times loss increased in the current rollout
        rollout_len: current prediction horizon

    """
    args = parse_arguments()
    global best_loss
    total_loss = []

    for i, (_input, target, helpers) in enumerate(val_loader):

        _input = _input.to(device)
        target = target.to(device)

        # Set to eval mode
        encoder.eval()
        decoder.eval()

        # Encoder
        batch_size = _input.shape[0]
        input_length = _input.shape[1]
        output_length = target.shape[1]
        input_shape = _input.shape[2]

        # Initialize encoder hidden state
        encoder_hidden = model_utils.init_hidden(
            batch_size,
            encoder.module.hidden_size if use_cuda else encoder.hidden_size)

        # Initialize loss
        loss = 0

        # Encode observed trajectory
        for ei in range(input_length):
            encoder_input = _input[:, ei, :]
            encoder_hidden = encoder(encoder_input, encoder_hidden)

        # Initialize decoder input with last coordinate in encoder
        decoder_input = encoder_input[:, :2]

        # Initialize decoder hidden state as encoder hidden state
        decoder_hidden = encoder_hidden

        decoder_outputs = torch.zeros(target.shape).to(device)

        # Decode hidden state in future trajectory
        for di in range(output_length):
            decoder_output, decoder_hidden = decoder(decoder_input,
                                                     decoder_hidden)
            decoder_outputs[:, di, :] = decoder_output

            # Update losses for all benchmarks
            loss += criterion(decoder_output[:, :2], target[:, di, :2])

            # Use own predictions as inputs at next step
            decoder_input = decoder_output

        # Get average loss for pred_len
        loss = loss / output_length
        total_loss.append(loss)

        if i % 10 == 0:

            cprint(
                f"Val -- Epoch:{epoch}, loss:{loss}, Rollout: {rollout_len}",
                color="green",
            )

    # Save
    val_loss = sum(total_loss) / len(total_loss)

    if val_loss <= best_loss:
        best_loss = val_loss
        if args.use_map:
            save_dir = "saved_models/lstm_map"
        elif args.use_social:
            save_dir = "saved_models/lstm_social"
        else:
            save_dir = "saved_models/lstm"

        os.makedirs(save_dir, exist_ok=True)
        model_utils.save_checkpoint(
            save_dir,
            {
                "epoch": epoch + 1,
                "rollout_len": rollout_len,
                "encoder_state_dict": encoder.state_dict(),
                "decoder_state_dict": decoder.state_dict(),
                "best_loss": val_loss,
                "encoder_optimizer": encoder_optimizer.state_dict(),
                "decoder_optimizer": decoder_optimizer.state_dict(),
            },
        )

    logger.scalar_summary(tag="Val/loss", value=val_loss.item(), step=epoch)

    # Keep track of the loss to change prediction horizon
    if val_loss <= prev_loss:
        decrement_counter = 0
    else:
        decrement_counter += 1

    return val_loss, decrement_counter
Example #36
def train(
    train_loader: Any,
    epoch: int,
    criterion: Any,
    logger: Logger,
    encoder: Any,
    decoder: Any,
    encoder_optimizer: Any,
    decoder_optimizer: Any,
    model_utils: ModelUtils,
    rollout_len: int = 30,
) -> None:
    """Train the lstm network.

    Args:
        train_loader: DataLoader for the train set
        epoch: epoch number
        criterion: Loss criterion
        logger: Tensorboard logger
        encoder: Encoder network instance
        decoder: Decoder network instance
        encoder_optimizer: optimizer for the encoder network
        decoder_optimizer: optimizer for the decoder network
        model_utils: instance for ModelUtils class
        rollout_len: current prediction horizon

    """
    args = parse_arguments()
    global global_step

    for i, (_input, target, helpers) in enumerate(train_loader):
        _input = _input.to(device)
        target = target.to(device)

        # Set to train mode
        encoder.train()
        decoder.train()

        # Zero the gradients
        encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()

        # Encoder
        batch_size = _input.shape[0]
        input_length = _input.shape[1]
        output_length = target.shape[1]
        input_shape = _input.shape[2]

        # Initialize encoder hidden state
        encoder_hidden = model_utils.init_hidden(
            batch_size,
            encoder.module.hidden_size if use_cuda else encoder.hidden_size)

        # Initialize losses
        loss = 0

        # Encode observed trajectory
        for ei in range(input_length):
            encoder_input = _input[:, ei, :]
            encoder_hidden = encoder(encoder_input, encoder_hidden)

        # Initialize decoder input with last coordinate in encoder
        decoder_input = encoder_input[:, :2]

        # Initialize decoder hidden state as encoder hidden state
        decoder_hidden = encoder_hidden

        decoder_outputs = torch.zeros(target.shape).to(device)

        # Decode hidden state in future trajectory
        for di in range(rollout_len):
            decoder_output, decoder_hidden = decoder(decoder_input,
                                                     decoder_hidden)
            decoder_outputs[:, di, :] = decoder_output

            # Update loss
            loss += criterion(decoder_output[:, :2], target[:, di, :2])

            # Use own predictions as inputs at next step
            decoder_input = decoder_output

        # Get average loss for pred_len
        loss = loss / rollout_len

        # Backpropagate
        loss.backward()
        encoder_optimizer.step()
        decoder_optimizer.step()

        if global_step % 1000 == 0:

            # Log results
            print(
                f"Train -- Epoch:{epoch}, loss:{loss}, Rollout:{rollout_len}")

            logger.scalar_summary(tag="Train/loss",
                                  value=loss.item(),
                                  step=epoch)

        global_step += 1
Example #37
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-l",
                        "--length",
                        help="Packet length",
                        type=int,
                        required=True)
    parser.add_argument("-t",
                        "--simulationtime",
                        help="Simulation time",
                        type=float,
                        required=True)
    parser.add_argument(
        "-g",
        "--generationconstant",
        help="Generation time = Packet length * Generation constant",
        type=float,
        required=True,
    )
    parser.add_argument(
        "-q",
        "--queueconstant",
        help="Handling time = Packet length * Queue constant",
        type=float,
        required=True,
    )
    parser.add_argument(
        "-o",
        "--lambdaon",
        help="Lambda parameter for ON state",
        type=float,
        required=True,
    )
    parser.add_argument(
        "-f",
        "--lambdaoff",
        help="Lambda parameter for OFF state",
        type=float,
        required=True,
    )
    parser.add_argument("-n",
                        "--streams",
                        help="Number of streams",
                        type=int,
                        required=True)
    parser.add_argument(
        "-d",
        "--dropped",
        help="Number of dropped streams after the first queue",
        type=int,
        required=True,
    )
    args = parser.parse_args()

    simulation_params = SimulationParameters(
        args.simulationtime,
        args.length,
        args.generationconstant,
        args.queueconstant,
        args.lambdaon,
        args.lambdaoff,
        args.streams,
        args.dropped,
    )
    Logger.set_logger_params()
    log = Logger(None)

    if simulation_params.streams_number <= simulation_params.dropped_streams:
        log.error("The number of dropped streams has to be lower than "
                  "the number of streams going into the first queue")
        exit(1)

    simulation_time = simulation_params.simulation_time
    timer = Timer()
    event_queue = EventQueue(simulation_time, timer)
    queue_two = Queue(
        timer,
        event_queue,
        simulation_params.packet_length,
        simulation_params.queue_constant,
    )
    queue_one = Queue(
        timer,
        event_queue,
        simulation_params.packet_length,
        simulation_params.queue_constant,
        queue_two,
    )

    rand = Rand(simulation_params.lambda_on, simulation_params.lambda_off)

    generator_pool: List[PacketGenerator] = []

    for _ in range(simulation_params.streams_number -
                   simulation_params.dropped_streams):
        generator = PacketGenerator(
            timer,
            event_queue,
            queue_one,
            rand,
            simulation_params.packet_length,
            simulation_params.generation_constant,
            True,
        )
        generator_pool.append(generator)

    for _ in range(simulation_params.dropped_streams):
        generator = PacketGenerator(
            timer,
            event_queue,
            queue_one,
            rand,
            simulation_params.packet_length,
            simulation_params.generation_constant,
            False,
        )
        generator_pool.append(generator)

        generator = PacketGenerator(
            timer,
            event_queue,
            queue_two,
            rand,
            simulation_params.packet_length,
            simulation_params.generation_constant,
            True,
        )
        generator_pool.append(generator)

    while event_queue.handle_event():
        log.debug(f"Time: @{timer.current_time:.2f}")

    log.debug("queue one data:")
    log.debug(f"{str(queue_one.packets_number)[:100]} --clip--")
    log.debug(f"{str(queue_one.packets)[:100]} --clip--")
    log.debug(f"{str(queue_one.packets_passed)[:100]} --clip--")

    results = {}
    results["avg_queue_length_Q1"] = data_reader.show_queue_length_average(
        queue_one.packets_number)
    results[
        "avg_queue_waiting_time_Q1"] = data_reader.show_average_queue_waiting_time_Q1(
            queue_one.packets_passed)
    results["avg_delay_Q1"] = data_reader.show_average_delay_Q1(
        queue_one.packets_passed)
    results["avg_load_Q1"] = data_reader.show_average_server_load_Q1(
        queue_one.packets_passed)
    results["packets_passed_Q1"] = len(queue_one.packets_passed)

    log.debug("queue two data:")
    log.debug(f"{str(queue_two.packets_number)[:100]} --clip--")
    log.debug(f"{str(queue_two.packets)[:100]} --clip--")
    log.debug(f"{str(queue_two.packets_passed)[:100]} --clip--")
    results["avg_queue_length_Q2"] = data_reader.show_queue_length_average(
        queue_two.packets_number)
    results[
        "avg_queue_waiting_time_Q2"] = data_reader.show_average_queue_waiting_time_Q2(
            queue_two.packets_passed)
    results["avg_delay_Q2"] = data_reader.show_average_delay_Q2(
        queue_two.packets_passed)
    results["avg_load_Q2"] = data_reader.show_average_server_load_Q2(
        queue_two.packets_passed)
    results["packets_passed_Q2"] = len(queue_two.packets_passed)
    results["simulation_params"] = {
        "simulation_time": simulation_params.simulation_time,
        "packet_length": simulation_params.packet_length,
        "generation_constant": simulation_params.generation_constant,
        "queue_constant": simulation_params.queue_constant,
        "lambda_on": simulation_params.lambda_on,
        "lambda_off": simulation_params.lambda_off,
        "streams_number": simulation_params.streams_number,
        "dropped_streams": simulation_params.dropped_streams,
    }
    print(json.dumps(results))
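The SimulationParameters container, the Timer/EventQueue/Queue classes and the data_reader helpers used above are defined elsewhere in the project and are not part of this excerpt. As a rough sketch only, a parameter container compatible with the positional constructor call above (field names inferred from the attribute accesses in the script, so they are assumptions) could be as simple as:

# Hypothetical sketch of the parameter container; the field names below are
# inferred from the attribute accesses in the script above, not taken from
# the original source.
from typing import NamedTuple

class SimulationParameters(NamedTuple):
    simulation_time: float
    packet_length: int
    generation_constant: float
    queue_constant: float
    lambda_on: float
    lambda_off: float
    streams_number: int
    dropped_streams: int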
Example #38
0
class Solver(object):
    """Solver for training and testing StarGAN."""
    def __init__(self, celeba_loader, config):
        """Initialize configurations."""

        # Data loader.
        self.celeba_loader = celeba_loader

        # Model configurations.
        self.c_dim = config.c_dim
        self.image_size = config.image_size
        self.g_conv_dim = config.g_conv_dim
        self.d_conv_dim = config.d_conv_dim
        self.g_repeat_num = config.g_repeat_num
        self.d_repeat_num = config.d_repeat_num
        self.lambda_cls = config.lambda_cls
        self.lambda_rec = config.lambda_rec
        self.lambda_gp = config.lambda_gp

        # Training configurations.
        self.batch_size = config.batch_size
        self.num_iters = config.num_iters
        self.num_iters_decay = config.num_iters_decay
        self.g_lr = config.g_lr
        self.d_lr = config.d_lr
        self.n_critic = config.n_critic
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.resume_iters = config.resume_iters
        self.selected_attrs = config.selected_attrs

        # Test configurations.
        self.test_iters = config.test_iters

        # Miscellaneous.
        self.use_tensorboard = config.use_tensorboard
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        # Directories.
        self.log_dir = config.log_dir
        self.sample_dir = config.sample_dir
        self.model_save_dir = config.model_save_dir
        self.result_dir = config.result_dir

        # Step size.
        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step
        self.lr_update_step = config.lr_update_step

        # Build the model and tensorboard.
        self.build_model()
        if self.use_tensorboard:
            self.build_tensorboard()

    def build_model(self):
        """Create a generator and a discriminator."""
        self.G = Generator(self.g_conv_dim, self.c_dim, self.g_repeat_num)
        self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim,
                               self.d_repeat_num)

        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr,
                                            [self.beta1, self.beta2])
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr,
                                            [self.beta1, self.beta2])
        self.print_network(self.G, 'G')
        self.print_network(self.D, 'D')

        self.G.to(self.device)
        self.D.to(self.device)

    def print_network(self, model, name):
        """Print out the network information."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(model)
        print(name)
        print("The number of parameters: {}".format(num_params))

    def restore_model(self, resume_iters):
        """Restore the trained generator and discriminator."""
        print(
            'Loading the trained models from step {}...'.format(resume_iters))
        G_path = os.path.join(self.model_save_dir,
                              '{}-G.ckpt'.format(resume_iters))
        D_path = os.path.join(self.model_save_dir,
                              '{}-D.ckpt'.format(resume_iters))
        self.G.load_state_dict(
            torch.load(G_path, map_location=lambda storage, loc: storage))
        self.D.load_state_dict(
            torch.load(D_path, map_location=lambda storage, loc: storage))

    def build_tensorboard(self):
        """Build a tensorboard logger."""
        from logger import Logger
        self.logger = Logger(self.log_dir)

    def update_lr(self, g_lr, d_lr):
        """Decay learning rates of the generator and discriminator."""
        for param_group in self.g_optimizer.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_optimizer.param_groups:
            param_group['lr'] = d_lr

    def reset_grad(self):
        """Reset the gradient buffers."""
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()

    def denorm(self, x):
        """Convert the range from [-1, 1] to [0, 1]."""
        out = (x + 1) / 2
        return out.clamp_(0, 1)

    def gradient_penalty(self, y, x):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        weight = torch.ones(y.size()).to(self.device)
        dydx = torch.autograd.grad(outputs=y,
                                   inputs=x,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]

        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
        return torch.mean((dydx_l2norm - 1)**2)

    def create_labels(self, c_org, c_dim=5, selected_attrs=None):
        """Generate target domain labels for debugging and testing.
        Generate a list of class vectors each with different hair colors for testing."""
        # Get hair color indices.
        hair_color_indices = []
        for i, attr_name in enumerate(selected_attrs):
            if attr_name in [
                    'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair'
            ]:
                hair_color_indices.append(i)

        c_trg_list = []
        for i in range(c_dim):
            c_trg = c_org.clone()
            if i in hair_color_indices:  # Set one hair color to 1 and the rest to 0.
                c_trg[:, i] = 1
                for j in hair_color_indices:
                    if j != i:
                        c_trg[:, j] = 0
            else:
                c_trg[:, i] = (c_trg[:, i] == 0)  # Reverse attribute value.

            c_trg_list.append(c_trg.to(self.device))
        return c_trg_list

    def classification_loss(self, logit, target):
        """Compute binary or softmax cross entropy loss."""
        return F.binary_cross_entropy_with_logits(
            logit, target, reduction='sum') / logit.size(0)

    def train(self):
        """Train StarGAN within a single dataset."""
        # Set data loader.
        data_loader = self.celeba_loader

        # Fetch fixed inputs for debugging.
        data_iter = iter(data_loader)
        x_fixed, c_org = next(data_iter)  # first iter is used for debugging
        x_fixed = x_fixed.to(self.device)
        c_fixed_list = self.create_labels(c_org, self.c_dim,
                                          self.selected_attrs)

        # Learning rate cache for decaying.
        g_lr = self.g_lr
        d_lr = self.d_lr

        # Start training from scratch or resume training.
        start_iters = 0
        if self.resume_iters:
            start_iters = self.resume_iters
            self.restore_model(self.resume_iters)

        # Start training.
        print('Start training...')
        start_time = time.time()
        for i in range(start_iters, self.num_iters):

            # =================================================================================== #
            #                             1. Preprocess input data                                #
            # =================================================================================== #

            # Fetch real images and labels.
            try:
                x_real, label_org = next(data_iter)
            except StopIteration:
                data_iter = iter(data_loader)
                x_real, label_org = next(data_iter)

            # Generate target domain labels randomly.
            rand_idx = torch.randperm(label_org.size(0))
            label_trg = label_org[rand_idx]

            c_org = label_org.clone()
            c_trg = label_trg.clone()

            x_real = x_real.to(self.device)  # Input images.
            c_org = c_org.to(self.device)  # Original domain labels.
            c_trg = c_trg.to(self.device)  # Target domain labels.
            label_org = label_org.to(
                self.device)  # Labels for computing classification loss.
            label_trg = label_trg.to(
                self.device)  # Labels for computing classification loss.

            # =================================================================================== #
            #                             2. Train the discriminator                              #
            # =================================================================================== #

            # Compute loss with real images.
            out_src, out_cls = self.D(x_real)
            d_loss_real = -torch.mean(out_src)
            d_loss_cls = self.classification_loss(out_cls, label_org)

            # Compute loss with fake images.
            x_fake = self.G(x_real, c_trg)
            out_src, out_cls = self.D(x_fake.detach())  # don't want to train G
            # here
            d_loss_fake = torch.mean(out_src)

            # Compute loss for gradient penalty.
            alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
            x_hat = (alpha * x_real.data +
                     (1 - alpha) * x_fake.data).requires_grad_(True)
            out_src, _ = self.D(x_hat)
            d_loss_gp = self.gradient_penalty(out_src, x_hat)

            # Backward and optimize.
            d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
            self.reset_grad()
            d_loss.backward()
            self.d_optimizer.step()

            # Logging.
            loss = {}
            loss['D/loss_real'] = d_loss_real.item()
            loss['D/loss_fake'] = d_loss_fake.item()
            loss['D/loss_cls'] = d_loss_cls.item()
            loss['D/loss_gp'] = d_loss_gp.item()

            # =================================================================================== #
            #                               3. Train the generator                                #
            # =================================================================================== #

            if (i + 1) % self.n_critic == 0:
                # Original-to-target domain.
                x_fake = self.G(x_real, c_trg)
                out_src, out_cls = self.D(x_fake)
                g_loss_fake = -torch.mean(out_src)
                g_loss_cls = self.classification_loss(out_cls, label_trg)

                # Target-to-original domain.
                x_reconst = self.G(x_fake, c_org)
                g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))

                # Backward and optimize.
                g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # Logging.
                loss['G/loss_fake'] = g_loss_fake.item()
                loss['G/loss_rec'] = g_loss_rec.item()
                loss['G/loss_cls'] = g_loss_cls.item()

            # =================================================================================== #
            #                                 4. Miscellaneous                                    #
            # =================================================================================== #

            # Print out training information.
            if (i + 1) % self.log_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log = "Elapsed [{}], Iteration [{}/{}]".format(
                    et, i + 1, self.num_iters)
                for tag, value in loss.items():
                    log += ", {}: {:.4f}".format(tag, value)
                print(log)

                if self.use_tensorboard:
                    for tag, value in loss.items():
                        self.logger.scalar_summary(tag, value, i + 1)

            # Translate fixed images for debugging.
            if (i + 1) % self.sample_step == 0:
                with torch.no_grad():
                    x_fake_list = [x_fixed]
                    for c_fixed in c_fixed_list:
                        x_fake_list.append(self.G(x_fixed, c_fixed))
                    x_concat = torch.cat(x_fake_list, dim=3)
                    sample_path = os.path.join(self.sample_dir,
                                               '{}-images.jpg'.format(i + 1))
                    save_image(self.denorm(x_concat.data.cpu()),
                               sample_path,
                               nrow=1,
                               padding=0)
                    print('Saved real and fake images into {}...'.format(
                        sample_path))

            # Save model checkpoints.
            if (i + 1) % self.model_save_step == 0:
                G_path = os.path.join(self.model_save_dir,
                                      '{}-G.ckpt'.format(i + 1))
                D_path = os.path.join(self.model_save_dir,
                                      '{}-D.ckpt'.format(i + 1))
                torch.save(self.G.state_dict(), G_path)
                torch.save(self.D.state_dict(), D_path)
                print('Saved model checkpoints into {}...'.format(
                    self.model_save_dir))

            # Decay learning rates.
            if (i + 1) % self.lr_update_step == 0 and (i + 1) > (
                    self.num_iters - self.num_iters_decay):
                g_lr -= (self.g_lr / float(self.num_iters_decay))
                d_lr -= (self.d_lr / float(self.num_iters_decay))
                self.update_lr(g_lr, d_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(
                    g_lr, d_lr))

    def test(self):
        """Translate images using StarGAN trained on a single dataset."""
        # Load the trained generator.
        self.restore_model(self.test_iters)

        # Set data loader.
        data_loader = self.celeba_loader

        with torch.no_grad():
            for i, (x_real, c_org) in enumerate(data_loader):

                # Prepare input images and target domain labels.
                x_real = x_real.to(self.device)
                c_trg_list = self.create_labels(c_org, self.c_dim,
                                                self.selected_attrs)

                # Translate images.
                x_fake_list = [x_real]
                for c_trg in c_trg_list:
                    x_fake_list.append(self.G(x_real, c_trg))

                # Save the translated images.
                x_concat = torch.cat(x_fake_list, dim=3)
                result_path = os.path.join(self.result_dir,
                                           '{}-images.jpg'.format(i + 1))
                save_image(self.denorm(x_concat.data.cpu()),
                           result_path,
                           nrow=1,
                           padding=0)
                print('Saved real and fake images into {}...'.format(
                    result_path))
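The gradient_penalty method above is the WGAN-GP term, the mean of (||dD(x_hat)/dx_hat||_2 - 1)^2 evaluated on interpolates between real and fake samples. A tiny standalone sketch of the same autograd computation, using a toy critic instead of the StarGAN discriminator, shows the mechanics:

# Standalone illustration of the WGAN-GP penalty used in Solver.gradient_penalty;
# the toy linear critic below is only for demonstration.
import torch

critic = torch.nn.Linear(8, 1)            # stand-in for the discriminator output D(x)
x_real = torch.randn(4, 8)
x_fake = torch.randn(4, 8)

alpha = torch.rand(x_real.size(0), 1)      # per-sample interpolation coefficients
x_hat = (alpha * x_real + (1 - alpha) * x_fake).requires_grad_(True)

out = critic(x_hat)
grad = torch.autograd.grad(outputs=out, inputs=x_hat,
                           grad_outputs=torch.ones_like(out),
                           create_graph=True, retain_graph=True,
                           only_inputs=True)[0]
grad = grad.view(grad.size(0), -1)
penalty = ((grad.norm(2, dim=1) - 1) ** 2).mean()
print(penalty.item())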
Example #39
0
from_zero = False

# Rewards scaling - default is 10.0
scale_rewards = 10.0

# norm to use
norm = 2

if from_zero:
    fb._M1 = lambda x: np.zeros((4, 4))
    fb._f1 = lambda x: np.zeros((4, 1))

if do_PPO:
    logger = Logger(
        "logs/quadrotor_14d_PPO_%dx%d_std%f_lr%f_kl%f_%d_%d_dyn_%f_%f_%f_%f_seed_%d.pkl"
        % (num_layers, num_hidden_units, noise_std, learning_rate, desired_kl,
           num_rollouts, num_steps_per_rollout, mass_scaling, Ix_scaling,
           Iy_scaling, Iz_scaling, seed))
    solver = PPO(num_iters, learning_rate, desired_kl, discount_factor,
                 num_rollouts, num_steps_per_rollout, dyn,
                 initial_state_sampler, fb, logger)

if do_Reinforce:
    logger = Logger(
        "logs/quadrotor_14d_Reinforce_%dx%d_std%f_lr%f_kl%f_%d_%d_fromzero_%s_dyn_%f_%f_%f_%f_seed_%d_norm_%d_smallweights_relu.pkl"
        % (num_layers, num_hidden_units, noise_std, learning_rate, desired_kl,
           num_rollouts, num_steps_per_rollout, str(from_zero), mass_scaling,
           Ix_scaling, Iy_scaling, Iz_scaling, seed, norm))
    solver = Reinforce(num_iters, learning_rate, desired_kl, discount_factor,
                       num_rollouts, num_steps_per_rollout, dyn,
                       initial_state_sampler, fb, logger, norm, scale_rewards,
Example #40
0
def run3Dalignment(paramsdict, partids, partstack, outputdir, procid, myid,
                   main_node, nproc):
    #  Reads from paramsdict["stack"] particles partids set parameters in partstack
    #    and do refinement as specified in paramsdict
    #
    #  Will create outputdir
    #  Will write to outputdir output parameters: params-chunk0.txt and params-chunk1.txt
    if (myid == main_node):
        #  Create output directory
        log = Logger(BaseLogger_Files())
        log.prefix = os.path.join(outputdir)
        #cmd = "mkdir "+log.prefix
        #cmdexecute(cmd)
        log.prefix += "/"
    else:
        log = None
    mpi_barrier(MPI_COMM_WORLD)

    ali3d_options.delta = paramsdict["delta"]
    ali3d_options.ts = paramsdict["ts"]
    ali3d_options.xr = paramsdict["xr"]
    #  low pass filter is applied to shrank data, so it has to be adjusted
    ali3d_options.fl = paramsdict["lowpass"] / paramsdict["shrink"]
    ali3d_options.initfl = paramsdict["initialfl"] / paramsdict["shrink"]
    ali3d_options.aa = paramsdict["falloff"]
    ali3d_options.maxit = paramsdict["maxit"]
    ali3d_options.mask3D = paramsdict["mask3D"]
    ali3d_options.an = paramsdict["an"]
    ali3d_options.ou = paramsdict[
        "radius"]  #  This is changed in ali3d_base, but the shrank value is needed in vol recons, fixt it!
    shrinkage = paramsdict["shrink"]

    projdata = getindexdata(paramsdict["stack"], partids, partstack, myid,
                            nproc)
    onx = projdata[0].get_xsize()
    last_ring = ali3d_options.ou
    if last_ring < 0: last_ring = int(onx / 2) - 2
    mask2D = model_circle(last_ring, onx, onx) - model_circle(
        ali3d_options.ir, onx, onx)
    if (shrinkage < 1.0):
        # get the new size
        masks2D = resample(mask2D, shrinkage)
        nx = masks2D.get_xsize()
        masks2D = model_circle(
            int(last_ring * shrinkage + 0.5), nx, nx) - model_circle(
                max(int(ali3d_options.ir * shrinkage + 0.5), 1), nx, nx)
    nima = len(projdata)
    oldshifts = [[0.0, 0.0] for _ in xrange(nima)]
    for im in xrange(nima):
        #data[im].set_attr('ID', list_of_particles[im])
        ctf_applied = projdata[im].get_attr_default('ctf_applied', 0)
        phi, theta, psi, sx, sy = get_params_proj(projdata[im])
        projdata[im] = fshift(projdata[im], sx, sy)
        set_params_proj(projdata[im], [phi, theta, psi, 0.0, 0.0])
        #  For local SHC set anchor
        #if(nsoft == 1 and an[0] > -1):
        #	set_params_proj(data[im],[phi,tetha,psi,0.0,0.0], "xform.anchor")
        oldshifts[im] = [sx, sy]
        if ali3d_options.CTF:
            ctf_params = projdata[im].get_attr("ctf")
            if ctf_applied == 0:
                st = Util.infomask(projdata[im], mask2D, False)
                projdata[im] -= st[0]
                projdata[im] = filt_ctf(projdata[im], ctf_params)
                projdata[im].set_attr('ctf_applied', 1)
        if (shrinkage < 1.0):
            #phi,theta,psi,sx,sy = get_params_proj(projdata[im])
            projdata[im] = resample(projdata[im], shrinkage)
            st = Util.infomask(projdata[im], None, True)
            projdata[im] -= st[0]
            st = Util.infomask(projdata[im], masks2D, True)
            projdata[im] /= st[1]
            #sx *= shrinkage
            #sy *= shrinkage
            #set_params_proj(projdata[im], [phi,theta,psi,sx,sy])
            if ali3d_options.CTF:
                ctf_params.apix /= shrinkage
                projdata[im].set_attr('ctf', ctf_params)
        else:
            st = Util.infomask(projdata[im], None, True)
            projdata[im] -= st[0]
            st = Util.infomask(projdata[im], mask2D, True)
            projdata[im] /= st[1]
    del mask2D
    if (shrinkage < 1.0): del masks2D
    """
	if(paramsdict["delpreviousmax"]):
		for i in xrange(len(projdata)):
			try:  projdata[i].del_attr("previousmax")
			except:  pass
	"""
    if (myid == main_node):
        print_dict(paramsdict, "3D alignment parameters")
        print("                    =>  actual lowpass      :  "******"                    =>  actual init lowpass :  "******"                    =>  PW adjustment       :  ",ali3d_options.pwreference)
        print("                    =>  partids             :  ", partids)
        print("                    =>  partstack           :  ", partstack)

    if (ali3d_options.fl > 0.46):
        ERROR(
            "Low pass filter in 3D alignment > 0.46 on the scale of shrank data",
            "sxcenter_projections", 1, myid)

    #  Run alignment command, it returns params per CPU
    params = center_projections_3D(projdata, paramsdict["refvol"], \
            ali3d_options, onx, shrinkage, \
            mpi_comm = MPI_COMM_WORLD,  myid = myid, main_node = main_node, log = log )
    del log, projdata

    params = wrap_mpi_gatherv(params, main_node, MPI_COMM_WORLD)

    #  store params
    if (myid == main_node):
        for im in xrange(nima):
            params[im][0] = params[im][0] / shrinkage + oldshifts[im][0]
            params[im][1] = params[im][1] / shrinkage + oldshifts[im][1]
        line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
        print(line, "Executed successfully: ", "3D alignment",
              "  number of images:%7d" % len(params))
        write_text_row(params, os.path.join(outputdir, "params.txt"))
Example #41
0
def request(method, path, params={}, access_token=None, payload=None):
    '''
    Sends an HTTP request to the Shokesu API.
    :param method: The HTTP method to use. Must be one of
    GET, PUT, POST, DELETE.
    :param path: Path of the resource to fetch, relative to https://api.shokesu.com
    :param params: Dictionary of parameters to append to the URL.
    :param access_token: JWT token used to authenticate the client. (If None,
    it is not added to the headers.)
    :param payload: Parameters to add to the request body. None by default. If not
    None, it must be a JSON-serializable object; it will be encoded in that format
    and included in the request payload.
    :return:
    '''

    # Drop parameters whose value is None.
    params = dict([(key, value) for key, value in params.items()
                   if value is not None])

    # Build the URL.
    result = match(r'^/?(.*)$', path)
    path = result.group(1)
    url = '{}/{}?{}'.format(api_uri_root, path, urlencode(params))

    send_request = {
        'GET': requests.get,
        'POST': requests.post,
        'PUT': requests.put,
        'DELETE': requests.delete
    }

    logger = Logger()

    if payload is None and method == 'POST':
        payload = {}
    payload = json.dumps(payload) if payload is not None else None
    headers = {}
    if access_token is not None:
        headers['Authorization'] = 'Bearer {}'.format(access_token)

    logger.debug('-------------\n')
    logger.debug('Sending {} request to {}'.format(method, url))
    logger.debug('Headers: ')
    logger.debug('\n'.join(
        ['{}: {}'.format(key, value) for key, value in headers.items()]))
    logger.debug('Payload: ')
    logger.debug(payload)

    response = send_request[method](url=url, data=payload, headers=headers)
    logger.debug('Response status code: {}'.format(response.status_code))

    try:
        if response.status_code == 404:
            raise Exception('404 Error Not Found')

        if response.status_code != 200:
            try:
                description = search(r'<b>description</b>[^<]*<u>([^<]+)</u>',
                                     response.text, DOTALL).group(1)
            except Exception:
                description = 'Unknown server error'
            raise Exception(description)

    except Exception as e:
        raise Exception('Error executing request to {}: {}'.format(
            response.url, str(e)))

    logger.debug('Response body: ')
    logger.debug(response.text)
    logger.debug('-------------\n')

    return response
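A usage sketch of the helper above (the '/profiles' path, the 'page' parameter and the token variable are hypothetical placeholders, not documented Shokesu endpoints):

# Hypothetical usage: the endpoint and parameter names are placeholders.
my_jwt_token = '<jwt obtained elsewhere>'
response = request('GET', '/profiles',
                   params={'page': 1},
                   access_token=my_jwt_token)
data = response.json()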
Example #42
0
    def build_tensorboard(self):
        """Build a tensorboard logger."""
        from logger import Logger
        self.logger = Logger(self.log_dir)
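Several of these snippets import a small Logger wrapper from a local logger module (as in build_tensorboard above and Example #43 below); that module is not included here. A minimal sketch of a compatible wrapper, assuming only the Logger(log_dir) constructor and a scalar_summary(tag, value, step) method, built on torch.utils.tensorboard, might look like:

# Minimal sketch of a TensorBoard wrapper compatible with the calls seen in
# these examples (Logger(log_dir) and logger.scalar_summary(tag, value, step)).
# This is an assumption about the local logger module, not its actual source.
from torch.utils.tensorboard import SummaryWriter

class Logger(object):
    def __init__(self, log_dir):
        # Create a writer that stores event files under log_dir.
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        # Log a single scalar value for the given global step.
        self.writer.add_scalar(tag, value, step)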
Example #43
0
class NeuralNet(nn.Module):
    def __init__(self, input_size=784, hidden_size=500, num_classes=10):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out


model = NeuralNet().to(device)

logger = Logger('./logs')

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.00001)

data_iter = iter(data_loader)
iter_per_epoch = len(data_loader)
total_step = 50000

# Start training
for step in range(total_step):

    # Reset the data_iter
    if (step + 1) % iter_per_epoch == 0:
        data_iter = iter(data_loader)
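The loop above is cut off after the iterator reset; a plausible continuation of one training step (a sketch that assumes flattened MNIST-style inputs and the scalar_summary logging interface used in the other examples, not the original file) would be:

    # Sketch of the rest of one training step (assumed: 28x28 inputs flattened to 784).
    images, labels = next(data_iter)
    images, labels = images.view(images.size(0), -1).to(device), labels.to(device)

    # Forward pass, loss, backward pass, parameter update.
    outputs = model(images)
    loss = criterion(outputs, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Periodically log the loss to TensorBoard via the Logger wrapper.
    if (step + 1) % 100 == 0:
        print('Step [{}/{}], Loss: {:.4f}'.format(step + 1, total_step, loss.item()))
        logger.scalar_summary('loss', loss.item(), step + 1)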
Example #44
0
    parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                        help='report interval')
    parser.add_argument('--tb-logdir', type=str,
                        help='folder for logs for TensorBoard')
    parser.add_argument('--load', type=str, required=True,
                        help='where to load a model from')
    parser.add_argument('--save', type=str, required=True,
                        help='path to save the final model')
    args = parser.parse_args()
    print(args)

    init_seeds(args.seed, args.cuda)

    tb_logger = None
    if args.tb_logdir:
        tb_logger = Logger(args.tb_logdir, update_freq=100)

    print("loading model...")
    lm = torch.load(args.load)
    if args.cuda:
        lm.cuda()
    print(lm.model)

    print("preparing data...")
    tokenize_regime = 'words'
    if args.characters:
        tokenize_regime = 'chars'

    train_ids = tokens_from_fn(args.train, lm.vocab, randomize=False, regime=tokenize_regime)
    train_batched = batchify(train_ids, args.batch_size, args.cuda)
    train_data_tb = TemporalSplits(
Example #45
0
import os
import sys
import time

# Make the package root importable for the local imports below.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))

import schedule
from logger import Logger


def main():
    print("This is bob at: ", time.ctime())


if __name__ == "__main__":
    interval = 2
    dir_name = os.path.dirname(os.path.abspath(__file__))
    file_with_path = os.path.join(dir_name, __file__)
    logger = Logger(__file__)
    print("\n\n*****DO NOT KILL this program*****\n")
    print(
        "If you accidentally or intentionally killed this program, please rerun it"
    )
    print("This program runs processes every:", interval, "secs")

    log_msg = "\n\n" + file_with_path + "\n"
    log_msg += "This program runs every: " + str(interval) + "secs\n"
    log_msg += "It was invoked at: " + time.strftime("%c")

    schedule.every(1).minutes.do(main)
    while True:
        schedule.run_pending()
        # logger.log(log_msg)
        print("right after run_pending")
        time.sleep(interval)
Example #46
0
from awsclients import AwsClients
from cfnpipeline import CFNPipeline
from logger import Logger

# Initialise logging
loglevel = 'debug'
logger = Logger(loglevel=loglevel)
logger.info('New Lambda container initialised, logging configured.')

# Initialise boto client factory
clients = AwsClients(logger)

# Initialise CFNPipeline helper library
pipeline_run = CFNPipeline(logger, clients)


def test_subnet_name(region, stackid, logical_resource_id,
                     physical_resource_id):

    logger.debug({"test_subnet_name": "starting"})
    # Create an empty list to put errors into (so you can have more than 1 error per test)
    error_msg = []

    # Setup our output object, if there are errors we will flip the "success" key to False
    results = {
        "success": True,
        "logical_resource_id": logical_resource_id,
        "physical_resource_id": physical_resource_id,
        "region": region,
        "stackid": stackid
    }
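The test function is truncated here; the usual pattern in these pipeline tests (sketched below, not the original continuation) is to append each failed check to error_msg, flip the success flag if anything failed, and return the results dict:

    # Sketch only: run the actual checks, collect failures, then finalize.
    if error_msg:
        results["success"] = False
        results["error_msg"] = error_msg
    return results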
Example #47
0
class Simulation(object):
    """Main class that will run the herd immunity simulation program.
    Expects initialization parameters passed as command-line arguments when
    the file is run.

    Simulates the spread of a virus through a given population. The percentage
    of the population that is vaccinated, the size of the population, and the
    number of initially infected people are all variables that can be set when
    the program is run.
    """

    def __init__(self, pop_size, vacc_percentage, virus, initial_infected=1):
        """
        Logger object logger records all events during the simulation.
        Population represents all Persons in the population.
        The next_person_id is the next available id for all created Persons,
        and should have a unique _id value.
        The vaccination percentage represents the total percentage of
        population vaccinated at the start of the simulation.
        You will need to keep track of the number of people currently infected
        with the disease.
        The total infected count is the running total of people that have been
        infected since the simulation began, including currently infected
        people who died. You will also need to keep track of the number of
        people that have died as a result of the infection.

        All arguments will be passed as command-line arguments when the file is
        run.
        HINT: Look in the if __name__ == "__main__" function at the bottom.
        """
        self.pop_size = pop_size  # Int
        self.virus = virus  # Virus object
        self.initial_infected = initial_infected  # Int
        self.vacc_percentage = vacc_percentage  # float between 0 and 1
        self.total_infected = 0  # Int
        self.current_infected = 0  # Int
        self.new_deaths = 0  # Int
        self.total_dead = 0  # Int
        self.new_vaccinations = 0  # Int
        self.total_vaccinated = 0  # Int
        self.vacc_saves = 0  # Int
        self.population = self._create_population(self.initial_infected)
        # List of people objects
        self.file_name = "logs/{}_simulation_pop_{}_vp_{}_infected_{}\
.txt".format(virus.name, pop_size, vacc_percentage, initial_infected)
        self.logger = Logger(self.file_name)
        self.newly_infected = []

        self.logger.write_metadata(self.pop_size, self.vacc_percentage,
                                   self.virus.name, self.virus.mortality_rate,
                                   self.virus.repro_rate)

    def _create_population(self, initial_infected):
        """
        This method will create the initial population.
            Args:
                initial_infected (int): The number of infected people that the
                simulation
                will begin with.

            Returns:
                list: A list of Person objects.

        """
        # Use the attributes created in the init method to create a population
        # that has the correct initial vaccination percentage and initial
        # infected count.
        pop_list = []
        vacc_number = int(self.pop_size * self.vacc_percentage)

        for person_num in range(self.pop_size):
            # Create initially infected people
            if person_num < initial_infected:
                pop_list.append(Person(person_num, False, self.virus))
                self.total_infected += 1
                self.current_infected += 1
            # Create vaccinated people
            elif person_num < initial_infected + vacc_number:
                pop_list.append(Person(person_num, True))
                self.total_vaccinated += 1
            # Create everyone else
            else:
                pop_list.append(Person(person_num, False))

        return pop_list

    def _simulation_should_continue(self):
        """
        The simulation should end once there are no infected people left
        (everyone remaining is either dead, vaccinated, or has recovered).

            Returns:
                bool: True if the simulation should continue, False if it
                should end.

        """
        # If there are no infected people left
        if self.current_infected == 0:
            return False
        else:
            return True

    def get_infected(self):
        """Helper function to get and return the list of infected people"""
        inf_list = []
        self.current_infected = 0
        for person in self.population:
            if person.infection is not None and person.is_alive:
                inf_list.append(person)
                self.current_infected += 1

        return inf_list

    def run(self):
        """
        This method should run the simulation until all requirements for ending
        the simulation are met.
        """
        time_step_counter = 0
        should_continue = True

        while should_continue:
            # Complete another step of the simulation
            time_step_counter += 1
            self.time_step()
            self.logger.log_time_step(time_step_counter, self.current_infected,
                                      self.new_deaths, self.new_vaccinations,
                                      self.total_infected, self.total_dead,
                                      self.total_vaccinated, self.vacc_saves)
            should_continue = self._simulation_should_continue()

        print(f'The simulation has ended after {time_step_counter} turns.')

        return f'The simulation has ended after {time_step_counter} turns.'

    def time_step(self):
        """
        This method should contain all the logic for computing one time step
        in the simulation.

        This includes:
            1. 100 total interactions with a random person for each infected
            person in the population
            2. If the person is dead, grab another random person from the
            population.
                Since we don't interact with dead people, this does not count
                as an interaction.
            3. Otherwise call simulation.interaction(person, random_person) and
                increment interaction counter by 1.
        """
        # Create list of infected people
        self.new_deaths = 0
        self.new_vaccinations = 0
        inf_list = self.get_infected()

        # Iterate through infected population and interact with 100 people
        for person in inf_list:
            interaction_count = 0
            while interaction_count < 100:
                random_person = random.choice(self.population)
                while (not random_person.is_alive
                       or random_person._id == person._id):
                    random_person = random.choice(self.population)
                self.interaction(person, random_person)
                interaction_count += 1

        # Check if infected people survive the infection
        for person in inf_list:
            survived = person.did_survive_infection()
            if survived:
                self.total_vaccinated += 1
                self.new_vaccinations += 1
                self.logger.log_infection_survival(person, False)
            else:
                self.total_dead += 1
                self.new_deaths += 1
                self.logger.log_infection_survival(person, True)

        # Infect newly infected people
        self._infect_newly_infected()
        self.get_infected()

    def interaction(self, person, random_person):
        """
        This method should be called any time two living people are selected
        for an
        interaction. It assumes that only living people are passed in as
        parameters.

        Args:
            person (person): The initial infected person
            random_person (person): The person that person1 interacts with.

        """
        # Assert statements are included to make sure that only living people
        # are passed
        # in as params
        assert person.is_alive is True
        assert random_person.is_alive is True

        # Check Cases:
        # If vaccinated or sick, do nothing
        # Otherwise find random percentage and check against repro_rate
        #     If it's lower, add random_person to newly_infected list
        #     Otherwise, nothing happens

        if random_person.is_vaccinated:
            self.logger.log_interaction(person, random_person,
                                        False, True, False)
            self.vacc_saves += 1
        elif random_person.infection is not None:
            self.logger.log_interaction(person, random_person,
                                        True, False, False)
        else:
            inf_chance = random.random()
            if (inf_chance < person.infection.repro_rate
               and self.newly_infected.count(random_person._id) == 0):
                self.newly_infected.append(random_person._id)
                self.logger.log_interaction(person, random_person,
                                            False, False, True)
            else:
                self.logger.log_interaction(person, random_person,
                                            False, False, False)

            return inf_chance

    def _infect_newly_infected(self):
        """
        This method should iterate through the list of ._id stored in
        self.newly_infected
        and update each Person object with the disease.
        """
        for person_id in self.newly_infected:
            self.population[person_id].infection = self.virus
            self.total_infected += 1

        self.newly_infected.clear()
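A minimal driver for the class above (sketch only; the Virus constructor signature is assumed from the attributes read inside the class, namely name, repro_rate and mortality_rate, and the real __main__ block is not shown in this excerpt):

# Hypothetical driver; Virus(name, repro_rate, mortality_rate) is an assumption
# based on the virus attributes accessed by Simulation.
if __name__ == "__main__":
    virus = Virus("Ebola", repro_rate=0.25, mortality_rate=0.70)
    sim = Simulation(pop_size=1000, vacc_percentage=0.90,
                     virus=virus, initial_infected=10)
    sim.run()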
Example #48
0
def main():
	from optparse   import OptionParser
	from global_def import SPARXVERSION
	from EMAN2      import EMData
	from logger     import Logger, BaseLogger_Files
	import sys, os, time
	global Tracker, Blockdata
	from global_def import ERROR
	progname = os.path.basename(sys.argv[0])
	usage = progname + " --output_dir=output_dir  --isac_dir=output_dir_of_isac "
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--pw_adjustment", type ="string", default ='analytical_model',  \
	   help="adjust power spectrum of 2-D averages to an analytic model. Other opions: no_adjustment; bfactor; a text file of 1D rotationally averaged PW")
	#### Four options for --pw_adjustment: 	
	# 1> analytical_model(default); 
	# 2> no_adjustment;
	# 3> bfactor;
	# 4> adjust_to_given_pw2(user has to provide a text file that contains 1D rotationally averaged PW)

	# options in common
	parser.add_option("--isac_dir",              type   ="string",         default ='',     help="ISAC run output directory, input directory for this command")
	parser.add_option("--output_dir",            type   ="string",         default ='',     help="output directory where computed averages are saved")
	parser.add_option("--pixel_size",            type   ="float",          default =-1.0,   help="pixel_size of raw images. one can put 1.0 in case of negative stain data")
	parser.add_option("--fl",                    type   ="float",          default =-1.0,    help= "low pass filter, = -1.0, not applied; =0.0, using FH1 (initial resolution), = 1.0 using FH2 (resolution after local alignment), or user provided value in absolute freqency [0.0:0.5]")
	parser.add_option("--stack",                 type   ="string",         default ="",     help= "data stack used in ISAC")
	parser.add_option("--radius",                type   ="int",            default =-1,     help= "radius")
	parser.add_option("--xr",                    type   ="float",          default =-1.0,   help= "local alignment search range")
	#parser.add_option("--ts",                    type   ="float",          default =1.0,    help= "local alignment search step")
	parser.add_option("--fh",                    type   ="float",          default =-1.0,   help= "local alignment high frequencies limit")
	#parser.add_option("--maxit",                 type   ="int",            default =5,      help= "local alignment iterations")
	parser.add_option("--navg",                  type   ="int",            default =1000000,     help= "number of aveages")
	parser.add_option("--local_alignment",       action ="store_true",     default =False,  help= "do local alignment")
	parser.add_option("--noctf",                 action ="store_true",     default =False,  help="no ctf correction, useful for negative stained data. always ctf for cryo data")
	parser.add_option("--B_start",  type   ="float",  default = 45.0,  help="start frequency (Angstrom) of power spectrum for B_factor estimation")
	parser.add_option("--Bfactor",  type   ="float",  default = -1.0,  help= "User defined bactors (e.g. 25.0[A^2]). By default, the program automatically estimates B-factor. ")
			
	(options, args) = parser.parse_args(sys.argv[1:])
	
	adjust_to_analytic_model = False
	adjust_to_given_pw2      = False
	B_enhance                = False
	no_adjustment            = False

	if   options.pw_adjustment=='analytical_model':   adjust_to_analytic_model = True
	elif options.pw_adjustment=='no_adjustment':      no_adjustment            = True
	elif options.pw_adjustment=='bfactor':            B_enhance                = True
	else:                                             adjust_to_given_pw2      = True 

	from utilities 		import get_im, bcast_number_to_all, write_text_file,read_text_file,wrap_mpi_bcast, write_text_row
	from utilities 		import cmdexecute
	from filter			import filt_tanl
	from logger         import Logger,BaseLogger_Files
	import user_functions
	import string
	from   string       import split, atoi, atof
	import json

	mpi_init(0, [])
	nproc    = mpi_comm_size(MPI_COMM_WORLD)
	myid     = mpi_comm_rank(MPI_COMM_WORLD)


	Blockdata = {}
	#  MPI stuff
	Blockdata["nproc"]              = nproc
	Blockdata["myid"]               = myid
	Blockdata["main_node"]          = 0
	Blockdata["shared_comm"]                    = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED,  0, MPI_INFO_NULL)
	Blockdata["myid_on_node"]                   = mpi_comm_rank(Blockdata["shared_comm"])
	Blockdata["no_of_processes_per_group"]      = mpi_comm_size(Blockdata["shared_comm"])
	masters_from_groups_vs_everything_else_comm = mpi_comm_split(MPI_COMM_WORLD, Blockdata["main_node"] == Blockdata["myid_on_node"], Blockdata["myid_on_node"])
	Blockdata["color"], Blockdata["no_of_groups"], balanced_processor_load_on_nodes = get_colors_and_subsets(Blockdata["main_node"], MPI_COMM_WORLD, Blockdata["myid"], \
			 Blockdata["shared_comm"], Blockdata["myid_on_node"], masters_from_groups_vs_everything_else_comm)
	#  We need two nodes for processing of volumes
	Blockdata["node_volume"] = [Blockdata["no_of_groups"]-3, Blockdata["no_of_groups"]-2, Blockdata["no_of_groups"]-1]  # For 3D stuff take three last nodes
	#  We need two CPUs for processing of volumes, they are taken to be main CPUs on each volume
	#  We have to send the two myids to all nodes so we can identify main nodes on two selected groups.
	Blockdata["nodes"] = [Blockdata["node_volume"][0]*Blockdata["no_of_processes_per_group"],Blockdata["node_volume"][1]*Blockdata["no_of_processes_per_group"], \
		 Blockdata["node_volume"][2]*Blockdata["no_of_processes_per_group"]]
	# End of Blockdata: sorting requires at least three nodes, and the used number of nodes be integer times of three
	global_def.BATCH = True
	global_def.MPI   = True


	if adjust_to_given_pw2:
		checking_flag = 0
		if(Blockdata["myid"] == Blockdata["main_node"]):
			if not os.path.exists(options.pw_adjustment): checking_flag = 1
		checking_flag = bcast_number_to_all(checking_flag, Blockdata["main_node"], MPI_COMM_WORLD)
		if checking_flag ==1: ERROR("User provided power spectrum does not exist", "sxcompute_isac_avg.py", 1, Blockdata["myid"])
	
	Tracker                                   = {}
	Constants		                          = {}
	Constants["isac_dir"]                     = options.isac_dir
	Constants["masterdir"]                    = options.output_dir
	Constants["pixel_size"]                   = options.pixel_size
	Constants["orgstack"]                     = options.stack
	Constants["radius"]                       = options.radius
	Constants["xrange"]                       = options.xr
	Constants["FH"]                           = options.fh
	Constants["low_pass_filter"]              = options.fl
	#Constants["maxit"]                        = options.maxit
	Constants["navg"]                         = options.navg
	Constants["B_start"]                      = options.B_start
	Constants["Bfactor"]                      = options.Bfactor
	
	if adjust_to_given_pw2: Constants["modelpw"] = options.pw_adjustment
	Tracker["constants"] = Constants
	# -------------------------------------------------------------
	#
	# Create and initialize Tracker dictionary with input options  # State Variables


	#<<<---------------------->>>imported functions<<<---------------------------------------------

	#x_range = max(Tracker["constants"]["xrange"], int(1./Tracker["ini_shrink"])+1)
	#y_range =  x_range

	####-----------------------------------------------------------
	# Create Master directory and associated subdirectories
	line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
	if Tracker["constants"]["masterdir"] == Tracker["constants"]["isac_dir"]:
		masterdir = os.path.join(Tracker["constants"]["isac_dir"], "sharpen")
	else: masterdir = Tracker["constants"]["masterdir"]

	if(Blockdata["myid"] == Blockdata["main_node"]):
		msg = "Postprocessing ISAC 2D averages starts"
		print(line, "Postprocessing ISAC 2D averages starts")
		if not masterdir:
			timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
			masterdir ="sharpen_"+Tracker["constants"]["isac_dir"]
			os.mkdir(masterdir)
		else:
			if os.path.exists(masterdir): print("%s already exists"%masterdir)
			else: os.mkdir(masterdir)
		subdir_path = os.path.join(masterdir, "ali2d_local_params_avg")
		if not os.path.exists(subdir_path): os.mkdir(subdir_path)
		subdir_path = os.path.join(masterdir, "params_avg")
		if not os.path.exists(subdir_path): os.mkdir(subdir_path)
		li =len(masterdir)
	else: li = 0
	li                                  = mpi_bcast(li,1,MPI_INT,Blockdata["main_node"],MPI_COMM_WORLD)[0]
	masterdir							= mpi_bcast(masterdir,li,MPI_CHAR,Blockdata["main_node"],MPI_COMM_WORLD)
	masterdir                           = string.join(masterdir,"")
	Tracker["constants"]["masterdir"]	= masterdir
	log_main = Logger(BaseLogger_Files())
	log_main.prefix = Tracker["constants"]["masterdir"]+"/"

	while not os.path.exists(Tracker["constants"]["masterdir"]):
		print("Node ", Blockdata["myid"], "  waiting...", Tracker["constants"]["masterdir"])
		sleep(1)
	mpi_barrier(MPI_COMM_WORLD)

	if(Blockdata["myid"] == Blockdata["main_node"]):
		init_dict = {}
		print(Tracker["constants"]["isac_dir"])
		Tracker["directory"] = os.path.join(Tracker["constants"]["isac_dir"], "2dalignment")
		core = read_text_row(os.path.join(Tracker["directory"], "initial2Dparams.txt"))
		for im in range(len(core)): init_dict[im]  = core[im]
		del core
	else: init_dict = 0
	init_dict = wrap_mpi_bcast(init_dict, Blockdata["main_node"], communicator = MPI_COMM_WORLD)
	###
	do_ctf = True
	if options.noctf: do_ctf = False 
	if(Blockdata["myid"] == Blockdata["main_node"]):
		if do_ctf: print("CTF correction is on")
		else:      print("CTF correction is off")
		if options.local_alignment: print("local refinement is on")
		else:                       print("local refinement is off")
		if B_enhance: print("Bfactor is to be applied on averages")
		elif adjust_to_given_pw2: print("PW of averages is adjusted to a given 1D PW curve")
		elif adjust_to_analytic_model: print("PW of averages is adjusted to analytical model")
		else: print("PW of averages is not adjusted")
		#Tracker["constants"]["orgstack"] = "bdb:"+ os.path.join(Tracker["constants"]["isac_dir"],"../","sparx_stack")
		image = get_im(Tracker["constants"]["orgstack"], 0)
		Tracker["constants"]["nnxo"] = image.get_xsize()
		if Tracker["constants"]["pixel_size"] == -1.0:
			print("Pixel size value is not provided by user. extracting it from ctf header entry of the original stack.")
			try:
				ctf_params = image.get_attr("ctf")
				Tracker["constants"]["pixel_size"] = ctf_params.apix
			except: 
				ERROR("Pixel size could not be extracted from the original stack.", "sxcompute_isac_avg.py", 1, Blockdata["myid"]) # action=1 - fatal error, exit
		## Now fill in low-pass filter
			
		isac_shrink_path = os.path.join(Tracker["constants"]["isac_dir"], "README_shrink_ratio.txt")
		if not os.path.exists(isac_shrink_path):
			ERROR("%s does not exist in the specified ISAC run output directory"%(isac_shrink_path), "sxcompute_isac_avg.py", 1, Blockdata["myid"]) # action=1 - fatal error, exit
		isac_shrink_file = open(isac_shrink_path, "r")
		isac_shrink_lines = isac_shrink_file.readlines()
		isac_shrink_ratio = float(isac_shrink_lines[5])  # 6th line: shrink ratio (= [target particle radius]/[particle radius]) used in the ISAC run
		isac_radius = float(isac_shrink_lines[6])        # 7th line: particle radius at original pixel size used in the ISAC run
		isac_shrink_file.close()
		print("Extracted parameter values")
		print("ISAC shrink ratio    : {0}".format(isac_shrink_ratio))
		print("ISAC particle radius : {0}".format(isac_radius))
		Tracker["ini_shrink"] = isac_shrink_ratio
	else: Tracker["ini_shrink"] = 0.0
	Tracker = wrap_mpi_bcast(Tracker, Blockdata["main_node"], communicator = MPI_COMM_WORLD)

	#print(Tracker["constants"]["pixel_size"], "pixel_size")	
	x_range = max(Tracker["constants"]["xrange"], int(1./Tracker["ini_shrink"]+0.99999) )
	a_range = y_range = x_range

	if(Blockdata["myid"] == Blockdata["main_node"]): parameters = read_text_row(os.path.join(Tracker["constants"]["isac_dir"], "all_parameters.txt"))
	else: parameters = 0
	parameters = wrap_mpi_bcast(parameters, Blockdata["main_node"], communicator = MPI_COMM_WORLD)		
	params_dict = {}
	list_dict   = {}
	# prepare params_dict

	#navg = min(Tracker["constants"]["navg"]*Blockdata["nproc"], EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf")))
	navg = min(Tracker["constants"]["navg"], EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf")))
	global_dict = {}
	ptl_list    = []
	memlist     = []
	if(Blockdata["myid"] == Blockdata["main_node"]):
		print("Number of averages computed in this run is %d"%navg)
		for iavg in range(navg):
			params_of_this_average = []
			image   = get_im(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf"), iavg)
			members = sorted(image.get_attr("members"))
			memlist.append(members)
			for im in range(len(members)):
				abs_id =  members[im]
				global_dict[abs_id] = [iavg, im]
				P = combine_params2( init_dict[abs_id][0], init_dict[abs_id][1], init_dict[abs_id][2], init_dict[abs_id][3], \
				parameters[abs_id][0], old_div(parameters[abs_id][1],Tracker["ini_shrink"]), old_div(parameters[abs_id][2],Tracker["ini_shrink"]), parameters[abs_id][3])
				if parameters[abs_id][3] ==-1: 
					print("WARNING: Image #{0} is an unaccounted particle with invalid 2D alignment parameters and should not be the member of any classes. Please check the consitency of input dataset.".format(abs_id)) # How to check what is wrong about mirror = -1 (Toshio 2018/01/11)
				params_of_this_average.append([P[0], P[1], P[2], P[3], 1.0])
				ptl_list.append(abs_id)
			params_dict[iavg] = params_of_this_average
			list_dict[iavg] = members
			write_text_row(params_of_this_average, os.path.join(Tracker["constants"]["masterdir"], "params_avg", "params_avg_%03d.txt"%iavg))
		ptl_list.sort()
		init_params = [ None for im in range(len(ptl_list))]
		for im in range(len(ptl_list)):
			init_params[im] = [ptl_list[im]] + params_dict[global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
		write_text_row(init_params, os.path.join(Tracker["constants"]["masterdir"], "init_isac_params.txt"))
	else:  
		params_dict = 0
		list_dict   = 0
		memlist     = 0
	params_dict = wrap_mpi_bcast(params_dict, Blockdata["main_node"], communicator = MPI_COMM_WORLD)
	list_dict   = wrap_mpi_bcast(list_dict, Blockdata["main_node"], communicator = MPI_COMM_WORLD)
	memlist     = wrap_mpi_bcast(memlist, Blockdata["main_node"], communicator = MPI_COMM_WORLD)
	# Now computing!
	del init_dict
	tag_sharpen_avg = 1000
	## always apply low pass filter to B_enhanced images to suppress noise in high frequencies 
	enforced_to_H1 = False
	if B_enhance:
		if Tracker["constants"]["low_pass_filter"] == -1.0: enforced_to_H1 = True
	if navg <Blockdata["nproc"]:#  Each CPU do one average 
		ERROR("number of nproc is larger than number of averages", "sxcompute_isac_avg.py", 1, Blockdata["myid"])
	else:
		FH_list  = [ [0, 0.0, 0.0] for im in range(navg)]
		image_start,image_end = MPI_start_end(navg, Blockdata["nproc"], Blockdata["myid"])
		if Blockdata["myid"] == Blockdata["main_node"]:
			cpu_dict = {}
			for iproc in range(Blockdata["nproc"]):
				local_image_start, local_image_end = MPI_start_end(navg, Blockdata["nproc"], iproc)
				for im in range(local_image_start, local_image_end): cpu_dict [im] = iproc
		else:  cpu_dict = 0
		cpu_dict = wrap_mpi_bcast(cpu_dict, Blockdata["main_node"], communicator = MPI_COMM_WORLD)
	
		slist      = [None for im in range(navg)]
		ini_list   = [None for im in range(navg)]
		avg1_list  = [None for im in range(navg)]
		avg2_list  = [None for im in range(navg)]
		plist_dict = {}
		
		data_list  = [ None for im in range(navg)]
		if Blockdata["myid"] == Blockdata["main_node"]:
			if B_enhance: print("Avg ID   B-factor  FH1(Res before ali) FH2(Res after ali)")	
			else: print("Avg ID   FH1(Res before ali)  FH2(Res after ali)")	
		for iavg in range(image_start,image_end):
			mlist = EMData.read_images(Tracker["constants"]["orgstack"], list_dict[iavg])
			for im in range(len(mlist)):
				#mlist[im]= get_im(Tracker["constants"]["orgstack"], list_dict[iavg][im])
				set_params2D(mlist[im], params_dict[iavg][im], xform = "xform.align2d")
			
			if options.local_alignment:
				"""
				new_average1 = within_group_refinement([mlist[kik] for kik in range(0,len(mlist),2)], maskfile= None, randomize= False, ir=1.0,  \
				 ou=Tracker["constants"]["radius"], rs=1.0, xrng=[x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
				 dst=0.0, maxit=Tracker["constants"]["maxit"], FH=max(Tracker["constants"]["FH"], FH1), FF=0.02, method="")
				new_average2 = within_group_refinement([mlist[kik] for kik in range(1,len(mlist),2)], maskfile= None, randomize= False, ir=1.0, \
				 ou= Tracker["constants"]["radius"], rs=1.0, xrng=[ x_range], yrng=[y_range], step=[Tracker["constants"]["xstep"]], \
				 dst=0.0, maxit=Tracker["constants"]["maxit"], FH = max(Tracker["constants"]["FH"], FH1), FF=0.02, method="")
				new_avg, frc, plist = compute_average(mlist, Tracker["constants"]["radius"], do_ctf)
				"""
				new_avg, plist, FH2 = refinement_2d_local(mlist, Tracker["constants"]["radius"], a_range,x_range, y_range, CTF = do_ctf, SNR=1.0e10)

				plist_dict[iavg] = plist
				FH1 = -1.0
			else:
				new_avg, frc, plist = compute_average(mlist, Tracker["constants"]["radius"], do_ctf)
				FH1 = get_optimistic_res(frc)
				FH2 = -1.0
			#write_text_file(frc, os.path.join(Tracker["constants"]["masterdir"], "fsc%03d.txt"%iavg))
			FH_list[iavg] = [iavg, FH1, FH2]
			
			if B_enhance:
				new_avg, gb = apply_enhancement(new_avg, Tracker["constants"]["B_start"], Tracker["constants"]["pixel_size"], Tracker["constants"]["Bfactor"])
				print("  %6d      %6.3f  %4.3f  %4.3f"%(iavg, gb, FH1, FH2))
				
			elif adjust_to_given_pw2: 
				roo = read_text_file( Tracker["constants"]["modelpw"], -1)
				roo = roo[0] # always on the first column
				new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], roo)
				print("  %6d      %4.3f  %4.3f  "%(iavg, FH1, FH2))
				
			elif adjust_to_analytic_model:
				new_avg = adjust_pw_to_model(new_avg, Tracker["constants"]["pixel_size"], None)
				print("  %6d      %4.3f  %4.3f   "%(iavg, FH1, FH2))

			elif no_adjustment: pass
			
			if Tracker["constants"]["low_pass_filter"] != -1.0:
				if Tracker["constants"]["low_pass_filter"] == 0.0: low_pass_filter = FH1
				elif Tracker["constants"]["low_pass_filter"] == 1.0: 
					low_pass_filter = FH2
					if not options.local_alignment: low_pass_filter = FH1
				else: 
					low_pass_filter = Tracker["constants"]["low_pass_filter"]
					if low_pass_filter >=0.45: low_pass_filter =0.45 		
				new_avg = filt_tanl(new_avg, low_pass_filter, 0.02)
			else:  # no user-supplied low-pass filter; apply FH1 only when enforced for B-enhanced averages
				if enforced_to_H1: new_avg = filt_tanl(new_avg, FH1, 0.02)
			if B_enhance: new_avg = fft(new_avg)
				
			new_avg.set_attr("members",   list_dict[iavg])
			new_avg.set_attr("n_objects", len(list_dict[iavg]))
			slist[iavg]    = new_avg
			print(strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>",  "Refined average %7d"%iavg)
			
		## send to main node to write
		mpi_barrier(MPI_COMM_WORLD)

		
		for im in range(navg):
			# avg
			if cpu_dict[im] == Blockdata["myid"] and Blockdata["myid"] != Blockdata["main_node"]:
				send_EMData(slist[im], Blockdata["main_node"],  tag_sharpen_avg)
			
			elif cpu_dict[im] == Blockdata["myid"] and Blockdata["myid"] == Blockdata["main_node"]:
				slist[im].set_attr("members", memlist[im])
				slist[im].set_attr("n_objects", len(memlist[im]))
				slist[im].write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
			
			elif cpu_dict[im] != Blockdata["myid"] and Blockdata["myid"] == Blockdata["main_node"]:
				new_avg_other_cpu = recv_EMData(cpu_dict[im], tag_sharpen_avg)
				new_avg_other_cpu.set_attr("members", memlist[im])
				new_avg_other_cpu.set_attr("n_objects", len(memlist[im]))
				new_avg_other_cpu.write_image(os.path.join(Tracker["constants"]["masterdir"], "class_averages.hdf"), im)
			
			if options.local_alignment:
				if cpu_dict[im] == Blockdata["myid"]:
					write_text_row(plist_dict[im], os.path.join(Tracker["constants"]["masterdir"], "ali2d_local_params_avg", "ali2d_local_params_avg_%03d.txt"%im))
				
				if cpu_dict[im] == Blockdata["myid"] and cpu_dict[im]!= Blockdata["main_node"]:
					wrap_mpi_send(plist_dict[im], Blockdata["main_node"], MPI_COMM_WORLD)
					wrap_mpi_send(FH_list, Blockdata["main_node"], MPI_COMM_WORLD)
				
				elif cpu_dict[im]!= Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
					dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
					plist_dict[im] = dummy
					dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
					FH_list[im] = dummy[im]
			else:
				if cpu_dict[im] == Blockdata["myid"] and cpu_dict[im]!= Blockdata["main_node"]:
					wrap_mpi_send(FH_list, Blockdata["main_node"], MPI_COMM_WORLD)
				
				elif cpu_dict[im]!= Blockdata["main_node"] and Blockdata["myid"] == Blockdata["main_node"]:
					dummy = wrap_mpi_recv(cpu_dict[im], MPI_COMM_WORLD)
					FH_list[im] = dummy[im]
					
			mpi_barrier(MPI_COMM_WORLD)
		mpi_barrier(MPI_COMM_WORLD)
	
	if options.local_alignment:
		if Blockdata["myid"] == Blockdata["main_node"]:
			ali2d_local_params = [ None for im in range(len(ptl_list)) ]
			for im in range(len(ptl_list)):
				ali2d_local_params[im] = [ptl_list[im]] + plist_dict[global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
			write_text_row(ali2d_local_params, os.path.join(Tracker["constants"]["masterdir"], "ali2d_local_params.txt"))
			write_text_row(FH_list, os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))
	else:
		if Blockdata["myid"] == Blockdata["main_node"]:
			write_text_row(FH_list, os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))
				
	mpi_barrier(MPI_COMM_WORLD)			
	target_xr = 3
	target_yr = 3
	if( Blockdata["myid"] == 0):
		cmd = "{} {} {} {} {} {} {} {} {} {}".format("sxchains.py", os.path.join(Tracker["constants"]["masterdir"],"class_averages.hdf"),\
		os.path.join(Tracker["constants"]["masterdir"],"junk.hdf"),os.path.join(Tracker["constants"]["masterdir"],"ordered_class_averages.hdf"),\
		"--circular","--radius=%d"%Tracker["constants"]["radius"] , "--xr=%d"%(target_xr+1),"--yr=%d"%(target_yr+1),"--align", ">/dev/null")
		junk = cmdexecute(cmd)
		cmd = "{} {}".format("rm -rf", os.path.join(Tracker["constants"]["masterdir"], "junk.hdf") )
		junk = cmdexecute(cmd)
		
	from mpi import mpi_finalize
	mpi_finalize()
	exit()
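The block above splits the navg class averages across MPI ranks with MPI_start_end and the cpu_dict owner map, then gathers the refined averages back on the main node for writing. Below is a minimal sketch of that owner-map idea, assuming an even block split; the partition rule actually used by MPI_start_end may differ, and block_partition, n_avg and n_procs are illustrative names only.

def block_partition(n_items, n_procs, my_rank):
    # Assumed even block split: rank my_rank owns items [start, end).
    start = (n_items * my_rank) // n_procs
    end = (n_items * (my_rank + 1)) // n_procs
    return start, end

n_avg, n_procs = 10, 4
cpu_dict = {}
for rank in range(n_procs):
    start, end = block_partition(n_avg, n_procs, rank)
    for im in range(start, end):
        cpu_dict[im] = rank  # rank that computes and later sends average im
print(cpu_dict)
# {0: 0, 1: 0, 2: 1, 3: 1, 4: 1, 5: 2, 6: 2, 7: 3, 8: 3, 9: 3}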
Example #49
0
import os

from logger import Logger
from splitter import Splitter
from linguistics_info import *
# `reporting` (which provides fetch_masker below) is assumed to be another project-local module.

language = 'english'

PROJECT_PATH = "/neurospin/unicog/protocols/IRMf/LePetitPrince_Pallier_2018/LePetitPrince/"
OUTPUT_PATH = f"/neurospin/unicog/protocols/IRMf/LePetitPrince_Pallier_2018/LePetitPrince/derivatives/fMRI/maps/{language}"
INPUT_PATH = f"/neurospin/unicog/protocols/IRMf/LePetitPrince_Pallier_2018/LePetitPrince/data/stimuli-representations/{language}"
FMRIDATA_PATH = f"/neurospin/unicog/protocols/IRMf/LePetitPrince_Pallier_2018/LePetitPrince/data/fMRI/{language}"
MASKER_PATH = f"/neurospin/unicog/protocols/IRMf/LePetitPrince_Pallier_2018/LePetitPrince/derivatives/fMRI/ROI_masks/global_masker_95%_{language}"
ALL_MASKS_PATH = "/neurospin/unicog/protocols/IRMf/LePetitPrince_Pallier_2018/LePetitPrince/derivatives/fMRI/ROI_masks/"
SAVING_FOLDER = f"/neurospin/unicog/protocols/IRMf/LePetitPrince_Pallier_2018/LePetitPrince/derivatives/fMRI/clustering/{language}"
TMP_FOLDER = "/home/ap259944/tmp"

logger = Logger(os.path.join(PROJECT_PATH, 'logs.txt'))

global_masker_50 = reporting.fetch_masker(
    f"{ALL_MASKS_PATH}/global_masker_{language}",
    language,
    FMRIDATA_PATH,
    INPUT_PATH,
    smoothing_fwhm=None,
    logger=logger)
global_masker_95 = reporting.fetch_masker(
    f"{ALL_MASKS_PATH}/global_masker_95%_{language}",
    language,
    FMRIDATA_PATH,
    INPUT_PATH,
    smoothing_fwhm=None,
    logger=logger)
Example #50
0
    try:
        from fake.const import (
            APPROVAL_PENDING as S_PENDING,
            APPROVAL_NONE as S_NONE,
            OK as S_OK,
            ERROR as S_ERROR,
            CRED_TYPE_PASSWORD,
            CRED_TYPE_SSH_KEY,
            CRED_DATA_PRIVATE_KEY,
            CRED_DATA_SSH_CERTIFICATE,
            CRED_DATA_LOGIN,
            CRED_DATA_ACCOUNT_UID,
            CRED_INDEX,
        )
    except Exception as e:
        Logger().info("Wabengine const LOADING FAILED %s" % tracelog)

APPROVAL_ACCEPTED = "APPROVAL_ACCEPTED"
APPROVAL_REJECTED = "APPROVAL_REJECTED"
APPROVAL_PENDING = "APPROVAL_PENDING"
APPROVAL_NONE = "APPROVAL_NONE"
STATUS_SUCCESS = [S_OK]
STATUS_PENDING = [S_PENDING]
STATUS_APPR_NEEDED = [S_NONE]


class CheckoutEngine(object):
    def __init__(self, engine):
        self.engine = engine
        self.session_credentials = {}
        self.scenario_credentials = {}
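If the guarded import above fails, only a log line is emitted (tracelog is presumably built from traceback.format_exc() in code that is not shown), and the S_OK, S_PENDING and S_NONE aliases used by the STATUS_* lists remain undefined. A hedged sketch of a local fallback; the placeholder values are an assumption, not the real contents of fake.const:

try:
    from fake.const import OK as S_OK, APPROVAL_PENDING as S_PENDING, APPROVAL_NONE as S_NONE
except ImportError:
    # Hypothetical fallback values so the STATUS_* lists can still be built.
    S_OK = "OK"
    S_PENDING = "APPROVAL_PENDING"
    S_NONE = "APPROVAL_NONE"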
Example #51
0
class Solver(object):
    def __init__(self, celeba_loader, rafd_loader, config):

        self.celeba_loader = celeba_loader
        self.rafd_loader = rafd_loader

        # Model configurations.
        self.c_dim = config.c_dim
        self.c2_dim = config.c2_dim
        self.image_size = config.image_size
        self.g_conv_dim = config.g_conv_dim
        self.d_conv_dim = config.d_conv_dim
        self.g_repeat_num = config.g_repeat_num
        self.d_repeat_num = config.d_repeat_num
        self.lambda_cls = config.lambda_cls
        self.lambda_rec = config.lambda_rec
        self.lambda_gp = config.lambda_gp

        self.dataset = config.dataset
        self.batch_size = config.batch_size
        self.num_iters = config.num_iters
        self.num_iters_decay = config.num_iters_decay
        self.g_lr = config.g_lr
        self.d_lr = config.d_lr
        self.n_critic = config.n_critic
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.resume_iters = config.resume_iters
        self.selected_attrs = config.selected_attrs

        self.test_iters = config.test_iters

        self.use_tensorboard = config.use_tensorboard
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        self.log_dir = config.log_dir
        self.sample_dir = config.sample_dir
        self.model_save_dir = config.model_save_dir
        self.result_dir = config.result_dir

        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step
        self.lr_update_step = config.lr_update_step

        self.build_model()
        if self.use_tensorboard:
            self.build_tensorboard()

    def build_model(self):
        if self.dataset in ['CelebA', 'RaFD']:
            self.G = Generator(self.g_conv_dim, self.c_dim, self.g_repeat_num)
            self.D = Discriminator(self.image_size, self.d_conv_dim,
                                   self.c_dim, self.d_repeat_num)
        elif self.dataset in ['Both']:
            self.G = Generator(self.g_conv_dim, self.c_dim + self.c2_dim + 2,
                               self.g_repeat_num)  # 2 for mask vector.
            self.D = Discriminator(self.image_size, self.d_conv_dim,
                                   self.c_dim + self.c2_dim, self.d_repeat_num)

        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr,
                                            [self.beta1, self.beta2])
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr,
                                            [self.beta1, self.beta2])
        self.print_network(self.G, 'G')
        self.print_network(self.D, 'D')

        self.G.to(self.device)
        self.D.to(self.device)

    def print_network(self, model, name):
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(model)
        print(name)
        print("The number of parameters: {}".format(num_params))

    def restore_model(self, resume_iters):
        print(
            'Loading the trained models from step {}...'.format(resume_iters))
        G_path = os.path.join(self.model_save_dir,
                              '{}-G.ckpt'.format(resume_iters))
        D_path = os.path.join(self.model_save_dir,
                              '{}-D.ckpt'.format(resume_iters))
        self.G.load_state_dict(
            torch.load(G_path, map_location=lambda storage, loc: storage))
        self.D.load_state_dict(
            torch.load(D_path, map_location=lambda storage, loc: storage))

    def build_tensorboard(self):
        from logger import Logger
        self.logger = Logger(self.log_dir)

    def update_lr(self, g_lr, d_lr):
        for param_group in self.g_optimizer.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_optimizer.param_groups:
            param_group['lr'] = d_lr

    def reset_grad(self):
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()

    def denorm(self, x):
        out = (x + 1) / 2
        return out.clamp_(0, 1)

    def gradient_penalty(self, y, x):
        weight = torch.ones(y.size()).to(self.device)
        dydx = torch.autograd.grad(outputs=y,
                                   inputs=x,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]

        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
        return torch.mean((dydx_l2norm - 1)**2)

    def label2onehot(self, labels, dim):
        batch_size = labels.size(0)
        out = torch.zeros(batch_size, dim)
        out[np.arange(batch_size), labels.long()] = 1
        return out

    def create_labels(self,
                      c_org,
                      c_dim=5,
                      dataset='CelebA',
                      selected_attrs=None):
        if dataset == 'CelebA':
            hair_color_indices = []
            for i, attr_name in enumerate(selected_attrs):
                if attr_name in [
                        'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair'
                ]:
                    hair_color_indices.append(i)

        c_trg_list = []
        for i in range(c_dim):
            if dataset == 'CelebA':
                c_trg = c_org.clone()
                if i in hair_color_indices:
                    c_trg[:, i] = 1
                    for j in hair_color_indices:
                        if j != i:
                            c_trg[:, j] = 0
                else:
                    c_trg[:, i] = (c_trg[:, i] == 0)
            elif dataset == 'RaFD':
                c_trg = self.label2onehot(torch.ones(c_org.size(0)) * i, c_dim)

            c_trg_list.append(c_trg.to(self.device))
        return c_trg_list

    def classification_loss(self, logit, target, dataset='CelebA'):
        if dataset == 'CelebA':
            return F.binary_cross_entropy_with_logits(
                logit, target, reduction='sum') / logit.size(0)
        elif dataset == 'RaFD':
            return F.cross_entropy(logit, target)

    def train(self):
        # Set data loader.
        if self.dataset == 'CelebA':
            data_loader = self.celeba_loader
        elif self.dataset == 'RaFD':
            data_loader = self.rafd_loader

        data_iter = iter(data_loader)
        x_fixed, c_org = next(data_iter)
        x_fixed = x_fixed.to(self.device)
        c_fixed_list = self.create_labels(c_org, self.c_dim, self.dataset,
                                          self.selected_attrs)

        g_lr = self.g_lr
        d_lr = self.d_lr

        start_iters = 0
        if self.resume_iters:
            start_iters = self.resume_iters
            self.restore_model(self.resume_iters)

        # Start training.
        print('Start training...')
        start_time = time.time()
        for i in range(start_iters, self.num_iters):

            try:
                x_real, label_org = next(data_iter)
            except StopIteration:
                data_iter = iter(data_loader)
                x_real, label_org = next(data_iter)

            rand_idx = torch.randperm(label_org.size(0))
            label_trg = label_org[rand_idx]

            if self.dataset == 'CelebA':
                c_org = label_org.clone()
                c_trg = label_trg.clone()
            elif self.dataset == 'RaFD':
                c_org = self.label2onehot(label_org, self.c_dim)
                c_trg = self.label2onehot(label_trg, self.c_dim)

            x_real = x_real.to(self.device)
            c_org = c_org.to(self.device)
            c_trg = c_trg.to(self.device)
            label_org = label_org.to(self.device)
            label_trg = label_trg.to(self.device)

            out_src, out_cls = self.D(x_real)
            d_loss_real = -torch.mean(out_src)
            d_loss_cls = self.classification_loss(out_cls, label_org,
                                                  self.dataset)

            x_fake = self.G(x_real, c_trg)
            out_src, out_cls = self.D(x_fake.detach())
            d_loss_fake = torch.mean(out_src)

            alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
            x_hat = (alpha * x_real.data +
                     (1 - alpha) * x_fake.data).requires_grad_(True)
            out_src, _ = self.D(x_hat)
            d_loss_gp = self.gradient_penalty(out_src, x_hat)

            d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
            self.reset_grad()
            d_loss.backward()
            self.d_optimizer.step()

            loss = {}
            loss['D/loss_real'] = d_loss_real.item()
            loss['D/loss_fake'] = d_loss_fake.item()
            loss['D/loss_cls'] = d_loss_cls.item()
            loss['D/loss_gp'] = d_loss_gp.item()

            if (i + 1) % self.n_critic == 0:

                x_fake = self.G(x_real, c_trg)
                out_src, out_cls = self.D(x_fake)
                g_loss_fake = -torch.mean(out_src)
                g_loss_cls = self.classification_loss(out_cls, label_trg,
                                                      self.dataset)

                x_reconst = self.G(x_fake, c_org)
                g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))

                g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                loss['G/loss_fake'] = g_loss_fake.item()
                loss['G/loss_rec'] = g_loss_rec.item()
                loss['G/loss_cls'] = g_loss_cls.item()

            if (i + 1) % self.log_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log = "Elapsed [{}], Iteration [{}/{}]".format(
                    et, i + 1, self.num_iters)
                for tag, value in loss.items():
                    log += ", {}: {:.4f}".format(tag, value)
                print(log)

                if self.use_tensorboard:
                    for tag, value in loss.items():
                        self.logger.scalar_summary(tag, value, i + 1)

            if (i + 1) % self.sample_step == 0:
                with torch.no_grad():
                    x_fake_list = [x_fixed]
                    for c_fixed in c_fixed_list:
                        x_fake_list.append(self.G(x_fixed, c_fixed))
                    x_concat = torch.cat(x_fake_list, dim=3)
                    sample_path = os.path.join(self.sample_dir,
                                               '{}-images.jpg'.format(i + 1))
                    save_image(self.denorm(x_concat.data.cpu()),
                               sample_path,
                               nrow=1,
                               padding=0)
                    print('Saved real and fake images into {}...'.format(
                        sample_path))

            if (i + 1) % self.model_save_step == 0:
                G_path = os.path.join(self.model_save_dir,
                                      '{}-G.ckpt'.format(i + 1))
                D_path = os.path.join(self.model_save_dir,
                                      '{}-D.ckpt'.format(i + 1))
                torch.save(self.G.state_dict(), G_path)
                torch.save(self.D.state_dict(), D_path)
                print('Saved model checkpoints into {}...'.format(
                    self.model_save_dir))

            if (i + 1) % self.lr_update_step == 0 and (i + 1) > (
                    self.num_iters - self.num_iters_decay):
                g_lr -= (self.g_lr / float(self.num_iters_decay))
                d_lr -= (self.d_lr / float(self.num_iters_decay))
                self.update_lr(g_lr, d_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(
                    g_lr, d_lr))

    def train_multi(self):

        celeba_iter = iter(self.celeba_loader)
        rafd_iter = iter(self.rafd_loader)

        x_fixed, c_org = next(celeba_iter)
        x_fixed = x_fixed.to(self.device)
        c_celeba_list = self.create_labels(c_org, self.c_dim, 'CelebA',
                                           self.selected_attrs)
        c_rafd_list = self.create_labels(c_org, self.c2_dim, 'RaFD')
        zero_celeba = torch.zeros(x_fixed.size(0), self.c_dim).to(
            self.device)  # Zero vector for CelebA.
        zero_rafd = torch.zeros(x_fixed.size(0), self.c2_dim).to(
            self.device)  # Zero vector for RaFD.
        mask_celeba = self.label2onehot(torch.zeros(x_fixed.size(0)), 2).to(
            self.device)  # Mask vector: [1, 0].
        mask_rafd = self.label2onehot(torch.ones(x_fixed.size(0)), 2).to(
            self.device)  # Mask vector: [0, 1].

        g_lr = self.g_lr
        d_lr = self.d_lr

        start_iters = 0
        if self.resume_iters:
            start_iters = self.resume_iters
            self.restore_model(self.resume_iters)

        print('Start training...')
        start_time = time.time()
        for i in range(start_iters, self.num_iters):
            for dataset in ['CelebA', 'RaFD']:

                data_iter = celeba_iter if dataset == 'CelebA' else rafd_iter

                try:
                    x_real, label_org = next(data_iter)
                except StopIteration:
                    if dataset == 'CelebA':
                        celeba_iter = iter(self.celeba_loader)
                        x_real, label_org = next(celeba_iter)
                    elif dataset == 'RaFD':
                        rafd_iter = iter(self.rafd_loader)
                        x_real, label_org = next(rafd_iter)

                rand_idx = torch.randperm(label_org.size(0))
                label_trg = label_org[rand_idx]

                if dataset == 'CelebA':
                    c_org = label_org.clone()
                    c_trg = label_trg.clone()
                    zero = torch.zeros(x_real.size(0), self.c2_dim)
                    mask = self.label2onehot(torch.zeros(x_real.size(0)), 2)
                    c_org = torch.cat([c_org, zero, mask], dim=1)
                    c_trg = torch.cat([c_trg, zero, mask], dim=1)
                elif dataset == 'RaFD':
                    c_org = self.label2onehot(label_org, self.c2_dim)
                    c_trg = self.label2onehot(label_trg, self.c2_dim)
                    zero = torch.zeros(x_real.size(0), self.c_dim)
                    mask = self.label2onehot(torch.ones(x_real.size(0)), 2)
                    c_org = torch.cat([zero, c_org, mask], dim=1)
                    c_trg = torch.cat([zero, c_trg, mask], dim=1)

                x_real = x_real.to(self.device)
                c_org = c_org.to(self.device)
                c_trg = c_trg.to(self.device)
                label_org = label_org.to(self.device)
                label_trg = label_trg.to(self.device)
                out_src, out_cls = self.D(x_real)
                out_cls = out_cls[:, :self.c_dim] if dataset == 'CelebA' else out_cls[:, self.c_dim:]
                d_loss_real = -torch.mean(out_src)
                d_loss_cls = self.classification_loss(out_cls, label_org,
                                                      dataset)

                x_fake = self.G(x_real, c_trg)
                out_src, _ = self.D(x_fake.detach())
                d_loss_fake = torch.mean(out_src)

                alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
                x_hat = (alpha * x_real.data +
                         (1 - alpha) * x_fake.data).requires_grad_(True)
                out_src, _ = self.D(x_hat)
                d_loss_gp = self.gradient_penalty(out_src, x_hat)

                d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
                self.reset_grad()
                d_loss.backward()
                self.d_optimizer.step()

                loss = {}
                loss['D/loss_real'] = d_loss_real.item()
                loss['D/loss_fake'] = d_loss_fake.item()
                loss['D/loss_cls'] = d_loss_cls.item()
                loss['D/loss_gp'] = d_loss_gp.item()

                if (i + 1) % self.n_critic == 0:
                    x_fake = self.G(x_real, c_trg)
                    out_src, out_cls = self.D(x_fake)
                    out_cls = out_cls[:, :self.c_dim] if dataset == 'CelebA' else out_cls[:, self.c_dim:]
                    g_loss_fake = -torch.mean(out_src)
                    g_loss_cls = self.classification_loss(
                        out_cls, label_trg, dataset)

                    x_reconst = self.G(x_fake, c_org)
                    g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))

                    g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
                    self.reset_grad()
                    g_loss.backward()
                    self.g_optimizer.step()

                    loss['G/loss_fake'] = g_loss_fake.item()
                    loss['G/loss_rec'] = g_loss_rec.item()
                    loss['G/loss_cls'] = g_loss_cls.item()

                if (i + 1) % self.log_step == 0:
                    et = time.time() - start_time
                    et = str(datetime.timedelta(seconds=et))[:-7]
                    log = "Elapsed [{}], Iteration [{}/{}], Dataset [{}]".format(
                        et, i + 1, self.num_iters, dataset)
                    for tag, value in loss.items():
                        log += ", {}: {:.4f}".format(tag, value)
                    print(log)

                    if self.use_tensorboard:
                        for tag, value in loss.items():
                            self.logger.scalar_summary(tag, value, i + 1)

            if (i + 1) % self.sample_step == 0:
                with torch.no_grad():
                    x_fake_list = [x_fixed]
                    for c_fixed in c_celeba_list:
                        c_trg = torch.cat([c_fixed, zero_rafd, mask_celeba],
                                          dim=1)
                        x_fake_list.append(self.G(x_fixed, c_trg))
                    for c_fixed in c_rafd_list:
                        c_trg = torch.cat([zero_celeba, c_fixed, mask_rafd],
                                          dim=1)
                        x_fake_list.append(self.G(x_fixed, c_trg))
                    x_concat = torch.cat(x_fake_list, dim=3)
                    sample_path = os.path.join(self.sample_dir,
                                               '{}-images.jpg'.format(i + 1))
                    save_image(self.denorm(x_concat.data.cpu()),
                               sample_path,
                               nrow=1,
                               padding=0)
                    print('Saved real and fake images into {}...'.format(
                        sample_path))

            if (i + 1) % self.model_save_step == 0:
                G_path = os.path.join(self.model_save_dir,
                                      '{}-G.ckpt'.format(i + 1))
                D_path = os.path.join(self.model_save_dir,
                                      '{}-D.ckpt'.format(i + 1))
                torch.save(self.G.state_dict(), G_path)
                torch.save(self.D.state_dict(), D_path)
                print('Saved model checkpoints into {}...'.format(
                    self.model_save_dir))

            if (i + 1) % self.lr_update_step == 0 and (i + 1) > (
                    self.num_iters - self.num_iters_decay):
                g_lr -= (self.g_lr / float(self.num_iters_decay))
                d_lr -= (self.d_lr / float(self.num_iters_decay))
                self.update_lr(g_lr, d_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(
                    g_lr, d_lr))

    def test(self):
        self.restore_model(self.test_iters)

        if self.dataset == 'CelebA':
            data_loader = self.celeba_loader
        elif self.dataset == 'RaFD':
            data_loader = self.rafd_loader

        with torch.no_grad():
            for i, (x_real, c_org) in enumerate(data_loader):

                x_real = x_real.to(self.device)
                c_trg_list = self.create_labels(c_org, self.c_dim,
                                                self.dataset,
                                                self.selected_attrs)

                x_fake_list = [x_real]
                for c_trg in c_trg_list:
                    x_fake_list.append(self.G(x_real, c_trg))

                x_concat = torch.cat(x_fake_list, dim=3)
                result_path = os.path.join(self.result_dir,
                                           '{}-images.jpg'.format(i + 1))
                save_image(self.denorm(x_concat.data.cpu()),
                           result_path,
                           nrow=1,
                           padding=0)
                print('Saved real and fake images into {}...'.format(
                    result_path))

    def test_multi(self):
        self.restore_model(self.test_iters)

        with torch.no_grad():
            for i, (x_real, c_org) in enumerate(self.celeba_loader):

                x_real = x_real.to(self.device)
                c_celeba_list = self.create_labels(c_org, self.c_dim, 'CelebA',
                                                   self.selected_attrs)
                c_rafd_list = self.create_labels(c_org, self.c2_dim, 'RaFD')
                zero_celeba = torch.zeros(x_real.size(0), self.c_dim).to(
                    self.device)  # Zero vector for CelebA.
                zero_rafd = torch.zeros(x_real.size(0), self.c2_dim).to(
                    self.device)  # Zero vector for RaFD.
                mask_celeba = self.label2onehot(torch.zeros(
                    x_real.size(0)), 2).to(self.device)  # Mask vector: [1, 0].
                mask_rafd = self.label2onehot(torch.ones(
                    x_real.size(0)), 2).to(self.device)  # Mask vector: [0, 1].

                x_fake_list = [x_real]
                for c_celeba in c_celeba_list:
                    c_trg = torch.cat([c_celeba, zero_rafd, mask_celeba],
                                      dim=1)
                    x_fake_list.append(self.G(x_real, c_trg))
                for c_rafd in c_rafd_list:
                    c_trg = torch.cat([zero_celeba, c_rafd, mask_rafd], dim=1)
                    x_fake_list.append(self.G(x_real, c_trg))

                x_concat = torch.cat(x_fake_list, dim=3)
                result_path = os.path.join(self.result_dir,
                                           '{}-images.jpg'.format(i + 1))
                save_image(self.denorm(x_concat.data.cpu()),
                           result_path,
                           nrow=1,
                           padding=0)
                print('Saved real and fake images into {}...'.format(
                    result_path))
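label2onehot above scatters integer class labels into one-hot rows with a single advanced-indexing assignment. A standalone sketch of the same trick, with illustrative inputs:

import numpy as np
import torch

def label2onehot(labels, dim):
    # One 1 per row, at the column given by that row's label (same indexing as Solver.label2onehot).
    out = torch.zeros(labels.size(0), dim)
    out[np.arange(labels.size(0)), labels.long()] = 1
    return out

print(label2onehot(torch.tensor([0, 2, 1]), 3))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])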
Example #52
0
                acc = np.mean(np.asarray(d < e, dtype=int))
                swk_errors[key].append(acc)

        print_resutls(swk_errors, pixel_errors)


if __name__ == "__main__":
    class_ = argparse.ArgumentDefaultsHelpFormatter
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=class_)

    parser.add_argument('--model_type',
                        help="INC, YOLO, simple",
                        default="INC")

    parser.add_argument('--model_name',
                        help="name of saved model (3A4Bh-Ref25)",
                        default="3A4Bh-Ref25")

    args = parser.parse_args()

    # model_name = args.model_name
    model_name = args.model_name
    model_type = args.model_type

    # initialize a logger
    logger = Logger(model_type, model_name, "", config, dir="models/")
    logger.log("Start reporting...")

    main(model_type, model_name, logger)
Example #53
0

def get_conf(lang):
    if lang == 'eng':
        config = get_eng_conf()
    elif lang == 'spa':
        config = get_spa_conf()
    elif lang == 'cmn':
        config = get_cmn_conf()
    else:
        raise ValueError("Unsupported language: {}".format(lang))
    return config


USE_CUDA = True
max_seq_length = 4
max_label_length = 8
batch_size = 2
hidden_size = 2
n_layers = 2
encoder_outputs_dim = 2
output_size = 3
cuda_num = 0
clip_norm = 1
beam_size = 6
EOS_token = 2
PAD_token = 100
X = 1
att_mode = 'general'

config = get_cmn_conf()
logger = Logger('./train_logs')
Example #54
0
class Solver(object):
    """docstring for Solver."""
    def __init__(self, data_loader, config):
        
        self.config = config
        self.data_loader = data_loader
        # Model configurations.
        
        self.lambda_cycle = config.lambda_cycle
        self.lambda_cls = config.lambda_cls
        self.lambda_identity = config.lambda_identity

        # Training configurations.
        self.data_dir = config.data_dir
        self.test_dir = config.test_dir
        self.batch_size = config.batch_size
        self.num_iters = config.num_iters
        self.num_iters_decay = config.num_iters_decay
        self.g_lr = config.g_lr
        self.d_lr = config.d_lr
        self.c_lr = config.c_lr
        self.n_critic = config.n_critic
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.resume_iters = config.resume_iters
        

        # Test configurations.
        self.test_iters = config.test_iters
        self.trg_speaker = ast.literal_eval(config.trg_speaker)
        self.src_speaker = config.src_speaker

        # Miscellaneous.
        self.use_tensorboard = config.use_tensorboard
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.spk_enc = LabelBinarizer().fit(speakers)
        # Directories.
        self.log_dir = config.log_dir
        self.sample_dir = config.sample_dir
        self.model_save_dir = config.model_save_dir
        self.result_dir = config.result_dir

        # Step size.
        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step
        self.lr_update_step = config.lr_update_step

        # Build the model and tensorboard.
        self.build_model()
        if self.use_tensorboard:
            self.build_tensorboard()
    
    def build_model(self):
        self.G = Generator()
        self.D = Discriminator()
        self.C = DomainClassifier()

        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])
        self.c_optimizer = torch.optim.Adam(self.C.parameters(), self.c_lr,[self.beta1, self.beta2])
        
        self.print_network(self.G, 'G')
        self.print_network(self.D, 'D')
        self.print_network(self.C, 'C')
            
        self.G.to(self.device)
        self.D.to(self.device)
        self.C.to(self.device)
    
    def print_network(self, model, name):
        """Print out the network information."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(model)
        print(name)
        print("The number of parameters: {}".format(num_params))

    def build_tensorboard(self):
        """Build a tensorboard logger."""
        from logger import Logger
        self.logger = Logger(self.log_dir)

    def update_lr(self, g_lr, d_lr, c_lr):
        """Decay learning rates of the generator and discriminator and classifier."""
        for param_group in self.g_optimizer.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_optimizer.param_groups:
            param_group['lr'] = d_lr
        for param_group in self.c_optimizer.param_groups:
            param_group['lr'] = c_lr

    def train(self):
        # Learning rate cache for decaying.
        g_lr = self.g_lr
        d_lr = self.d_lr
        c_lr = self.c_lr

        start_iters = 0
        if self.resume_iters:
            pass
        
        norm = Normalizer()
        data_iter = iter(self.data_loader)

        print('Start training......')
        start_time = datetime.now()

        for i in range(start_iters, self.num_iters):
            # =================================================================================== #
            #                             1. Preprocess input data                                #
            # =================================================================================== #
             # Fetch real images and labels.
            try:
                x_real, speaker_idx_org, label_org = next(data_iter)
            except StopIteration:
                data_iter = iter(self.data_loader)
                x_real, speaker_idx_org, label_org = next(data_iter)           

            # Generate target domain labels randomly.
            rand_idx = torch.randperm(label_org.size(0))
            label_trg = label_org[rand_idx]
            speaker_idx_trg = speaker_idx_org[rand_idx]
            
            x_real = x_real.to(self.device)           # Input images.
            label_org = label_org.to(self.device)     # Original domain one-hot labels.
            label_trg = label_trg.to(self.device)     # Target domain one-hot labels.
            speaker_idx_org = speaker_idx_org.to(self.device) # Original domain labels
            speaker_idx_trg = speaker_idx_trg.to(self.device) #Target domain labels

            # =================================================================================== #
            #                             2. Train the discriminator                              #
            # =================================================================================== #
            # Compute loss with real audio frame.
            ## Modified
            CELoss = nn.BCELoss()
            m = nn.Sigmoid()
            cls_real = self.C(x_real).squeeze(1)
            cls_loss_real = CELoss(input=m(cls_real), target=speaker_idx_org)

            self.reset_grad()
            cls_loss_real.backward()
            self.c_optimizer.step()
             # Logging.
            loss = {}
            loss['C/C_loss'] = cls_loss_real.item()

            out_r = self.D(x_real, label_org)
            # Compute loss with fake audio frame.
            x_fake = self.G(x_real, label_trg)
            out_f = self.D(x_fake.detach(), label_trg)
            d_loss_t = F.binary_cross_entropy_with_logits(input=out_f,target=torch.zeros_like(out_f, dtype=torch.float)) + \
                F.binary_cross_entropy_with_logits(input=out_r, target=torch.ones_like(out_r, dtype=torch.float))
           
            ## Modified
            out_cls = self.C(x_fake).squeeze(1)
            d_loss_cls = CELoss(input=m(out_cls), target=speaker_idx_trg)

            # Compute loss for gradient penalty.
            alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
            x_hat = (alpha * x_real.data + (1 - alpha) * x_fake.data).requires_grad_(True)
            out_src = self.D(x_hat, label_trg)
            d_loss_gp = self.gradient_penalty(out_src, x_hat)

            d_loss = d_loss_t + self.lambda_cls * d_loss_cls + 5*d_loss_gp

            self.reset_grad()
            d_loss.backward()
            self.d_optimizer.step()


            # loss['D/d_loss_t'] = d_loss_t.item()
            # loss['D/loss_cls'] = d_loss_cls.item()
            # loss['D/D_gp'] = d_loss_gp.item()
            loss['D/D_loss'] = d_loss.item()

            # =================================================================================== #
            #                               3. Train the generator                                #
            # =================================================================================== #        
            if (i+1) % self.n_critic == 0:
                # Original-to-target domain.
                x_fake = self.G(x_real, label_trg)
                g_out_src = self.D(x_fake, label_trg)
                g_loss_fake = F.binary_cross_entropy_with_logits(input=g_out_src, target=torch.ones_like(g_out_src, dtype=torch.float))
                
                ## Modified
                out_cls = self.C(x_real).squeeze(1)
                g_loss_cls = CELoss(input=m(out_cls), target=speaker_idx_org)

                # Target-to-original domain.
                x_reconst = self.G(x_fake, label_org)
                g_loss_rec = F.l1_loss(x_reconst, x_real )

                # Original-to-Original domain(identity).
                x_fake_iden = self.G(x_real, label_org)
                id_loss = F.l1_loss(x_fake_iden, x_real )

                # Backward and optimize.
                g_loss = g_loss_fake + self.lambda_cycle * g_loss_rec +\
                 self.lambda_cls * g_loss_cls + self.lambda_identity * id_loss
                 
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # Logging.
                loss['G/loss_fake'] = g_loss_fake.item()
                loss['G/loss_rec'] = g_loss_rec.item()
                loss['G/loss_cls'] = g_loss_cls.item()
                loss['G/loss_id'] = id_loss.item()
                loss['G/g_loss'] = g_loss.item()
            # =================================================================================== #
            #                                 4. Miscellaneous                                    #
            # =================================================================================== #
            # Print out training information.
            if (i+1) % self.log_step == 0:
                et = datetime.now() - start_time
                et = str(et)[:-7]
                log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters)
                for tag, value in loss.items():
                    log += ", {}: {:.4f}".format(tag, value)
                print(log)

                if self.use_tensorboard:
                    for tag, value in loss.items():
                        self.logger.scalar_summary(tag, value, i+1)

            # Translate fixed images for debugging.
            if (i+1) % self.sample_step == 0:
                with torch.no_grad():
                    d, speaker = TestSet(self.test_dir).test_data()
                    target = random.choice([x for x in speakers if x != speaker])
                    label_t = self.spk_enc.transform([target])[0]
                    label_t = np.asarray([label_t])

                    for filename, content in d.items():
                        f0 = content['f0']
                        ap = content['ap']
                        sp_norm_pad = self.pad_coded_sp(content['coded_sp_norm'])
                        
                        convert_result = []
                        for start_idx in range(0, sp_norm_pad.shape[1] - FRAMES + 1, FRAMES):
                            one_seg = sp_norm_pad[:, start_idx : start_idx+FRAMES]
                            
                            one_seg = torch.FloatTensor(one_seg).to(self.device)
                            one_seg = one_seg.view(1,1,one_seg.size(0), one_seg.size(1))
                            l = torch.FloatTensor(label_t)
                            one_seg = one_seg.to(self.device)
                            l = l.to(self.device)
                            one_set_return = self.G(one_seg, l).data.cpu().numpy()
                            one_set_return = np.squeeze(one_set_return)
                            one_set_return = norm.backward_process(one_set_return, target)
                            convert_result.append(one_set_return)

                        convert_con = np.concatenate(convert_result, axis=1)
                        convert_con = convert_con[:, 0:content['coded_sp_norm'].shape[1]]
                        contigu = np.ascontiguousarray(convert_con.T, dtype=np.float64)   
                        decoded_sp = decode_spectral_envelope(contigu, SAMPLE_RATE, fft_size=FFTSIZE)
                        f0_converted = norm.pitch_conversion(f0, speaker, target)
                        wav = synthesize(f0_converted, decoded_sp, ap, SAMPLE_RATE).astype(np.float32)

                        name = f'{speaker}-{target}_iter{i+1}_{filename}'
                        path = os.path.join(self.sample_dir, name)
                        print(f'[save]:{path}')
                        scipy.io.wavfile.write(path, SAMPLE_RATE, wav)
                        
            # Save model checkpoints.
            if (i+1) % self.model_save_step == 0:
                G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
                D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(i+1))
                C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(i+1))
                torch.save(self.G.state_dict(), G_path)
                torch.save(self.D.state_dict(), D_path)
                torch.save(self.C.state_dict(), C_path)
                print('Saved model checkpoints into {}...'.format(self.model_save_dir))

            # Decay learning rates.
            if (i+1) % self.lr_update_step == 0 and (i+1) > (self.num_iters - self.num_iters_decay):
                g_lr -= (self.g_lr / float(self.num_iters_decay))
                d_lr -= (self.d_lr / float(self.num_iters_decay))
                c_lr -= (self.c_lr / float(self.num_iters_decay))
                self.update_lr(g_lr, d_lr, c_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}, c_lr: {}.'.format(g_lr, d_lr, c_lr))

    def gradient_penalty(self, y, x):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        weight = torch.ones(y.size()).to(self.device)
        dydx = torch.autograd.grad(outputs=y,
                                   inputs=x,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]

        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
        return torch.mean((dydx_l2norm-1)**2)

    def reset_grad(self):
        """Reset the gradient buffers."""
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()
        self.c_optimizer.zero_grad()

    def restore_model(self, resume_iters):
        """Restore the trained generator and discriminator."""
        print('Loading the trained models from step {}...'.format(resume_iters))
        G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
        D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(resume_iters))
        C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(resume_iters))
        self.G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
        self.D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))
        self.C.load_state_dict(torch.load(C_path, map_location=lambda storage, loc: storage))

    @staticmethod
    def pad_coded_sp(coded_sp_norm):
        f_len = coded_sp_norm.shape[1]
        if  f_len >= FRAMES: 
            pad_length = FRAMES-(f_len - (f_len//FRAMES) * FRAMES)
        elif f_len < FRAMES:
            pad_length = FRAMES - f_len

        sp_norm_pad = np.hstack((coded_sp_norm, np.zeros((coded_sp_norm.shape[0], pad_length))))
        return sp_norm_pad 

    def test(self):
        """Translate speech using StarGAN ."""
        # Load the trained generator.
        self.restore_model(self.test_iters)
        norm = Normalizer()

        # Set data loader.
        d, speaker = TestSet(self.test_dir).test_data(self.src_speaker)
        targets = self.trg_speaker
       
        for target in targets:
            print(target)
            assert target in speakers
            label_t = self.spk_enc.transform([target])[0]
            label_t = np.asarray([label_t])
            
            with torch.no_grad():

                for filename, content in d.items():
                    f0 = content['f0']
                    ap = content['ap']
                    sp_norm_pad = self.pad_coded_sp(content['coded_sp_norm'])

                    convert_result = []
                    for start_idx in range(0, sp_norm_pad.shape[1] - FRAMES + 1, FRAMES):
                        one_seg = sp_norm_pad[:, start_idx : start_idx+FRAMES]
                        
                        one_seg = torch.FloatTensor(one_seg).to(self.device)
                        one_seg = one_seg.view(1,1,one_seg.size(0), one_seg.size(1))
                        l = torch.FloatTensor(label_t)
                        one_seg = one_seg.to(self.device)
                        l = l.to(self.device)
                        one_set_return = self.G(one_seg, l).data.cpu().numpy()
                        one_set_return = np.squeeze(one_set_return)
                        one_set_return = norm.backward_process(one_set_return, target)
                        convert_result.append(one_set_return)

                    convert_con = np.concatenate(convert_result, axis=1)
                    convert_con = convert_con[:, 0:content['coded_sp_norm'].shape[1]]
                    contigu = np.ascontiguousarray(convert_con.T, dtype=np.float64)   
                    decoded_sp = decode_spectral_envelope(contigu, SAMPLE_RATE, fft_size=FFTSIZE)
                    f0_converted = norm.pitch_conversion(f0, speaker, target)
                    wav = synthesize(f0_converted, decoded_sp, ap, SAMPLE_RATE).astype(np.float32)

                    name = f'{speaker}-{target}_iter{self.test_iters}_{filename}'
                    path = os.path.join(self.result_dir, name)
                    print(f'[save]:{path}')
                    scipy.io.wavfile.write(path, SAMPLE_RATE, wav)            
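pad_coded_sp above pads the normalized spectral envelope so that its frame count becomes a multiple of FRAMES (an exact multiple still receives FRAMES extra zero columns). A small worked check of that arithmetic, with FRAMES = 128 assumed purely for illustration:

FRAMES = 128  # assumed value, for illustration only

def pad_length(f_len):
    # Mirrors the branch logic of Solver.pad_coded_sp.
    if f_len >= FRAMES:
        return FRAMES - (f_len - (f_len // FRAMES) * FRAMES)
    return FRAMES - f_len

for f_len in (60, 256, 300):
    print(f_len, '->', f_len + pad_length(f_len))
# 60 -> 128, 256 -> 384, 300 -> 384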
Example #55
0
import socket

from logger import Logger
from .tcpUtils import TcpUtil

logger = Logger('tcpServer.py', active=True, external=False)


class TcpServer:
    def __init__(self, ip, port, bufferSize):
        self.TCP_IP = ip
        self.TCP_PORT = port
        self.BUFFER_SIZE = bufferSize
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.conn = None
        self.addr = None
        self.s.bind((self.TCP_IP, self.TCP_PORT))

    def accept_connection(self):
        self.s.listen(1)
        self.conn, self.addr = self.s.accept()
        logger.debug(self.__class__.__name__,
                     'accept_connection',
                     msg='Connection accepted',
                     id=None)

    def send_response(self, response_string):
        encoded_response = TcpUtil.encode_string(response_string)
        # print('TcpServer.send_response: Sending message: \r\n{}\r\n'.format(encoded_response))
        logger.debug(
Example #56
0
 def build_tensorboard(self):
     from logger import Logger
     self.logger = Logger(self.log_dir)
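Several of the Solver examples above call self.logger.scalar_summary(tag, value, step) on the object this helper creates. A minimal sketch of such a Logger, assuming it is a thin wrapper around torch.utils.tensorboard.SummaryWriter; the real logger module behind these examples may be implemented differently:

from torch.utils.tensorboard import SummaryWriter

class Logger(object):
    """Hypothetical TensorBoard logger exposing the scalar_summary interface used above."""

    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        # Log a scalar value for a given tag at a given training step.
        self.writer.add_scalar(tag, value, step)

    def close(self):
        self.writer.close()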
Example #57
0
from dotenv import load_dotenv
from pathlib import Path
import os
import sys

from hub_controller import Controller
from logger import Logger

# Setup dotenv environment variables
env_path = Path('.env')
load_dotenv(dotenv_path=env_path)

# Setup logger
log_path = os.getenv("LOG_OUTPUT_DIR")
log_name = "application.log"
Logger.output_dir(log_path)
Logger.output_name(log_name)
Logger.setup()
logger = Logger()

# Load application
hub = Controller()
try:
    hub.start()

except KeyboardInterrupt as err:
    hub.stop()

except Exception as err:
    logger.critical(
        "Exception encountered when starting main hub: {0}".format(err))
Example #58
0
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    logger = Logger(opt)

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)

    ###############################################
    ## freezing backbone and one head
    # for param in model.parameters():
    # print(param)
    #   param.requires_grad = False
    #
    # # req_grad = ["model.hm_bdd", "model.wh_bdd", "model.reg_bdd"]
    # req_grad = ["model.hm_tl", "model.wh_tl", "model.reg_tl"]
    # # for hd in model.reg_tl:
    # for custom_head in (req_grad):
    #   for hd in eval(custom_head):
    #     # print(hd.parameters())
    #     for wt in hd.parameters():
    #       # print(wt)
    #       wt.requires_grad = True

    ######################################################

    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.val_intervals < opt.num_epochs or opt.test:
        print('Setting up validation data...')
        val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True)

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

    print('Setting up train data...')
    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                if opt.eval_val:
                    val_loader.dataset.run_eval(preds, opt.save_dir)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.save_point:
            # if epoch % opt.save_point[0] == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
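The lr_step handling near the end of main() multiplies the base learning rate by an extra factor of 0.1 at each epoch listed in opt.lr_step. A quick check of that formula, with opt.lr and opt.lr_step values assumed purely for illustration:

base_lr = 1.25e-4      # assumed opt.lr
lr_step = [90, 120]    # assumed opt.lr_step

for epoch in lr_step:
    lr = base_lr * (0.1 ** (lr_step.index(epoch) + 1))
    print('epoch', epoch, '-> lr', lr)  # roughly 1.25e-05 at epoch 90, 1.25e-06 at epoch 120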
Example #59
0
    def get_following(self):
        self.copycookies()
        self.get_createpoint()
        items = self.mongo.db.editors_new.find()
        for item in items:
            self.userID_list.append(item.get('user_id'))
        self.current_proxy = get_IP()
        self.get_cookie()
        # self.user_id_list = extract_last_editors()
        dt = re.sub(r'[^0-9]', '',
                    str(datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')))
        for i in xrange(self.start, self.end):
            self.state = False
            self.user_id = self.userID_list[i]
            self.file.seek(0, 2)
            dt1 = re.sub(r'[^0-9]', '',
                         str(datetime.datetime.now().strftime('%Y-%m-%d')))
            News = self.type + ',' + str(i + 1) + ',' + str(
                self.end) + ',' + str(dt1) + '\n'
            self.file.write(News)
            logfilename = '/log/' + dt + 'editors_' + sys._getframe(
            ).f_code.co_name + '.log'
            logger = Logger(logfilename=logfilename,
                            logname='Crawling followees of user No. ' + str(i + 1)).getlog()

            following_url = 'https://www.zhihu.com/api/v4/members/' + str(
                self.user_id
            ) + '/followees?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics&offset={0}&limit=20'
            following_count = 0

            # self.following_id_list = extract_editors_following(self.user_id, self.user_id_list)
            # if len(self.following_id_list) == 0:
            #     self.following_type = 0
            # else:
            #     self.following_type = 1
            while 1:
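                # First request: read the total followee count from 'paging' and
                # handle deleted accounts (404) or expired cookies (401) up front.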
                try:
                    r = requests.get(following_url,
                                     headers=self.headers,
                                     timeout=5,
                                     proxies=self.current_proxy)
                    time.sleep(3)
                    logger.info('First request status code ' + str(r.status_code))
                    if r.status_code == 200:
                        j = r.json()
                        following_count = j['paging']['totals']
                    elif r.status_code == 404:
                        self.is_del = True
                        logger.info('!!! This user has been deleted !!!')
                        self.delLogger(logger)
                        break
                    elif r.status_code == 401:
                        logger.info('Cookie expired, switching to a backup')
                        f = open('Cookies/editors_following_cookies.txt', "r")
                        Lines = f.readlines()
                        if len(Lines) == 0:
                            logger.info('Backup cookies exhausted!')
                            self.delLogger(logger)
                            return
                        else:
                            self.change_cookie()
                            with open(
                                    'User/editors_following_loseuser_' +
                                    str(self.fileNum) + '.txt', 'a+') as f1:
                                f1.write(str(i + 1) + '\n')
                    else:
                        self.delLogger(logger)
                        return

                except Exception, e:
                    logger.error('Error fetching the followee count! ' + str(e))
                    self.current_proxy = get_IP()
                    logger.warning('Switching IP proxy! Pausing 3 seconds!')
                    time.sleep(3)
                    continue

                else:
                    # Users with no followees still get a record saved.
                    if following_count == 0:
                        logger.warning('User has no followees!')
                        self.delLogger(logger)
                        data_plus = {
                            'user_id': self.user_id,
                            "following_count": following_count
                        }
                        self.mongo.db.FR_editors_following.insert(data_plus)
                        break
                    elif self.following_type == 0 and following_count >= 4000:
                        logger.warning('Followee count exceeds 4000!')
                        self.delLogger(logger)
                        data_plus = {
                            'user_id': self.user_id,
                            "following_count": following_count
                        }
                        self.mongo.db.FR_editors_following.insert(data_plus)
                        break
                    else:
                        offset = 0
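                        # Page through the followees API 20 entries at a time,
                        # bumping 'offset' until paging['is_end'] is reported.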
                        while 1:
                            try:
                                soup = requests.get(following_url.format(
                                    str(offset)),
                                                    headers=self.headers,
                                                    timeout=5,
                                                    proxies=self.current_proxy)
                                time.sleep(3)
                                logger.info('Request status code ' + str(soup.status_code))
                            except Exception, e:
                                logger.error('Error requesting followees! ' + str(e))
                                self.current_proxy = get_IP()
                                logger.warning('Switching IP proxy! Pausing 3 seconds!')
                                time.sleep(3)
                                continue
                            else:
                                following_data = soup.json()
                                data = following_data.get('data')
                                logger.info(
                                    'is_end?' +
                                    str(following_data['paging']['is_end']))
                                if following_data['paging']['is_end']:
                                    following_list = []
                                    for j in range(0, len(data)):
                                        following_id = data[j]['url_token']  # user ID
                                        following_info = data[j]  # full info

                                        info = {
                                            "following_id": following_id,
                                            "following_info": following_info
                                        }
                                        following_list.append(info)
                                    data_plus = {
                                        'user_id': self.user_id,
                                        "following_count": following_count,
                                        # "editor_type":self.following_type,
                                        "following": following_list
                                    }
                                    self.mongo.db.FR_editors_following.insert(
                                        data_plus)

                                    logger.info('Retrieved all followee users!')
                                    logger.info('Data saved successfully!')
                                    self.delLogger(logger)
                                    break
                                else:
                                    offset = offset + 20
                                    following_list = []
                                    for j in range(0, len(data)):
                                        following_id = data[j]['url_token']  # user ID
                                        # if following_id in self.following_id_list:
                                        #     self.state = True
                                        following_info = data[j]  # full info

                                        info = {
                                            "following_id": following_id,
                                            "following_info": following_info
                                        }
                                        following_list.append(info)
                                    data_plus = {
                                        'user_id': self.user_id,
                                        "following_count": following_count,
                                        # "editor_type":self.following_type,
                                        "following": following_list
                                    }
                                    self.mongo.db.FR_editors_following.insert(
                                        data_plus)
                                    # if self.state:
                                    #     self.delLogger(logger)
                                    #     break
                        self.delLogger(logger)
                        self.mongo.client.close()
                        break
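
The crawler above calls self.delLogger(logger) before every break so that the per-user log file is released; that helper is not shown in this example. A plausible sketch (assumed, not the original code) simply detaches and closes all handlers:

    def delLogger(self, logger):
        # Assumed implementation: close and detach every handler so the
        # per-user log file is released and messages do not duplicate.
        for handler in list(logger.handlers):
            handler.close()
            logger.removeHandler(handler)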
Пример #60
######################################
# Load pre-trained parameters
pretrain_epoch = 0
pretrain_batch_size = args.batch  #Assume current batch was used in pretraining
if args.finetune != '':
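    # Resume model/optimizer state (plus the epoch and batch size used before)
    # from an earlier run when a finetune source is specified.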
    from utility import loadPreTrained
    model, optimizer, pretrain_epoch, pretrain_batch_size = loadPreTrained(
        args, checkpointFolder, model, optimizer)

######################################
# Log file path + Tensorboard Logging
fileHandler = logging.FileHandler(checkpointFolder +
                                  '/train_log.txt')  #to File
logger.addHandler(fileHandler)
if tensorboard_bLog:
    tb_logger = Logger(checkpointFolder + '/logs')  #tensorboard logger

# Save Option Info
option_str, options_dict = print_options(parser, args)
save_options(checkpointFolder, option_str, options_dict)

######################################
# Input/Output Option
train_X = train_X_raw  #[1,:,:,:]      #1st seller, (num, frameNum, featureDim:73)
# train_X = np.concatenate( (train_X, train_X_raw[2,:,:,:]), axis= 0)

# train_Y = train_X_raw[2,:,:,:]    #1st seller, (num, frameNum, featureDim:73)
# train_Y = np.concatenate( (train_Y, train_X_raw[1,:,:,:]), axis= 0)

test_X = test_X_raw  #[1,:,:,:]      #1st seller, (num, frameNum, featureDim:73)
# test_Y = test_X_raw[2,:,:,:]    #1st seller, (num, frameNum, featureDim:73)
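
Note that tb_logger is only created above; nothing in this excerpt writes to it yet. Assuming the Logger class exposes a scalar_summary(tag, value, step) method, as the training-loop example earlier in this document suggests, scalars could be pushed to TensorBoard roughly like this (the values below are placeholders):

if tensorboard_bLog:
    for step in range(3):                  # placeholder steps
        dummy_loss = 1.0 / (step + 1)      # placeholder value
        tb_logger.scalar_summary('train_loss', dummy_loss, step)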