Example #1
    def handle_receive(self, packet, time):
        if isinstance(packet, AckPacket):
            Rn = packet.request_number

            self.last_n_req_nums.append(Rn)
            if len(self.last_n_req_nums) > TCPReno.MAX_DUPLICATES:
                self.last_n_req_nums.pop(0)

            Sn, Sb, Sm = self.host.sequence_nums
            cwnd = self.host.cwnd
            if self.last_drop is None or \
               time - self.last_drop > TCPReno.TIMEOUT_TOLERANCE:
                if len(self.last_n_req_nums) == TCPReno.MAX_DUPLICATES and \
                   all(num == Rn for num in self.last_n_req_nums):
                    # If we've had duplicate ACKs, then enter fast retransmit.
                    self.ssthresh = max(self.host.cwnd / 2, TCPReno.INITIAL_CWND)
                    self.set_window_size(time, self.ssthresh)
                    Logger.warning(time, "Duplicate ACKs received for flow %s." % self.host.flow[0])

                    self.last_drop = time
            if self.ss:
                self.set_window_size(time, cwnd + 1)
                if self.host.cwnd >= self.ssthresh:
                    self.ss = False
                    Logger.info(time, "SS phase over for Flow %s. CA started." % (self.host.flow[0]))
            elif Rn > Sb:
                # If we are in Congestion Avoidance mode, we wait for an RTT to
                # increase the window size, rather than doing it on ACK.
                self.set_window_size(time, cwnd + 1. / cwnd)
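
The last_n_req_nums buffer above is a fixed-size FIFO used to detect MAX_DUPLICATES identical ACKs. A minimal standalone sketch of the same idea using collections.deque with maxlen (names here are illustrative, not from the source):

from collections import deque

def is_fast_retransmit(recent_acks, rn):
    # Append the latest request number; maxlen silently evicts the oldest.
    recent_acks.append(rn)
    # Trigger once the window is full and every entry matches the new ACK.
    return len(recent_acks) == recent_acks.maxlen and all(n == rn for n in recent_acks)

recent_acks = deque(maxlen=3)
for rn in [5, 6, 7, 7, 7]:
    print(rn, is_fast_retransmit(recent_acks, rn))  # True only on the third 7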
Example #2
    def timeout(self, time, packet):
        """
        A timeout occurred at the given time on the given packet. Resend if
        necessary.

        :param time: Time to resend the packet
        :type time: int
        :param packet: Packet which timed out
        :type packet: Packet
        """
        # We already received an Ack for it
        if packet.id not in self.awaiting_ack:
            return
        else:
            # Otherwise, remove it so that it will be added again
            del self.awaiting_ack[packet.id]

        if not isinstance(packet, FlowPacket):
            # If an ACK packet is dropped, don't worry about it, it'll be sent
            # again later
            return
        flow_id = packet.flow_id
        if self.current_request_num is not None and \
              packet.sequence_number < self.current_request_num:
            # Packet was already received
            return

        self.congestion_control.handle_timeout(packet, time)

        # Resend
        Logger.info(time, "Packet %s was dropped, resending" % (packet.id))
        self.queue.add(packet)
        self.send_packets(time, flow_id)
Example #3
class _Job(object):
    def __init__(self):
        self.log = Logger(self.__class__.__name__, fh)
        self.log.debug("Job is created")

    def execute(self, **kwargs):
        try:
            self.log.debug("Start job with kwargs=%s" % kwargs)
            self._execute(**kwargs)
            self.log.debug("Finish job successful")
        except Exception as e:
            self.log.exception("Error during job execution")
            subject = 'Tasker Information. An error occurred in the script %s' % self.__class__.__name__
            self.log.debug(subject)
            # send_email(subject, as_text(e.message),
            #            send_from=SMTP_SETTINGS['username'],
            #            server=SMTP_SETTINGS['server'],
            #            port=SMTP_SETTINGS['port'],
            #            user=SMTP_SETTINGS['username'],
            #            passwd=SMTP_SETTINGS['password'],
            #            dest_to=ERROR_EMAILS)

    def _execute(self, **kwargs):
        raise NotImplementedError("%s._execute" % self.__class__.__name__)

    @classmethod
    def run(cls, **kwargs):
        log.debug("in _Job.run!")
        return cls().execute(**kwargs)
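
_Job is a template-method base: execute() wraps logging and error reporting while subclasses supply _execute(). A self-contained sketch of how a subclass would plug in, using the stdlib logging module in place of the example's Logger/fh setup (CleanupJob is hypothetical):

import logging

logging.basicConfig(level=logging.DEBUG)

class Job(object):
    def __init__(self):
        self.log = logging.getLogger(self.__class__.__name__)

    def execute(self, **kwargs):
        try:
            self.log.debug("Start job with kwargs=%s" % kwargs)
            self._execute(**kwargs)
            self.log.debug("Finished job successfully")
        except Exception:
            self.log.exception("Error during job execution")

    def _execute(self, **kwargs):
        raise NotImplementedError("%s._execute" % self.__class__.__name__)

    @classmethod
    def run(cls, **kwargs):
        return cls().execute(**kwargs)

class CleanupJob(Job):
    def _execute(self, **kwargs):
        self.log.info("cleaning %s", kwargs.get("path", "/tmp"))

CleanupJob.run(path="/var/tmp")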
Example #4
    def sensing_decision(self, sensing_result, number_cpes):
        """
        Algorithm sensing decision.
        @param sensing_result
        @param number_cpes
        """
        #print "simple cycle"

        #feedback cycle control
        self._fb_cycle += 1

        #initialize sum of scores for each decision (0,1)
        sum_result0 = 0.0
        sum_result1 = 0.0

        #keeps the greatest reward for each decision
        greatest_reward0 = 0.0
        greatest_reward1 = 0.0

        #sum of all scores for each decision
        for decision, reward in zip(sensing_result, self._reward):

            #sum of scores for "0" decision
            if decision == IDLE:
                sum_result0 += reward 
                if reward > greatest_reward0:
                    greatest_reward0 = reward 

            #sum of scores for "1" decision
            elif decision == OCCUPIED:
                sum_result1 += reward 
                if reward > greatest_reward1:
                    greatest_reward1 = reward 

        #divide the sum of scores by the total number of CPEs
        score_r1 = sum_result1 / number_cpes
        score_r0 = sum_result0 / number_cpes

        #check which average score is higher, score_r0 or score_r1
        if score_r0 > score_r1:
            sensing_decision = IDLE

        elif score_r0 < score_r1:
            sensing_decision = OCCUPIED

        #if the scores tie, fall back to the decision of the CPE with the greatest individual reward
        elif score_r0 == score_r1:
            if greatest_reward0 >= greatest_reward1:
                sensing_decision = IDLE
            elif greatest_reward0 < greatest_reward1:
                sensing_decision = OCCUPIED

        #check whether this is a feedback cycle
        #if (self._fb_cycle % (feedback_control-1) == 0):
        Logger.append('sdc', 'decision', sensing_decision)
        if sensing_decision == OCCUPIED:
            self._total_occ += 1
        else:
            self._total_idle += 1
        self.feedback(sensing_result, sensing_decision, increase_rate, decrease_rate)
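
The decision rule above is a reward-weighted vote with a max-reward tie-break; dividing both sums by number_cpes does not change the comparison, so a pure-function version can skip it. A sketch with IDLE/OCCUPIED encoded as 0/1 for illustration:

IDLE, OCCUPIED = 0, 1

def fuse(decisions, rewards):
    sums = {IDLE: 0.0, OCCUPIED: 0.0}
    best = {IDLE: 0.0, OCCUPIED: 0.0}
    for d, r in zip(decisions, rewards):
        sums[d] += r
        best[d] = max(best[d], r)
    if sums[IDLE] != sums[OCCUPIED]:
        return IDLE if sums[IDLE] > sums[OCCUPIED] else OCCUPIED
    # Tie: defer to the CPE with the greatest individual reward (IDLE wins ties).
    return IDLE if best[IDLE] >= best[OCCUPIED] else OCCUPIED

print(fuse([IDLE, OCCUPIED, OCCUPIED], [0.5, 0.3, 0.3]))  # 0.5 vs 0.6 -> 1 (OCCUPIED)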
Example #5
def main(env_name, num_episodes, render, VideoSave, gamma, lam, kl_targ, batch_size):
    killer = GracefulKiller()
    env, obs_dim, act_dim = init_gym(env_name, render)
    obs_dim += 1  # add 1 to obs dimension for time step feature (see run_episode())
    now = datetime.utcnow().strftime("%b-%d_%H-%M-%S")  # create unique directories
    logger = Logger(logname=env_name, now=now)
    #aigym_path = os.path.join('/tmp', env_name, now)
    #env = wrappers.Monitor(env, aigym_path, force=True) 
    scaler = Scaler(obs_dim, env_name)
    scaler.resume()
    val_func = NNValueFunction(obs_dim, env_name)
    policy = Policy(obs_dim, act_dim, kl_targ, env_name)
    episode = 0
    capture = False
    while episode < num_episodes:
        if VideoSave and not capture:
            env.ScreenCapture(5)
            capture = True
        trajectories = run_policy(env, policy, scaler, logger, episodes=batch_size)
        episode += len(trajectories)

        if killer.kill_now:
            if input('Terminate training (y/[n])? ') == 'y':
                break
            killer.kill_now = False
    logger.close()
    policy.close_sess()
    val_func.close_sess()
Example #6
    def _getItems(self):
        try:
            html = GetContentFromUrl(self.URL)
            return re.compile(self.REGEX).findall(html)
        except urllib2.URLError:
            Logger.log("There was an error while getting content from remote server")
            raise NoConnectionError("There was an error while getting content from remote server")
Example #7
    def _writeOpenVPNConfiguration(self, authPath):
        openVpnConfigFilePath  = config.getOpenVPNTemplateConfigFilePath()
        cert    = config.getCertFilePath()

        file    = open(openVpnConfigFilePath, mode='r')
        content = file.read()
        file.close()

        authPath = authPath.replace('\\', '/')
        cert     = cert.replace('\\', '/')

        content = content.replace('#SERVER#',               self._serverAddress)
        content = content.replace('#PORT#',                 self._port)
        content = content.replace('#CERTIFICATE#',    '"' + cert     + '"')
        content = content.replace('#AUTHENTICATION#', '"' + authPath + '"')

        # Adding a "log" directive stops OpenVPN from writing its status to stdout, and Windows
        # then fails to read the status; if it is ever needed, add it in _modifyOpenVPNConfigContent
        # for the corresponding OS.
        #content += "\r\nlog " +  "openvpn1.log"

        # Modify the OpenVPN config to add options that some OSs (e.g. Ubuntu Linux) may need.
        content = self._modifyOpenVPNConfigContent(content)

        Logger.log("OpenVPN configuration:%r" % content)

        cfgFilePath = self._getOpenVPNConfigPath()
        file = open(cfgFilePath, mode='w+')
        file.write(content)
        file.close()

        return cfgFilePath
Example #8
def web_socket_transfer_data(request):

	logger.logInfo(NAME + "The server received a request")

	paramsDictionary = {}
	paramsDictionary["SOCKET"] = request.ws_stream
	paramsDictionary["HOME_PATH"] = request.get_options()["PROJECT_LOCATION"]
	paramsDictionary["CLIENT_ADDRESS"] = request.connection.remote_ip
	stopQueue = Queue()
	listenThread = KillableListenThread(request.ws_stream, stopQueue)

	ticket = request.ws_stream.receive_message()
	currentTicket = TicketUtil.readTempVars()[EXPECTED_TICKET]
	timeOut = (int(ticket) - int(currentTicket)) * 5
	start = time.time()
	while int(currentTicket) < int(ticket) and time.time() - start < timeOut:  # compare numerically: tickets arrive as strings
		time.sleep(1)
		currentTicket = TicketUtil.readTempVars()[EXPECTED_TICKET]
	if int(currentTicket) == int(ticket):
		request.ws_stream.send_message(OK)
		listenThread.start()
		if listenThread.getMessage(5) != OK:
			skip(paramsDictionary, ticket)
	else:
		request.ws_stream.send_message(ABORT)
		skip(paramsDictionary, ticket)
	stopQueue.put("STOP")
	return apache.HTTP_OK
Example #9
def web_socket_transfer_data(request):

    logger.logInfo(NAME + "The server received a request")

    paramsDictionary = {}
    paramsDictionary["SOCKET"] = request.ws_stream
    paramsDictionary["HOME_PATH"] = request.get_options()["PROJECT_LOCATION"]
    methodMapping = {PREPARE: prepare, GLOBAL_COMMIT: globalCommit, GLOBAL_ABORT: globalAbort}
    configReader = ConfigurationReader(
        paramsDictionary["HOME_PATH"] + "ServerSide/config/database_config/database.conf"
    )
    dbParamsDict = configReader.readConfigFile()
    paramsDictionary["DB_PARAMS"] = dbParamsDict
    paramsDictionary["CLIENT_ADDRESS"] = request.connection.remote_ip

    login = request.ws_stream.receive_message()

    password = request.ws_stream.receive_message()

    db = DatabaseConnector(login, password, dbParamsDict["DATABASE"], dbParamsDict["HOST"])

    command = request.ws_stream.receive_message()
    lockFilePath = paramsDictionary["HOME_PATH"] + "ServerSide/config/database_config/dbLock.dat"
    lock = FileLock(lockFilePath, 2, 0.05)
    try:
        while command != EXIT:
            methodMapping[command](paramsDictionary, db, lock)
            command = request.ws_stream.receive_message()
        if lock.is_locked:
            lock.release()
    except Exception, e:
        logger.logError(NAME + e.message)
        if lock.is_locked:
            lock.release()
        return apache.HTTP_OK
Example #10
def web_socket_transfer_data(request):
	while True:
		line = request.ws_stream.receive_message()
		logger.logInfo(NAME + "Server received " + line)
		request.ws_stream.send_message(line)
		if line == _GOODBYE_MESSAGE:
			return apache.HTTP_OK
Example #11
    def __init__(self, in_th, min_th, max_th, delta_th, k):
        """
        CTOR
        @param in_th Initial threshold.
        @param min_th Minimum threshold.
        @param max_th Maximum threshold.
        @param delta_th Threshold adjustment step.
        @param k Bayesian k factor.
        """
        Logger.register('bayes_decision', ['threshold', 'decision', 'risk']) 

        self._feedback = self._feedback_prev = -1
        self._delta_th = delta_th
        self._min_th_limit = min_th
        self._max_th_limit = max_th

        self._k = k
        self._th = in_th

        self.init(in_th)

        self._th_dec = {}

        self._xx = {}
        self._xx[0] = {0: "00", 1: "01"}
        self._xx[1] = {0: "10", 1: "11"}
Example #12
    def _getKillCmd(self):
        if self._shouldUseCmdList():
            killCmd = self._killCmdPath()
            Logger.log("Kill cmd path:%s" % killCmd, Logger.LOG_DEBUG)
            return [killCmd, '-SIGINT', 'openvpn']
        else:
            return 'killall -SIGINT openvpn'
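
The list-vs-string distinction matters once the command reaches subprocess: an argument list is executed directly, while a single string must go through a shell. A sketch of how a caller might consume either form (calls left commented out so the file is safe to run):

import subprocess

def run_kill_cmd(cmd):
    if isinstance(cmd, list):
        return subprocess.call(cmd)          # argv list: no shell involved
    return subprocess.call(cmd, shell=True)  # plain string: needs a shell

# run_kill_cmd(['killall', '-SIGINT', 'openvpn'])
# run_kill_cmd('killall -SIGINT openvpn')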
Example #13
    def _allowAction(self, action, extra=''):
        Logger.log("allowAction %s" % action, Logger.LOG_DEBUG)
        user = self._getUsername()
        pwd  = config.getPassword()

        data = {"username": user, "apiKey": pwd, "action": action,
                "country": self._countryName, "city": self._cityName,
                "server": self._serverAddress, "extra": extra, "os": config.getOS()}
        self._actionNotification.push(data)
Example #14
def execute(paramsDictionary, message):
	logger.logInfo(NAME + "inside the new-version broadcast module")

	homePath = paramsDictionary["HOME_PATH"]

	lock = paramsDictionary["LOCK"]
	if not lock.is_locked:
		return
	try:
		addressesfile = FileProcessor(paramsDictionary["HOME_PATH"]+"ServerSide/config/addresses.conf")
		addressesfile.lockFile()
		addresses = addressesfile.readFile()
		addressesfile.unlockFile()

		versionsFile = FileProcessor(homePath+"ServerSide/config/database_config/data_version.dat")
		versionsFile.lockFile()
		dataVersions = versionsFile.readFile()
		versionsFile.unlockFile()

		myVersion = dataVersions[LOCALHOST_NAME]
		logger.logInfo(NAME + "My data version " + myVersion)
		for address in addresses:
			if addresses[address] == "T":
				logger.logInfo(NAME + "sending version to " + address)
				connection = Connection(homePath + "ServerSide/config/connection_config.conf")
				if connection.connect(address, 80, RESOURCE) == OK_FLAG:
					connection.send_message(myVersion)
					connection._do_closing_handshake()
				else:
					logger.logError(NAME + "Cannot connect to the server at address " + address)
	except Exception, e:
		logger.logError(NAME + e.message)
		lock.release()
Example #15
    def decision(self, energy):
        """

        @param energy
        """
        energy = np.sum(energy) / energy.size

        self.cycle_counter += 1

        if self.cycle_counter_max == self.cycle_counter:
            self.cycle_counter = 0

        sp = self.do_action(self.s, self.a)
        rw = self.get_reward(energy, sp)
        ap = self.e_greedy_selection(sp)

        self.update_q_table(self.s, self.a, rw, sp, ap)

        self.s = sp
        self.a = ap

        #self.epsilon *= 0.999

        Logger.append('bayes_learning', 'hypothesis', 1.0 if energy > self.s else 0.0)
        Logger.append('bayes_learning', 'feedback', self._feedback)
        Logger.append('bayes_learning', 'state', self.s)
        Logger.append('bayes_learning', 'reward', rw)
        Logger.append('bayes_learning', 'action', self.a)

        return 1 if (energy > sp) else 0, energy
Example #16
    def add_print_list():
        """
        Adds to the print list (method of the Logger class).
        """
        print "\n******************************************************************\n"
        print "\nPrinting the energy\n"
        Logger.add_to_print_list("energy_decision", "energy")
        print "\n******************************************************************\n"
Example #17
def execute(paramsDictionary, message):
	global timer
	if timer == 0:
		timer = int(paramsDictionary["CONFIG_PARAMS"]["passExchangePeriod"])
		logger.logInfo(NAME + "Sending a request for the old password to " + paramsDictionary["CLIENT_ADDRESS"])
		paramsDictionary["SOCKET"].send_message(RES_MODULE + NEW_PASS)
	else:
		timer -= 1
Example #18
def checkIfServerIsUpToDate(dataVersions):
	myVersion = dataVersions[LOCALHOST_NAME]
	for address in dataVersions:
		logger.logInfo(NAME + "analyzing " + address)
		if int(dataVersions[address]) > int(myVersion):
			logger.logInfo(NAME + "The data on this server is not up to date")
			return False
	return True
Example #19
    def _connectAndCheckStatus(self, busy = None):

        # Erase the Remote Controller status file; otherwise the Controller appends to it
        self._eraseRemoteControllerStatusFile()

        Logger.log("Android _doConnect starting ...", Logger.LOG_DEBUG)

        ret = self._runRemoteController(self.OPENVPN_CONNECT_ACTION, self._timeout)
        if not ret:
            gui.DialogOK(__language__(30017), __language__(30018), "")
            return

        startTime = time.time()
        elapsed = 0
        MAX_TIMEOUT = 60
        statusGrabbed = False
        status = ''
        while elapsed < MAX_TIMEOUT:
            status = self._getCurrentStatus()
            Logger.log("Checking Status:%s" % status, Logger.LOG_DEBUG)

            ASSIGN_IP = 'ASSIGN_IP;'

            # Check if after the ASSIGN_IP notification a CONNECTED; SUCCESS is notified
            if ASSIGN_IP in status and 'CONNECTED; SUCCESS' in status[status.find(ASSIGN_IP):]:
                Logger.log("VPN IP assigned and connected Ok", Logger.LOG_INFO)
                msg1, msg2, msg3 = self._connectionOkMessage()
                gui.DialogOK(msg1, msg2, msg3)
                statusGrabbed = True
                break

            elif 'USER_DID_NOT_APPROVE_THE_APP' in status:
                gui.DialogOK(__language__(30019), __language__(30005), "")
                statusGrabbed = True
                break

            elif 'EXITING; auth-failure' in status:
                gui.DialogOK(__language__(30038), __language__(30037), __language__(30005))
                self.kill()
                statusGrabbed = True
                break

            xbmc.sleep(1000)
            elapsed = time.time() - startTime

        Logger.log("_GetCurrent status:::")
        print status
        if not statusGrabbed:
            gui.DialogOK(__language__(30020), __language__(30021), __language__(30005))
            Logger.log("ERROR it break the loop with timeout. Check the notification status", Logger.LOG_ERROR)

        if busy:
            busy.close()

        return statusGrabbed
Example #20
def generateFromAST(class_diagram, output_file, target_language, platform):
    successful_generation = False
    if target_language == Languages.Python:
        successful_generation = PythonGenerator().generate(class_diagram, output_file, platform)
    elif target_language == Languages.CSharp:
        successful_generation = CSharpGenerator().generate(class_diagram, output_file, platform)
    # let user know ALL classes have been processed and loaded
    if successful_generation:
        Logger.showInfo("The following classes <" + ", ".join(class_diagram.class_names) + "> have been exported to the following file: " + output_file)
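
The if/elif over target_language generalizes to a mapping from language to generator class, which also makes the unknown-language case explicit. A sketch with a hypothetical stand-in for the real generator classes:

class PythonGenerator(object):  # stand-in for the real generator
    def generate(self, class_diagram, output_file, platform):
        return True

GENERATORS = {'Python': PythonGenerator}

def generate_from_ast(class_diagram, output_file, target_language, platform):
    gen_cls = GENERATORS.get(target_language)
    if gen_cls is None:
        return False  # unknown target language
    return gen_cls().generate(class_diagram, output_file, platform)

print(generate_from_ast(None, 'out.py', 'Python', None))  # -> True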
Example #21
    def _connectAndCheckStatus(self, busy = None):

        # Erase the Remote Controller status file; otherwise the Controller appends to it
        self._eraseRemoteControllerStatusFile()

        Logger.log("Android _doConnect starting ...", Logger.LOG_DEBUG)

        ret = self._runRemoteController(self.OPENVPN_CONNECT_ACTION, self._timeout)
        if not ret:
            gui.DialogOK("It was not possible to execute the VPN Remote Controller", "Please check if it is installed in your Android", "")
            return

        startTime = time.time()
        elapsed = 0
        MAX_TIMEOUT = 60
        statusGrabbed = False
        status = ''
        while elapsed < MAX_TIMEOUT:
            status = self._getCurrentStatus()
            Logger.log("Checking Status:%s" % status, Logger.LOG_DEBUG)

            ASSIGN_IP = 'ASSIGN_IP;'

            # Check if after the ASSIGN_IP notification a CONNECTED; SUCCESS is notified
            if ASSIGN_IP in status and 'CONNECTED; SUCCESS' in status[status.find(ASSIGN_IP):]:
                Logger.log("VPN IP assigned and connected Ok", Logger.LOG_INFO)
                msg1, msg2, msg3 = self._connectionOkMessage()
                gui.DialogOK(msg1, msg2, msg3)
                statusGrabbed = True
                break

            elif 'USER_DID_NOT_APPROVE_THE_APP' in status:
                gui.DialogOK("The VPN Client was not approved", "Please try again", "")
                statusGrabbed = True
                break

            elif 'EXITING; auth-failure' in status:
                gui.DialogOK("There was an error while logging in", "Please check the credentials", "and try again")
                self.kill()
                statusGrabbed = True
                break

            xbmc.sleep(1000)
            elapsed = time.time() - startTime

        Logger.log("_GetCurrent status:::")
        print status
        if not statusGrabbed:
            gui.DialogOK("There was an error", "The VPN client was not able to connect", "please try again")
            Logger.log("ERROR it break the loop with timeout. Check the notification status", Logger.LOG_ERROR)

        if busy:
            busy.close()

        return statusGrabbed
Example #22
def execute(paramsDictionary, message):
    logger.logInfo(NAME + "Resetting the server state")
    file = FileProcessor(paramsDictionary["HOME_PATH"] + "ServerSide/config/addresses.conf")
    file.lockFile()
    addresses = file.readFile()
    for key in addresses:
        addresses[key] = "F"
    file.writeToFile(addresses)
    file.unlockFile()
Example #23
    def run(self):
        self.listener.start()
        while self.stopQueue.empty():
            try:
                message = self.internalQueue.get(True, 2)
            except Empty:
                logger.logInfo(NAME + "No message")
                continue
            self.messageQueue.put(message)
Example #24
	def work(self, input_items, output_items):
		for idx in range(len(input_items[0])):
			decf, vf = self.algo1.decision(input_items[0][idx])

			if decf == 0:
				decf, vf = self.algo2.decision(input_items[1][idx])	

			Logger.append('hier', 'decision', self._xx[Logger._ch_status][decf])
		return len(input_items[0])
Example #25
	def startTransaction(self):
		logger.logInfo(NAME + "StartMethod")
		ticket = self.inputQueue.get(True, None)
		self.connection.send_message(ticket)
		try:
			answer = self.connection.get_message()
		except Exception, e:
			logger.logError(NAME + e.message)
			return
Example #26
    def handle_timeout(self, packet, time):
        if self.last_drop is None or \
           time - self.last_drop > TCPTahoe.TIMEOUT_TOLERANCE:
            self.ss = True
            self.ssthresh = max(self.host.cwnd / 2, TCPTahoe.INITIAL_CWND)
            self.set_window_size(time, TCPTahoe.INITIAL_CWND)

            self.last_drop = time

            Logger.warning(time, "Timeout Received. SS_Threshold -> %d" % self.ssthresh)
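
On timeout, Tahoe performs multiplicative decrease: ssthresh becomes half the current window (clamped to the initial window) and cwnd restarts from the initial value in slow start. A worked sketch of the arithmetic, assuming INITIAL_CWND is 1:

INITIAL_CWND = 1

def tahoe_timeout(cwnd):
    ssthresh = max(cwnd // 2, INITIAL_CWND)  # halve into the slow-start threshold
    return INITIAL_CWND, ssthresh            # window restarts from the initial size

print(tahoe_timeout(16))  # -> (1, 8)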
Example #27
def execute(paramsDictionary, message):
	logger.logInfo(NAME + "received " + message)
	parts = message.split('%')
	toCall = parts[0]
	if len(parts) > 1:
		argument = parts[1]
	else:
		argument = None
	function = functions[toCall]
	function(paramsDictionary, argument)
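
The 'command%argument' convention above can be parsed more defensively with str.partition, which always returns three parts even when the separator is absent. A sketch with a hypothetical handler table:

def dispatch(functions, params, message):
    name, sep, argument = message.partition('%')
    return functions[name](params, argument if sep else None)

functions = {'echo': lambda params, arg: arg}
print(dispatch(functions, {}, 'echo%hello'))  # -> 'hello'
print(dispatch(functions, {}, 'echo'))        # -> None (no argument part)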
Example #28
	def executeTransaction(self, cursor, command):
		try:
			if self.checkActiveServersCount() and self.checkDataVersions():
				cursor.execute(command)
				return cursor.fetchall()
			else:
				return ERROR_MESSAGE
		except MySQLdb.Error, e:
			logger.logError(NAME + "%d %s" % (e.args[0], e.args[1]))
			return "%d %s" % (e.args[0], e.args[1])
Example #29
    def _tick(self):
        """
        Called automatically once per second.
        """
        for _d in (self._bps, self._pkts):
            _d['cur'] = _d['counting']
            _d['counting'] = 0

        Logger.append(self._name, 'bps',  self._bps['cur'] ) #pylint: disable=E1101
        Logger.append(self._name, 'pkts', self._pkts['cur']) #pylint: disable=E1101
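
The counting/cur pair is a one-second rolling counter: producers bump 'counting' and the periodic tick snapshots it into 'cur' and resets. A self-contained sketch of the pattern (class name hypothetical):

class RateCounter(object):
    def __init__(self):
        self._bps = {'cur': 0, 'counting': 0}

    def add(self, nbytes):
        self._bps['counting'] += nbytes  # producers call this as traffic arrives

    def tick(self):
        # Call once per second: publish the last second's total and restart.
        self._bps['cur'] = self._bps['counting']
        self._bps['counting'] = 0
        return self._bps['cur']

c = RateCounter()
c.add(1500)
c.add(1500)
print(c.tick())  # -> 3000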
Example #30
    def _writeOpenVPNConfiguration(self, authPath):
        crl  = ''
        if self._isCustom:
            openVpnConfigFilePath  = config.getOpenVPNCustomTemplateConfigFilePath()
            if not os.path.exists(openVpnConfigFilePath):
                gui.DialogOK(__language__(30049), __language__(30050), __language__(30005) )
                return None

            cert    = config.getCustomCertFilePath()
            if not os.path.exists(cert):
                gui.DialogOK(__language__(30051), __language__(30052), __language__(30005) )
                return None

            crl = config.getCustomCrlFilePath()

        else:
            openVpnConfigFilePath  = config.getOpenVPNTemplateConfigFilePath()
            cert    = config.getCertFilePath()

            if self._usingDathoFreeServers():
                cert = config.getDathoCertFilePath()
                Logger.log("Using datho cert:%s" % cert, Logger.LOG_DEBUG)

        file    = open(openVpnConfigFilePath, mode='r')
        content = file.read()
        file.close()

        authPath = authPath.replace('\\', '/')
        cert     = cert.replace('\\', '/')
        crl     = crl.replace('\\', '/')

        print "SERVER ADDRESS:", self._serverAddress

        content = content.replace('#SERVER#',               self._serverAddress)
        content = content.replace('#PORT#',                 self._port)
        content = content.replace('#CERTIFICATE#',    '"' + cert     + '"')
        content = content.replace('#AUTHENTICATION#', '"' + authPath + '"')
        content = content.replace('#CRL#', '"' + crl + '"')


        # Adding a "log" directive stops OpenVPN from writing its status to stdout, and Windows
        # then fails to read the status; if it is ever needed, add it in _modifyOpenVPNConfigContent
        # for the corresponding OS.
        #content += "\r\nlog " +  "openvpn1.log"

        # Modify the OpenVPN config to add options that some OSs (e.g. Ubuntu Linux) may need.
        content = self._modifyOpenVPNConfigContent(content)

        Logger.log("OpenVPN configuration:%r" % content)

        cfgFilePath = self._getOpenVPNConfigPath()
        file = open(cfgFilePath, mode='w+')
        file.write(content)
        file.close()

        return cfgFilePath
Example #31
def main():
    args.cuda = True
    # dataset
    train_dataset = BSDS_RCFLoader(root=args.dataset, split="train")
    test_dataset = BSDS_RCFLoader(root=args.dataset, split="test")
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=8,
                              drop_last=True,
                              shuffle=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             num_workers=8,
                             drop_last=True,
                             shuffle=False)
    with open('data/HED-BSDS_PASCAL/test.lst', 'r') as f:
        test_list = f.readlines()
    test_list = [split(i.rstrip())[1] for i in test_list]
    assert len(test_list) == len(test_loader), "%d vs %d" % (len(test_list),
                                                             len(test_loader))

    # model
    model = RCF()
    model.cuda()
    model.apply(weights_init)
    model_dict = model.state_dict()
    state_dict = torch.load('./model/vgg16.pth')
    pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    #tune lr
    net_parameters_id = {}
    net = model
    for pname, p in net.named_parameters():
        if pname in [
                'conv1_1.weight', 'conv1_2.weight', 'conv2_1.weight',
                'conv2_2.weight', 'conv3_1.weight', 'conv3_2.weight',
                'conv3_3.weight', 'conv4_1.weight', 'conv4_2.weight',
                'conv4_3.weight'
        ]:
            print(pname, 'lr:1 de:1')
            if 'conv1-4.weight' not in net_parameters_id:
                net_parameters_id['conv1-4.weight'] = []
            net_parameters_id['conv1-4.weight'].append(p)
        elif pname in [
                'conv1_1.bias', 'conv1_2.bias', 'conv2_1.bias', 'conv2_2.bias',
                'conv3_1.bias', 'conv3_2.bias', 'conv3_3.bias', 'conv4_1.bias',
                'conv4_2.bias', 'conv4_3.bias'
        ]:
            print(pname, 'lr:2 de:0')
            if 'conv1-4.bias' not in net_parameters_id:
                net_parameters_id['conv1-4.bias'] = []
            net_parameters_id['conv1-4.bias'].append(p)
        elif pname in ['conv5_1.weight', 'conv5_2.weight', 'conv5_3.weight']:
            print(pname, 'lr:100 de:1')
            if 'conv5.weight' not in net_parameters_id:
                net_parameters_id['conv5.weight'] = []
            net_parameters_id['conv5.weight'].append(p)
        elif pname in ['conv5_1.bias', 'conv5_2.bias', 'conv5_3.bias']:
            print(pname, 'lr:200 de:0')
            if 'conv5.bias' not in net_parameters_id:
                net_parameters_id['conv5.bias'] = []
            net_parameters_id['conv5.bias'].append(p)
        elif pname in [
                'conv1_1_down.weight', 'conv1_2_down.weight',
                'conv2_1_down.weight', 'conv2_2_down.weight',
                'conv3_1_down.weight', 'conv3_2_down.weight',
                'conv3_3_down.weight', 'conv4_1_down.weight',
                'conv4_2_down.weight', 'conv4_3_down.weight',
                'conv5_1_down.weight', 'conv5_2_down.weight',
                'conv5_3_down.weight'
        ]:
            print(pname, 'lr:0.1 de:1')
            if 'conv_down_1-5.weight' not in net_parameters_id:
                net_parameters_id['conv_down_1-5.weight'] = []
            net_parameters_id['conv_down_1-5.weight'].append(p)
        elif pname in [
                'conv1_1_down.bias', 'conv1_2_down.bias', 'conv2_1_down.bias',
                'conv2_2_down.bias', 'conv3_1_down.bias', 'conv3_2_down.bias',
                'conv3_3_down.bias', 'conv4_1_down.bias', 'conv4_2_down.bias',
                'conv4_3_down.bias', 'conv5_1_down.bias', 'conv5_2_down.bias',
                'conv5_3_down.bias'
        ]:
            print(pname, 'lr:0.2 de:0')
            if 'conv_down_1-5.bias' not in net_parameters_id:
                net_parameters_id['conv_down_1-5.bias'] = []
            net_parameters_id['conv_down_1-5.bias'].append(p)
        elif pname in [
                'score_dsn1.weight', 'score_dsn2.weight', 'score_dsn3.weight',
                'score_dsn4.weight', 'score_dsn5.weight'
        ]:
            print(pname, 'lr:0.01 de:1')
            if 'score_dsn_1-5.weight' not in net_parameters_id:
                net_parameters_id['score_dsn_1-5.weight'] = []
            net_parameters_id['score_dsn_1-5.weight'].append(p)
        elif pname in [
                'score_dsn1.bias', 'score_dsn2.bias', 'score_dsn3.bias',
                'score_dsn4.bias', 'score_dsn5.bias'
        ]:
            print(pname, 'lr:0.02 de:0')
            if 'score_dsn_1-5.bias' not in net_parameters_id:
                net_parameters_id['score_dsn_1-5.bias'] = []
            net_parameters_id['score_dsn_1-5.bias'].append(p)
        elif pname in ['score_final.weight']:
            print(pname, 'lr:0.001 de:1')
            if 'score_final.weight' not in net_parameters_id:
                net_parameters_id['score_final.weight'] = []
            net_parameters_id['score_final.weight'].append(p)
        elif pname in ['score_final.bias']:
            print(pname, 'lr:0.002 de:0')
            if 'score_final.bias' not in net_parameters_id:
                net_parameters_id['score_final.bias'] = []
            net_parameters_id['score_final.bias'].append(p)

    optimizer = torch.optim.SGD([
        {
            'params': net_parameters_id['conv1-4.weight'],
            'lr': args.lr * 1,
            'weight_decay': args.weight_decay
        },
        {
            'params': net_parameters_id['conv1-4.bias'],
            'lr': args.lr * 2,
            'weight_decay': 0.
        },
        {
            'params': net_parameters_id['conv5.weight'],
            'lr': args.lr * 100,
            'weight_decay': args.weight_decay
        },
        {
            'params': net_parameters_id['conv5.bias'],
            'lr': args.lr * 200,
            'weight_decay': 0.
        },
        {
            'params': net_parameters_id['conv_down_1-5.weight'],
            'lr': args.lr * 0.1,
            'weight_decay': args.weight_decay
        },
        {
            'params': net_parameters_id['conv_down_1-5.bias'],
            'lr': args.lr * 0.2,
            'weight_decay': 0.
        },
        {
            'params': net_parameters_id['score_dsn_1-5.weight'],
            'lr': args.lr * 0.01,
            'weight_decay': args.weight_decay
        },
        {
            'params': net_parameters_id['score_dsn_1-5.bias'],
            'lr': args.lr * 0.02,
            'weight_decay': 0.
        },
        {
            'params': net_parameters_id['score_final.weight'],
            'lr': args.lr * 0.001,
            'weight_decay': args.weight_decay
        },
        {
            'params': net_parameters_id['score_final.bias'],
            'lr': args.lr * 0.002,
            'weight_decay': 0.
        },
    ],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.stepsize,
                                    gamma=args.gamma)

    # optimizer = torch.optim.Adam([
    #         {'params': net_parameters_id['conv1-4.weight']      , 'lr': args.lr*1    , 'weight_decay': args.weight_decay},
    #         {'params': net_parameters_id['conv1-4.bias']        , 'lr': args.lr*2    , 'weight_decay': 0.},
    #         {'params': net_parameters_id['conv5.weight']        , 'lr': args.lr*100  , 'weight_decay': args.weight_decay},
    #         {'params': net_parameters_id['conv5.bias']          , 'lr': args.lr*200  , 'weight_decay': 0.},
    #         {'params': net_parameters_id['conv_down_1-5.weight'], 'lr': args.lr*0.1  , 'weight_decay': args.weight_decay},
    #         {'params': net_parameters_id['conv_down_1-5.bias']  , 'lr': args.lr*0.2  , 'weight_decay': 0.},
    #         {'params': net_parameters_id['score_dsn_1-5.weight'], 'lr': args.lr*0.01 , 'weight_decay': args.weight_decay},
    #         {'params': net_parameters_id['score_dsn_1-5.bias']  , 'lr': args.lr*0.02 , 'weight_decay': 0.},
    #         {'params': net_parameters_id['score_final.weight']  , 'lr': args.lr*0.001, 'weight_decay': args.weight_decay},
    #         {'params': net_parameters_id['score_final.bias']    , 'lr': args.lr*0.002, 'weight_decay': 0.},
    #     ], lr=args.lr, betas=(0.9, 0.99), weight_decay=args.weight_decay)
    # scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)

    # log
    log = Logger(join(TMP_DIR, '%s-%d-log.txt' % ('sgd', args.lr)))
    sys.stdout = log

    train_loss = []
    train_loss_detail = []
    for epoch in range(args.start_epoch, args.maxepoch):
        if epoch == 0:
            print("Performing initial testing...")
            multiscale_test(model,
                            test_loader,
                            epoch=epoch,
                            test_list=test_list,
                            save_dir=join(TMP_DIR, 'initial-testing-record'))

        tr_avg_loss, tr_detail_loss = train(
            train_loader,
            model,
            optimizer,
            epoch,
            save_dir=join(TMP_DIR, 'epoch-%d-training-record' % epoch))
        test(model,
             test_loader,
             epoch=epoch,
             test_list=test_list,
             save_dir=join(TMP_DIR, 'epoch-%d-testing-record-view' % epoch))
        multiscale_test(model,
                        test_loader,
                        epoch=epoch,
                        test_list=test_list,
                        save_dir=join(TMP_DIR,
                                      'epoch-%d-testing-record' % epoch))
        log.flush()  # write log
        # Save checkpoint
        save_file = os.path.join(TMP_DIR,
                                 'checkpoint_epoch{}.pth'.format(epoch))
        save_checkpoint(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            filename=save_file)
        scheduler.step()  # will adjust learning rate
        # save train/val loss/accuracy, save every epoch in case of early stop
        train_loss.append(tr_avg_loss)
        train_loss_detail += tr_detail_loss
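
The long if/elif ladder exists only to sort parameters into torch.optim.SGD parameter groups so each layer family gets its own learning-rate multiplier and weight decay. The mechanism itself is small; a sketch on a toy model (layer split and multipliers illustrative):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 1))
base_lr = 0.01
optimizer = torch.optim.SGD([
    {'params': model[0].parameters(), 'lr': base_lr * 1, 'weight_decay': 2e-4},
    {'params': model[1].parameters(), 'lr': base_lr * 100, 'weight_decay': 0.0},
], lr=base_lr, momentum=0.9)

for group in optimizer.param_groups:
    print(group['lr'], group['weight_decay'])  # per-group hyperparameters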
Example #32
def main(env_name, num_episodes, gamma, lam, kl_targ, batch_size, eval):
    """ Main training loop

    Args:
        env_name: OpenAI Gym environment name, e.g. 'Hopper-v1'
        num_episodes: maximum number of episodes to run
        gamma: reward discount factor (float)
        lam: lambda from Generalized Advantage Estimate
        kl_targ: D_KL target for policy update, D_KL(pi_old || pi_new)
        batch_size: number of episodes per policy training batch
    """

    if eval:
        print("Evaluating: ")
        evaluate(env_name, num_episodes)
        exit()

    killer = GracefulKiller()
    env, obs_dim, act_dim = init_gym(env_name)
    now = datetime.utcnow().strftime(
        "%b-%d_%H:%M:%S")  # create unique directories
    logger = Logger(logname=env_name, now=now)
    aigym_path = os.path.join('/tmp', env_name, now)
    #env = wrappers.Monitor(env, aigym_path, force=True)
    scaler = Scaler(obs_dim)
    val_func = NNValueFunction(obs_dim)
    policy = Policy(obs_dim, act_dim, kl_targ)
    #policy.restore_weights() ## -------------
    #val_func.restore_weights() ## -------------
    # run a few episodes of untrained policy to initialize scaler:
    run_policy(env, policy, scaler, logger, episodes=5)
    episode = 0
    while episode < num_episodes:
        trajectories = run_policy(env,
                                  policy,
                                  scaler,
                                  logger,
                                  episodes=batch_size)
        episode += len(trajectories)
        add_value(trajectories, val_func)  # add estimated values to episodes
        add_disc_sum_rew(trajectories,
                         gamma)  # calculated discounted sum of Rs
        add_gae(trajectories, gamma, lam)  # calculate advantage
        # concatenate all episodes into single NumPy arrays
        observes, actions, advantages, disc_sum_rew = build_train_set(
            trajectories)
        # add various stats to training log:
        log_batch_stats(observes, actions, advantages, disc_sum_rew, logger,
                        episode)
        policy.update(observes, actions, advantages, logger)  # update policy
        val_func.fit(observes, disc_sum_rew, logger)  # update value function
        logger.write(display=True)  # write logger results to file and stdout

        if killer.kill_now:
            if input('Terminate training (y/[n])? ') == 'y':
                break
            killer.kill_now = False

    print("Scaler vars,means: ")
    print(scaler.vars, scaler.means)

    for i in range(3):
        run_episode(env, policy, scaler, animate=True)

    #policy.save_weights()
    #val_func.save_weights()

    #WARNING: scaler is disabled

    logger.close()
    policy.close_sess()
    val_func.close_sess()
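
GracefulKiller, used by both training loops, is the usual trick of trapping SIGINT/SIGTERM into a flag the loop polls between batches. A common minimal implementation looks like this (a sketch, not necessarily the source's class):

import signal

class GracefulKiller(object):
    kill_now = False

    def __init__(self):
        # Route both signals to a handler that only sets a flag.
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)

    def _exit_gracefully(self, signum, frame):
        self.kill_now = True

killer = GracefulKiller()
# A long-running loop would check killer.kill_now once per iteration.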
Example #33
     temp_method = TemporalRandomCrop(opt.sample_duration)
 elif opt.train_temporal == 'ranpad':
     temp_method = TemporalPadRandomCrop(opt.sample_duration,
                                         opt.temporal_pad)
 temporal_transform = Compose([temp_method])
 target_transform = ClassLabel()
 training_data = get_training_set(opt, spatial_transform,
                                  temporal_transform, target_transform)
 # pdb.set_trace()
 train_loader = torch.utils.data.DataLoader(training_data,
                                            batch_size=opt.batch_size,
                                            shuffle=True,
                                            num_workers=opt.n_threads,
                                            pin_memory=True)
 train_logger = Logger(
     os.path.join(opt.result_path,
                  'train_{}.log'.format(opt.store_name)),
     ['epoch', 'loss', 'acc', 'precision', 'recall', 'lr'])
 train_batch_logger = Logger(
     os.path.join(opt.result_path,
                  'train_batch_{}.log'.format(opt.store_name)), [
                      'epoch', 'batch', 'iter', 'loss', 'acc',
                      'precision', 'recall', 'lr'
                  ])
 train_tensorboard_writer = SummaryWriter(
     os.path.join(opt.result_path, "tensorboard"))
 if opt.nesterov:
     dampening = 0
 else:
     dampening = opt.dampening
 # optimizer = optim.SGD(
 #     parameters,
Example #34
import torch.nn.functional as F

# Hyper Parameters
input_size = 784
hidden_size = 1000
num_classes = 10
num_epochs = 20
batch_size = 100
learning_rate = 0.01

I = 3.0
D = 100.0

logger = Logger('pid.txt', title='mnist')
logger.set_names(
    ['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])

# MNIST Dataset
train_dataset = dsets.MNIST(root='./data',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./data',
                           train=False,
                           transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)  # remaining arguments assumed; the snippet was truncated here
Example #35
def main():
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    if args.remark is None:
        args.remark = args.dataset + "-" + args.task + "-" + args.norm

    if args.dataset == "shapenet":
        args.num_class = 16
    else:
        args.num_class = 40

    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    # experiment_dir = Path('./exp/v2/')
    experiment_dir = Path('/data-x/g12/zhangjie/3dIP/exp3.0/v2')
    # experiment_dir = Path('/data-x/g10/zhangjie/3D/exp/v2')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath(args.remark + "_" + timestr)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG_curve'''
    title = args.dataset + "-" + args.task + "-" + args.norm
    logger_loss = Logger(os.path.join(log_dir, 'log_loss.txt'), title=title)
    logger_loss.set_names([
        'Train AVE Loss', 'Train Public Loss', 'Train Private Loss',
        'Valid AVE Loss', 'Valid Public Loss', 'Valid Private Loss'
    ])
    logger_acc = Logger(os.path.join(log_dir, 'log_acc.txt'), title=title)
    logger_acc.set_names([
        'Train Public Acc.', 'Train Private Acc.', 'Test Public Acc.',
        'Test Private Acc.'
    ])
    '''LOG'''  # create the log file
    logger = logging.getLogger("Model")  # the logger's name
    # print("FFFFFFFF",logger) #<Logger Model (WARNING)>
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)  # minimum level written to the file
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)  # attach the file handler to the logger
    log_string('PARAMETER ...')
    # print("FFFFFFF",logger.info)  #<bound method Logger.info of <Logger Model (INFO)>>
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    if args.dataset == "shapenet":
        trainDataLoader = getData.get_dataLoader(train=True,
                                                 Shapenet=True,
                                                 batchsize=args.batch_size)
        testDataLoader = getData.get_dataLoader(train=False,
                                                Shapenet=True,
                                                batchsize=args.batch_size)
    else:
        trainDataLoader = getData.get_dataLoader(train=True,
                                                 Shapenet=False,
                                                 batchsize=args.batch_size)
        testDataLoader = getData.get_dataLoader(train=False,
                                                Shapenet=False,
                                                batchsize=args.batch_size)

    log_string('Finished ...')
    log_string('Load model ...')
    '''MODEL LOADING'''
    num_class = args.num_class
    MODEL = importlib.import_module(args.model)
    # importlib lets the model module named by args.model be chosen at runtime,
    # instead of importing every model up front.

    # copy the model files into the experiment dir
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util.py', str(experiment_dir))
    shutil.copy('train_2_cls.py', str(experiment_dir))
    shutil.copy('./data/getData.py', str(experiment_dir))
    shutil.copytree('./models/layers', str(experiment_dir) + "/layers")

    classifier = MODEL.get_model(num_class, channel=3).cuda()
    criterion = MODEL.get_loss().cuda()

    pprint(classifier)

    try:
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []
    mean_correct2 = []
    mean_loss = []
    mean_loss1 = []
    mean_loss2 = []
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        time_start = datetime.datetime.now()
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            points = provider.random_point_dropout(
                points)  # provider is a local point-cloud utility: random dropout, dropped points set to the first point's value
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])  # randomly scale the points
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])  # randomly shift the points
            points = torch.Tensor(points)
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()

            for m in classifier.modules():
                if isinstance(m, SignLoss):
                    m.reset()

            loss1 = torch.tensor(0.).cuda()
            loss2 = torch.tensor(0.).cuda()
            sign_loss = torch.tensor(0.).cuda()

            for ind in range(2):
                if ind == 0:
                    pred, trans_feat = classifier(points, ind=ind)
                    loss1 = criterion(pred, target.long(), trans_feat)
                    mean_loss1.append(loss1.item() / float(points.size()[0]))
                    pred_choice = pred.data.max(1)[1]
                    correct = pred_choice.eq(target.long().data).cpu().sum()
                    mean_correct.append(correct.item() /
                                        float(points.size()[0]))

                else:
                    pred2, trans_feat2 = classifier(points, ind=ind)
                    loss2 = criterion(pred2, target.long(), trans_feat2)
                    mean_loss2.append(loss2.item() / float(points.size()[0]))
                    pred_choice2 = pred2.data.max(1)[1]
                    correct2 = pred_choice2.eq(target.long().data).cpu().sum()
                    mean_correct2.append(correct2.item() /
                                         float(points.size()[0]))

            for m in classifier.modules():
                if isinstance(m, SignLoss):
                    sign_loss += m.loss

            loss = args.beta * loss1 + loss2 + sign_loss
            mean_loss.append(loss.item() / float(points.size()[0]))

            # loss = loss2
            loss.backward()
            optimizer.step()
            global_step += 1

        train_instance_acc = np.mean(mean_correct)
        train_instance_acc2 = np.mean(mean_correct2)
        train_instance_acc_ave = (train_instance_acc + train_instance_acc2) / 2
        train_loss = np.mean(mean_loss) / 2
        train_loss1 = np.mean(mean_loss1)
        train_loss2 = np.mean(mean_loss2)

        log_string('Train Instance Public Accuracy: %f' % train_instance_acc)
        log_string('Train Instance Private Accuracy: %f' % train_instance_acc2)

        sign_acc = torch.tensor(0.).cuda()
        count = 0

        for m in classifier.modules():
            if isinstance(m, SignLoss):
                sign_acc += m.acc
                count += 1

        if count != 0:
            sign_acc /= count

        log_string('Sign Accuracy: %f' % sign_acc)

        with torch.no_grad():
            for ind in range(2):
                if ind == 0:
                    val_loss1, test_instance_acc1, class_acc1 = test(
                        classifier,
                        testDataLoader,
                        num_class=args.num_class,
                        ind=0)
                else:
                    val_loss2, test_instance_acc2, class_acc2 = test(
                        classifier,
                        testDataLoader,
                        num_class=args.num_class,
                        ind=1)

            log_string(
                'Test Instance Public Accuracy: %f, Class Public Accuracy: %f'
                % (test_instance_acc1, class_acc1))
            log_string(
                'Test Instance Private Accuracy: %f, Class Private Accuracy: %f'
                % (test_instance_acc2, class_acc2))

            val_loss = (val_loss1 + val_loss2) / 2
            test_instance_acc = (test_instance_acc1 + test_instance_acc2) / 2
            class_acc = (class_acc1 + class_acc2) / 2

            if (test_instance_acc >= best_instance_acc):
                best_instance_acc = test_instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string(
                'Test Instance Average Accuracy: %f, Class Average Accuracy: %f'
                % (test_instance_acc, class_acc))
            log_string(
                'Best Instance Average Accuracy: %f, Class Average Accuracy: %f'
                % (best_instance_acc, best_class_acc))

            if (test_instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                log_string('best_epoch %s' % str(best_epoch))
                state = {
                    'epoch': best_epoch,
                    'instance_acc': test_instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

        logger_loss.append([
            train_loss, train_loss1, train_loss2, val_loss, val_loss1,
            val_loss2
        ])
        logger_acc.append([
            train_instance_acc, train_instance_acc2, test_instance_acc1,
            test_instance_acc2
        ])

        time_end = datetime.datetime.now()
        time_span_str = str((time_end - time_start).seconds)
        log_string('Epoch time : %s S' % (time_span_str))

    logger_loss.close()
    logger_loss.plot()
    savefig(os.path.join(log_dir, 'log_loss.eps'))
    logger_acc.close()
    logger_acc.plot()
    savefig(os.path.join(log_dir, 'log_acc.eps'))

    log_string('best_epoch %s' % str(best_epoch))

    logger.info('End of training...')
Example #36
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            baseWidth=args.base_width,
            cardinality=args.cardinality,
        )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: checkpoint file not found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
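
adjust_learning_rate and the state dict are referenced above but not defined in this snippet; a hedged sketch of the usual step-decay implementation (args.schedule and args.gamma are assumptions, not shown in the original):

def adjust_learning_rate(optimizer, epoch):
    # Hypothetical step schedule: decay the LR by args.gamma at each epoch
    # listed in args.schedule, mirroring the state['lr'] convention above.
    global state
    if epoch in args.schedule:
        state['lr'] *= args.gamma
        for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr']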
Example No. 37
class Solver(object):
    def __init__(self, hps, data_loader, log_dir='./log/'):
        self.hps = hps
        self.data_loader = data_loader
        self.model_kept = []
        self.max_keep = 100
        self.build_model()
        self.logger = Logger(log_dir) if log_dir is not None else None

    def build_model(self):
        hps = self.hps
        ns = self.hps.ns
        emb_size = self.hps.emb_size
        self.Encoder = cc(Encoder(ns=ns, dp=hps.enc_dp))
        self.Decoder = cc(Decoder(ns=ns, c_a=hps.n_speakers, emb_size=emb_size))
        self.Generator = cc(Decoder(ns=ns, c_a=hps.n_speakers, emb_size=emb_size))
        self.SpeakerClassifier = cc(SpeakerClassifier(ns=ns, n_class=hps.n_speakers, dp=hps.dis_dp))
        self.PatchDiscriminator = cc(nn.DataParallel(PatchDiscriminator(ns=ns, n_class=hps.n_speakers)))
        betas = (0.5, 0.9)
        params = list(self.Encoder.parameters()) + list(self.Decoder.parameters())
        self.ae_opt = optim.Adam(params, lr=self.hps.lr, betas=betas)
        self.clf_opt = optim.Adam(self.SpeakerClassifier.parameters(), lr=self.hps.lr, betas=betas)
        self.gen_opt = optim.Adam(self.Generator.parameters(), lr=self.hps.lr, betas=betas)
        self.patch_opt = optim.Adam(self.PatchDiscriminator.parameters(), lr=self.hps.lr, betas=betas)

    def save_model(self, model_path, iteration, enc_only=True):
        if not enc_only:
            all_model = {
                'encoder': self.Encoder.state_dict(),
                'decoder': self.Decoder.state_dict(),
                'generator': self.Generator.state_dict(),
                'classifier': self.SpeakerClassifier.state_dict(),
                'patch_discriminator': self.PatchDiscriminator.state_dict(),
            }
        else:
            all_model = {
                'encoder': self.Encoder.state_dict(),
                'decoder': self.Decoder.state_dict(),
                'generator': self.Generator.state_dict(),
            }
        new_model_path = '{}-{}'.format(model_path, iteration)
        with open(new_model_path, 'wb') as f_out:
            torch.save(all_model, f_out)
        self.model_kept.append(new_model_path)

        # Keep at most self.max_keep saved checkpoints on disk.
        if len(self.model_kept) > self.max_keep:
            os.remove(self.model_kept[0])
            self.model_kept.pop(0)

    def load_model(self, model_path, enc_only=True):
        print('load model from {}'.format(model_path))
        with open(model_path, 'rb') as f_in:
            all_model = torch.load(f_in)
            self.Encoder.load_state_dict(all_model['encoder'])
            self.Decoder.load_state_dict(all_model['decoder'])
            self.Generator.load_state_dict(all_model['generator'])
            if not enc_only:
                self.SpeakerClassifier.load_state_dict(all_model['classifier'])
                self.PatchDiscriminator.load_state_dict(all_model['patch_discriminator'])

    def set_eval(self):
        self.Encoder.eval()
        self.Decoder.eval()
        self.Generator.eval()
        self.SpeakerClassifier.eval()
        self.PatchDiscriminator.eval()

    def test_step(self, x, c, gen=False):
        self.set_eval()
        x = to_var(x).permute(0, 2, 1)
        enc = self.Encoder(x)
        x_tilde = self.Decoder(enc, c)
        if gen:
            x_tilde += self.Generator(enc, c)
        return x_tilde.data.cpu().numpy()

    def permute_data(self, data):
        C = to_var(data[0], requires_grad=False)
        X = to_var(data[1]).permute(0, 2, 1)
        return C, X

    def sample_c(self, size):
        n_speakers = self.hps.n_speakers
        c_sample = Variable(
                torch.multinomial(torch.ones(n_speakers), num_samples=size, replacement=True),  
                requires_grad=False)
        c_sample = c_sample.cuda() if torch.cuda.is_available() else c_sample
        return c_sample

    def encode_step(self, x):
        enc = self.Encoder(x)
        return enc

    def decode_step(self, enc, c):
        x_tilde = self.Decoder(enc, c)
        return x_tilde

    def patch_step(self, x, x_tilde, is_dis=True):
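        # WGAN-GP convention: the discriminator pass returns the Wasserstein
        # estimate mean(D(real) - D(fake)) plus a gradient penalty, while the
        # generator pass returns -mean(D(fake)); both passes also return the
        # auxiliary speaker-classification logits.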
        D_real, real_logits = self.PatchDiscriminator(x, classify=True)
        D_fake, fake_logits = self.PatchDiscriminator(x_tilde, classify=True)
        if is_dis:
            w_dis = torch.mean(D_real - D_fake)
            gp = calculate_gradients_penalty(self.PatchDiscriminator, x, x_tilde)
            return w_dis, real_logits, gp
        else:
            return -torch.mean(D_fake), fake_logits

    def gen_step(self, enc, c):
        x_gen = self.Decoder(enc, c) + self.Generator(enc, c)
        return x_gen 

    def clf_step(self, enc):
        logits = self.SpeakerClassifier(enc)
        return logits

    def cal_loss(self, logits, y_true):
        # calculate loss 
        criterion = nn.CrossEntropyLoss()
        loss = criterion(logits, y_true)
        return loss

    def train(self, model_path, flag='train', mode='train'):
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
            os.chmod(model_path, 0o755)
        model_path = os.path.join(model_path, 'model.pkl')
        # load hyperparams
        hps = self.hps
        if mode == 'pretrain_G':
            for iteration in range(hps.enc_pretrain_iters):
                data = next(self.data_loader)
                c, x = self.permute_data(data)
                # encode
                enc = self.encode_step(x)
                x_tilde = self.decode_step(enc, c)
                loss_rec = torch.mean(torch.abs(x_tilde - x))
                reset_grad([self.Encoder, self.Decoder])
                loss_rec.backward()
                grad_clip([self.Encoder, self.Decoder], self.hps.max_grad_norm)
                self.ae_opt.step()
                # tb info
                info = {
                    f'{flag}/pre_loss_rec': loss_rec.item(),
                }
                slot_value = (iteration + 1, hps.enc_pretrain_iters) + tuple(info.values())
                log = 'pre_G:[%06d/%06d], loss_rec=%.3f'
                print(log % slot_value)
                if iteration % 100 == 0:
                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, iteration + 1)
        elif mode == 'pretrain_D':
            for iteration in range(hps.dis_pretrain_iters):
                data = next(self.data_loader)
                c, x = self.permute_data(data)
                # encode
                enc = self.encode_step(x)
                # classify speaker
                logits = self.clf_step(enc)
                loss_clf = self.cal_loss(logits, c)
                # update 
                reset_grad([self.SpeakerClassifier])
                loss_clf.backward()
                grad_clip([self.SpeakerClassifier], self.hps.max_grad_norm)
                self.clf_opt.step()
                # calculate acc
                acc = cal_acc(logits, c)
                info = {
                    f'{flag}/pre_loss_clf': loss_clf.item(),
                    f'{flag}/pre_acc': acc,
                }
                slot_value = (iteration + 1, hps.dis_pretrain_iters) + tuple(info.values())
                log = 'pre_D:[%06d/%06d], loss_clf=%.2f, acc=%.2f'
                print(log % slot_value)
                if iteration % 100 == 0:
                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, iteration + 1)
        elif mode == 'patchGAN':
            for iteration in range(hps.patch_iters):
                #=======train D=========#
                for step in range(hps.n_patch_steps):
                    data = next(self.data_loader)
                    c, x = self.permute_data(data)
                    # encode
                    enc = self.encode_step(x)
                    # sample c
                    c_prime = self.sample_c(x.size(0))
                    # generator
                    x_tilde = self.gen_step(enc, c_prime)
                    # discriminator
                    w_dis, real_logits, gp = self.patch_step(x, x_tilde, is_dis=True)
                    # aux classification loss 
                    loss_clf = self.cal_loss(real_logits, c)
                    loss = -hps.beta_dis * w_dis + hps.beta_clf * loss_clf + hps.lambda_ * gp
                    reset_grad([self.PatchDiscriminator])
                    loss.backward()
                    grad_clip([self.PatchDiscriminator], self.hps.max_grad_norm)
                    self.patch_opt.step()
                    # calculate acc
                    acc = cal_acc(real_logits, c)
                    info = {
                        f'{flag}/w_dis': w_dis.item(),
                        f'{flag}/gp': gp.item(), 
                        f'{flag}/real_loss_clf': loss_clf.item(),
                        f'{flag}/real_acc': acc, 
                    }
                    slot_value = (step, iteration + 1, hps.patch_iters) + tuple(info.values())
                    log = 'patch_D-%d:[%06d/%06d], w_dis=%.2f, gp=%.2f, loss_clf=%.2f, acc=%.2f'
                    print(log % slot_value)
                    if iteration % 100 == 0:
                        for tag, value in info.items():
                            self.logger.scalar_summary(tag, value, iteration + 1)
                #=======train G=========#
                data = next(self.data_loader)
                c, x = self.permute_data(data)
                # encode
                enc = self.encode_step(x)
                # sample c
                c_prime = self.sample_c(x.size(0))
                # generator
                x_tilde = self.gen_step(enc, c_prime)
                # discriminator
                loss_adv, fake_logits = self.patch_step(x, x_tilde, is_dis=False)
                # aux classification loss 
                loss_clf = self.cal_loss(fake_logits, c_prime)
                loss = hps.beta_clf * loss_clf + hps.beta_gen * loss_adv
                reset_grad([self.Generator])
                loss.backward()
                grad_clip([self.Generator], self.hps.max_grad_norm)
                self.gen_opt.step()
                # calculate acc
                acc = cal_acc(fake_logits, c_prime)
                info = {
                    f'{flag}/loss_adv': loss_adv.item(),
                    f'{flag}/fake_loss_clf': loss_clf.item(),
                    f'{flag}/fake_acc': acc, 
                }
                slot_value = (iteration + 1, hps.patch_iters) + tuple(info.values())
                log = 'patch_G:[%06d/%06d], loss_adv=%.2f, loss_clf=%.2f, acc=%.2f'
                print(log % slot_value)
                if iteration % 100 == 0:
                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, iteration + 1)
                if iteration % 1000 == 0 or iteration + 1 == hps.patch_iters:
                    self.save_model(model_path, iteration + hps.iters)
        elif mode == 'train':
            for iteration in range(hps.iters):
                # calculate current alpha
                if iteration < hps.lat_sched_iters:
                    current_alpha = hps.alpha_enc * (iteration / hps.lat_sched_iters)
                else:
                    current_alpha = hps.alpha_enc
                #==================train D==================#
                for step in range(hps.n_latent_steps):
                    data = next(self.data_loader)
                    c, x = self.permute_data(data)
                    # encode
                    enc = self.encode_step(x)
                    # classify speaker
                    logits = self.clf_step(enc)
                    loss_clf = self.cal_loss(logits, c)
                    loss = hps.alpha_dis * loss_clf
                    # update 
                    reset_grad([self.SpeakerClassifier])
                    loss.backward()
                    grad_clip([self.SpeakerClassifier], self.hps.max_grad_norm)
                    self.clf_opt.step()
                    # calculate acc
                    acc = cal_acc(logits, c)
                    info = {
                        f'{flag}/D_loss_clf': loss_clf.item(),
                        f'{flag}/D_acc': acc,
                    }
                    slot_value = (step, iteration + 1, hps.iters) + tuple(info.values())
                    log = 'D-%d:[%06d/%06d], loss_clf=%.2f, acc=%.2f'
                    print(log % slot_value)
                    if iteration % 100 == 0:
                        for tag, value in info.items():
                            self.logger.scalar_summary(tag, value, iteration + 1)
                #==================train G==================#
                data = next(self.data_loader)
                c, x = self.permute_data(data)
                # encode
                enc = self.encode_step(x)
                # decode
                x_tilde = self.decode_step(enc, c)
                loss_rec = torch.mean(torch.abs(x_tilde - x))
                # classify speaker
                logits = self.clf_step(enc)
                acc = cal_acc(logits, c)
                loss_clf = self.cal_loss(logits, c)
                # maximize classification loss
                loss = loss_rec - current_alpha * loss_clf
                reset_grad([self.Encoder, self.Decoder])
                loss.backward()
                grad_clip([self.Encoder, self.Decoder], self.hps.max_grad_norm)
                self.ae_opt.step()
                info = {
                    f'{flag}/loss_rec': loss_rec.item(),
                    f'{flag}/G_loss_clf': loss_clf.item(),
                    f'{flag}/alpha': current_alpha,
                    f'{flag}/G_acc': acc,
                }
                slot_value = (iteration + 1, hps.iters) + tuple(info.values())
                log = 'G:[%06d/%06d], loss_rec=%.3f, loss_clf=%.2f, alpha=%.2e, acc=%.2f'
                print(log % slot_value)
                if iteration % 100 == 0:
                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, iteration + 1)
                if iteration % 1000 == 0 or iteration + 1 == hps.iters:
                    self.save_model(model_path, iteration)
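
A hypothetical driver for the Solver above; the call order follows the modes handled in train() and is an assumption, not part of the original snippet:

solver = Solver(hps, data_loader, log_dir='./log/')
solver.train('./ckpt', mode='pretrain_G')  # reconstruction warm-up
solver.train('./ckpt', mode='pretrain_D')  # speaker-classifier warm-up
solver.train('./ckpt', mode='train')       # adversarial latent training
solver.train('./ckpt', mode='patchGAN')    # patch-discriminator stage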
Example No. 38
def main():
    parser = argparse.ArgumentParser(description='GraphSAGE')
    parser.add_argument("--dataset", type=str)
    parser.add_argument("--device", type=int, default=0)
    parser.add_argument("--dropout",
                        type=float,
                        default=0.5,
                        help="dropout probability")
    parser.add_argument("--lr", type=float, default=1e-2, help="learning rate")
    parser.add_argument("--epochs",
                        type=int,
                        default=200,
                        help="number of training epochs")
    parser.add_argument("--n-hidden",
                        type=int,
                        default=16,
                        help="number of hidden gcn units")
    parser.add_argument("--aggr",
                        type=str,
                        choices=['sum', 'mean'],
                        default='mean',
                        help='Aggregation for messages')
    parser.add_argument("--weight-decay",
                        type=float,
                        default=5e-4,
                        help="Weight for L2 loss")
    parser.add_argument("--eval",
                        action='store_true',
                        help='If not set, we will only do the training part.')
    parser.add_argument("--runs", type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    path = osp.join('dataset', args.dataset)
    dataset = Planetoid(path, args.dataset, transform=T.NormalizeFeatures())
    data = dataset[0]

    features = data.x.to(device)
    labels = data.y.to(device)
    edge_index = data.edge_index.to(device)
    adj = SparseTensor(row=edge_index[0], col=edge_index[1])
    train_mask = torch.BoolTensor(data.train_mask).to(device)
    val_mask = torch.BoolTensor(data.val_mask).to(device)
    test_mask = torch.BoolTensor(data.test_mask).to(device)

    model = GraphSAGE(dataset.num_features, args.n_hidden, dataset.num_classes,
                      args.aggr, F.relu, args.dropout).to(device)

    loss_fcn = nn.CrossEntropyLoss()

    logger = Logger(args.runs, args)
    dur = []
    for run in range(args.runs):
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
        for epoch in range(1, args.epochs + 1):
            model.train()
            if epoch >= 3:
                t0 = time.time()
            # forward
            logits = model(features, adj)
            loss = loss_fcn(logits[train_mask], labels[train_mask])

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if epoch >= 3:
                dur.append(time.time() - t0)
                print('Training time/epoch {}'.format(np.mean(dur)))

            if not args.eval:
                continue

            train_acc, val_acc, test_acc = evaluate(model, features, adj,
                                                    labels, train_mask,
                                                    val_mask, test_mask)
            logger.add_result(run, (train_acc, val_acc, test_acc))

            print(
                "Run {:02d} | Epoch {:05d} | Loss {:.4f} | Train {:.4f} | Val {:.4f} | Test {:.4f}"
                .format(run, epoch, loss.item(), train_acc, val_acc, test_acc))

        if args.eval:
            logger.print_statistics(run)

    if args.eval:
        logger.print_statistics()
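
evaluate() is called above but not shown; a minimal sketch under assumed semantics (one full-graph forward pass, accuracy computed per mask):

@torch.no_grad()
def evaluate(model, features, adj, labels, train_mask, val_mask, test_mask):
    model.eval()
    logits = model(features, adj)
    accs = []
    for mask in (train_mask, val_mask, test_mask):
        pred = logits[mask].argmax(dim=-1)
        accs.append((pred == labels[mask]).float().mean().item())
    return tuple(accs)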
Example No. 39
batch_size = 1
learning_rate = 0.01
momentum = 0.9
weight_decay = 0.0001
nesterov = True

file = h5py.File(os.path.join(data_path,'ExtractedFrames.h5'),'r')
n_frames = file['X'].shape[0]
shuffle = np.random.permutation(np.arange(n_frames))
train_test_split = int(shuffle.shape[0] * train_test_split)  # fraction -> split index

#train_loader = Generator(file,batch_size=batch_size, idcs=shuffle[0:train_test_split])#,n_frames = 5)
#val_loader = Generator(file,batch_size=batch_size, idcs=shuffle[train_test_split:])#,n_frames = 5)

train_logger = Logger(
    os.path.join(data_path, 'train.log'),
    ['epoch', 'loss', 'acc', 'lr'])

train_batch_logger = Logger(
    os.path.join(data_path, 'train_batch.log'),
    ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

val_logger = Logger(
    os.path.join(data_path, 'test.log'),
    ['epoch', 'loss', 'acc', 'lr'])


#res2d_pruned =  nn.Sequential(*list(resnet18.children())[2:-4])
#res3d_pruned = nn.Sequential(*(list(model.children())[:5]))#+(list(inter1.children()))))#+list(resnet18.children())[2:-4]))
#inter1 = Inter_Layers()
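
The Logger above is constructed with a path and a list of column names; a hypothetical usage (the .log(dict) API is an assumption based on loggers of this style, not confirmed by the snippet):

train_logger.log({'epoch': 1, 'loss': 0.93, 'acc': 0.71, 'lr': learning_rate})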
Example No. 40
transforms_ = [
    transforms.Resize(int(opt.size * 1.12), Image.BICUBIC),
    transforms.RandomCrop(opt.size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
dataloader = DataLoader(ImageDataset(opt.dataroot,
                                     transforms_=transforms_,
                                     unaligned=True),
                        batch_size=opt.batchSize,
                        shuffle=True,
                        num_workers=opt.n_cpu)

# Loss plot
logger = Logger(opt.n_epochs, len(dataloader))
###################################

###### Training ######
for epoch in range(opt.epoch, opt.n_epochs):
    for i, batch in enumerate(dataloader):
        # Set model input
        real_A = Variable(input_A.copy_(batch['A']))
        real_B = Variable(input_B.copy_(batch['B']))

        ###### Generators A2B and B2A ######
        optimizer_G.zero_grad()

        # Identity loss
        # G_A2B(B) should equal B if real B is fed
        same_B = netG_A2B(real_B)
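
The snippet is cut off mid-step; in the standard CycleGAN recipe the identity term continues roughly as below (a sketch of the usual recipe, not the elided original; criterion_identity, netG_B2A, and the 5.0 weight are assumptions):

loss_identity_B = criterion_identity(same_B, real_B) * 5.0
# G_B2A(A) should equal A if real A is fed
same_A = netG_B2A(real_A)
loss_identity_A = criterion_identity(same_A, real_A) * 5.0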
Example No. 41
            training_data_summe, training_data_etmd, training_data_avad
        ])

        opt.batch_sizes = {
            'global': opt.effective_batch_size,
            'sal': opt.effective_batch_size
        }

        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   drop_last=True,
                                                   pin_memory=True)
        train_logger = Logger(
            os.path.join(opt.result_path, 'train.log'),
            ['epoch', 'loss', 'loss_sal', 'sal_cross', 'cc', 'nss', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'cc', 'nss', 'lr'])

        if opt.nesterov:
            dampening = 0
        else:
            dampening = opt.dampening

        optimizer = {'global': [], 'sal': [], 'sound': [], 'fusion': []}
        optimizer['global'] = optim.SGD(parameters['global'],
                                        lr=opt.learning_rate_global,
                                        momentum=opt.momentum,
                                        dampening=dampening,
Example No. 42
def main():
    global best_top1, best_top5
    
    args.world_size = 1
    
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code    
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    crop_size = 224
    val_size = 256

    pipe = HybridTrainPipe(batch_size=args.train_batch, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir, crop=crop_size, dali_cpu=args.dali_cpu)
    pipe.build()
    train_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    pipe = HybridValPipe(batch_size=args.test_batch, num_threads=args.workers, device_id=args.local_rank, data_dir=valdir, crop=crop_size, size=val_size)
    pipe.build()
    val_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
                    baseWidth=args.base_width,
                    cardinality=args.cardinality,
                )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    if args.optimizer.lower() == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.optimizer.lower() == 'adamw':
        optimizer = AdamW(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay, warmup=0)
    elif args.optimizer.lower() == 'radam':
        optimizer = RAdam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)
    elif args.optimizer.lower() == 'lsadam': 
        optimizer = LSAdamW(model.parameters(), lr=args.lr*((1.+4.*args.sigma)**(0.25)), 
                           betas=(args.beta1, args.beta2),
                           weight_decay=args.weight_decay, 
                           sigma=args.sigma)
    elif args.optimizer.lower() == 'lsradam':
        optimizer = LSRAdam(model.parameters(), lr=args.lr*((1.+4.*args.sigma)**(0.25)),
                            betas=(args.beta1, args.beta2),
                            weight_decay=args.weight_decay,
                            sigma=args.sigma)
    elif args.optimizer.lower() == 'srsgd':
        iter_count = 1
        optimizer = SGD_Adaptive(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, iter_count=iter_count, restarting_iter=args.restart_schedule[0])
    elif args.optimizer.lower() == 'sradam':
        iter_count = 1
        optimizer = SRNAdam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, restarting_iter=args.restart_schedule[0]) 
    elif args.optimizer.lower() == 'sradamw':
        iter_count = 1
        optimizer = SRAdamW(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup=0, restarting_iter=args.restart_schedule[0])
    elif args.optimizer.lower() == 'srradam':
        #NOTE: need to double-check this
        iter_count = 1
        optimizer = SRRAdam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup=0, restarting_iter=args.restart_schedule[0])
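        # Note: iter_count is defined only in the SR-family branches above; the
        # resume path below reads optimizer.param_groups[0]['iter_count'] and
        # therefore assumes one of those optimizers was selected.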
        
    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: checkpoint file not found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_top1 = checkpoint['best_top1']
        best_top5 = checkpoint['best_top5']
        start_epoch = checkpoint['epoch'] - 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        iter_count = optimizer.param_groups[0]['iter_count']
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Top1', 'Valid Top1', 'Train Top5', 'Valid Top5'])
        
    logger.file.write('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))


    if args.evaluate:
        logger.file.write('\nEvaluation only')
        test_loss, test_top1, test_top5 = test(val_loader, model, criterion, start_epoch, use_cuda, logger)
        logger.file.write(' Test Loss:  %.8f, Test Top1:  %.2f, Test Top5: %.2f' % (test_loss, test_top1, test_top5))
        return

    # Train and val
    schedule_index = 1
    for epoch in range(start_epoch, args.epochs):
        if args.optimizer.lower() == 'srsgd':
            if epoch in args.schedule:
                optimizer = SGD_Adaptive(model.parameters(), lr=args.lr * (args.gamma**schedule_index), weight_decay=args.weight_decay, iter_count=iter_count, restarting_iter=args.restart_schedule[schedule_index])
                schedule_index += 1
                
        elif args.optimizer.lower() == 'sradam':
            if epoch in args.schedule:
                optimizer = SRNAdam(model.parameters(), lr=args.lr * (args.gamma**schedule_index), betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, restarting_iter=args.restart_schedule[schedule_index]) 
                schedule_index += 1
                
        elif args.optimizer.lower() == 'sradamw':
            if epoch in args.schedule:
                optimizer = SRAdamW(model.parameters(), lr=args.lr * (args.gamma**schedule_index), betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup=0, restarting_iter=args.restart_schedule[schedule_index])
                schedule_index += 1
                
        elif args.optimizer.lower() == 'srradam':
            if epoch in args.schedule:
                optimizer = SRRAdam(model.parameters(), lr=args.lr * (args.gamma**schedule_index), betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup=0, restarting_iter=args.restart_schedule[schedule_index])
                schedule_index += 1
            
        else:
            adjust_learning_rate(optimizer, epoch)

        logger.file.write('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        
        if args.optimizer.lower() in ('srsgd', 'sradam', 'sradamw', 'srradam'):
            train_loss, train_top1, train_top5, iter_count = train(train_loader, model, criterion, optimizer, epoch, use_cuda, logger)
        else:
            train_loss, train_top1, train_top5 = train(train_loader, model, criterion, optimizer, epoch, use_cuda, logger)

        test_loss, test_top1, test_top5 = test(val_loader, model, criterion, epoch, use_cuda, logger)

        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_top1, test_top1, train_top5, test_top5])

        writer.add_scalars('train_loss', {args.model_name: train_loss}, epoch)
        writer.add_scalars('test_loss', {args.model_name: test_loss}, epoch)
        writer.add_scalars('train_top1', {args.model_name: train_top1}, epoch)
        writer.add_scalars('test_top1', {args.model_name: test_top1}, epoch)
        writer.add_scalars('train_top5', {args.model_name: train_top5}, epoch)
        writer.add_scalars('test_top5', {args.model_name: test_top5}, epoch)

        # save model
        is_best = test_top1 > best_top1
        best_top1 = max(test_top1, best_top1)
        best_top5 = max(test_top5, best_top5)
        save_checkpoint({
                'epoch': epoch + 1,
                'schedule_index': schedule_index,
                'state_dict': model.state_dict(),
                'top1': test_top1,
                'top5': test_top5,
                'best_top1': best_top1,
                'best_top5': best_top5,
                'optimizer' : optimizer.state_dict(),
            }, is_best, epoch, checkpoint=args.checkpoint)
        
        # reset DALI iterators
        train_loader.reset()
        val_loader.reset()
        
    logger.file.write('\nBest top1: %f' % best_top1)
    logger.file.write('\nBest top5: %f' % best_top5)
    
    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best top1: %f'%best_top1)
    print('Best top5: %f'%best_top5)
    
    with open("./all_results_imagenet.txt", "a") as f:
        fcntl.flock(f, fcntl.LOCK_EX)
        f.write("%s\n"%args.checkpoint)
        f.write("best_top1 %f, best_top5 %f\n\n"%(best_top1,best_top5))
        fcntl.flock(f, fcntl.LOCK_UN)
Ejemplo n.º 43
0
    def send(self, time, packet, origin, from_free=False):
        """
        Sends a packet to a destination.

        Args:
            time (int):                The time at which the packet was sent.
            packet (Packet):           The packet.
            origin (Host|Router):      The node origin of the packet.
            from_free (bool):          Whether this call was triggered by the
                                       link becoming free (see comment below).
        """
        origin_id = self.get_direction_by_node(origin)
        dst_id = 3 - origin_id
        destination = self.get_node_by_direction(dst_id)
        if self.in_use or self.packets_on_link[origin_id] != []:
            if self.current_dir is not None:
                Logger.debug(
                    time, "Link %s in use, currently sending to node "
                    "%d (trying to send %s)" %
                    (self.id, self.current_dir, packet))
            else:
                Logger.debug(
                    time, "Link %s in use, currently sending to node "
                    "%d (trying to send %s)" % (self.id, origin_id, packet))
            if self.buffer.size() >= self.buffer_size:
                # Drop packet if buffer is full
                Logger.debug(time, "Buffer full; packet %s dropped." % packet)
                self.dispatch(DroppedPacketEvent(time, self.id))
                return
            self.buffer.add_to_buffer(packet, dst_id, time)
        else:
            if not from_free and self.buffer.buffers[dst_id] != []:
                # Since events are not necessarily executed in the order we
                # would expect, there may be a case where the link was free
                # (nothing on the other side and nothing currently being put
                # on) but the actual event had not yet fired.
                #
                # In such a case, the buffer will not have been popped from
                # yet, so put the packet we want to send on the buffer and
                # take the first packet instead.
                self.buffer.add_to_buffer(packet, dst_id, time)
                packet = self.buffer.pop_from_buffer(dst_id, time)
            Logger.debug(
                time, "Link %s free, sending packet %s to %s" %
                (self.id, packet, destination))
            self.in_use = True
            self.current_dir = dst_id
            transmission_delay = self.transmission_delay(packet)

            self.dispatch(
                PacketSentOverLinkEvent(time, packet, destination, self))

            # Link will be free to send to same spot once packet has passed
            # through fully, but not to send from the current destination until
            # the packet has completely passed.
            # Transmission delay is delay to put a packet onto the link
            self.dispatch(
                LinkFreeEvent(time + transmission_delay, self, dst_id, packet))
            self.dispatch(
                LinkFreeEvent(time + transmission_delay + self.delay, self,
                              self.get_other_id(dst_id), packet))
            self.update_link_throughput(time, packet,
                                        time + transmission_delay + self.delay)
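
transmission_delay() is used above but not shown; under the usual definition it is the packet size divided by the link rate (a sketch with assumed attribute names):

def transmission_delay(self, packet):
    # Time to place the whole packet onto the link: size over capacity.
    return packet.size / self.rate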
Example No. 44
def main():
    global best_acc
    # global record
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('===> Preparing dataset cifar10...')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    dataloader = datasets.CIFAR10
    num_classes = 10

    trainset = dataloader(root='./data',
                          train=True,
                          download=True,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root='./data',
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> Creating Model ResNet")
    model = resnet.ResNet()
    print("==> Create Successfully!")
    # print(model)

    if not args.fixbit:
        print("    Train both model and bitwise parameters...")
    else:
        print(
            "    Train model with trained fixed bit number and parameters...")

    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + 'ResNet-with-BIB'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: checkpoint file not found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        resnet.RecordActivation = False
        # FirstEpoch stays disabled here regardless of args.fixbit or epoch.
        resnet.FirstEpoch = False
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(trainloader, model, criterion, optimizer,
                                      epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

        if not args.fixbit:
            calculate_optimal_alpha.cal_quant()

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
Example No. 45
def main():
    parser = argparse.ArgumentParser(description='ENZYMES graph classification')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_layers', type=int, default=4)
    parser.add_argument('--hidden_size', type=int, default=128)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--runs', type=int, default=10)
    parser.add_argument('--eval', action='store_true',
                        help='If not set, we will only do the training part.')
    parser.add_argument('--eval_batch_size', type=int, default=2048)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = TUDataset('dataset', name='ENZYMES', use_node_attr=True)
    dataset = dataset.shuffle()

    train_loader = DataLoader(dataset[:len(dataset) // 10 * 8], batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    val_loader = DataLoader(dataset[len(dataset) // 10 * 8 : len(dataset) // 10 * 9], batch_size=args.eval_batch_size, shuffle=False, num_workers=0)
    test_loader = DataLoader(dataset[len(dataset) // 10 * 9:], batch_size=args.eval_batch_size, shuffle=False, num_workers=0)

    model = GCN(dataset.num_features,
                args.hidden_size,
                num_classes=dataset.num_classes,
                num_layers=args.num_layers,
                dropout=args.dropout).to(device)

    logger = Logger(args.runs, args)
    dur = []
    for run in range(args.runs):
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

        for epoch in range(1, args.epochs + 1):
            t0 = time.time()
            loss = train(model, device, train_loader, optimizer)
            if epoch >= 3:
                dur.append(time.time() - t0)
                print('Training time/epoch {}'.format(np.mean(dur)))

            if not args.eval:
                continue

            val_acc = test(model, device, val_loader)
            test_acc = test(model, device, test_loader)
            logger.add_result(run, (0.0, val_acc, test_acc))

            if epoch % args.log_steps == 0:
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Valid: {val_acc * 100:.2f}% '
                      f'Test: {test_acc * 100:.2f}%')

        if args.eval:
            logger.print_statistics(run)
    if args.eval:
        logger.print_statistics()
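
train() and test() are referenced above but not shown; minimal PyTorch Geometric-style sketches under assumed semantics (the batch.x / batch.edge_index / batch.batch attributes come from the DataLoader above; the F import is not in the original):

import torch.nn.functional as F

def train(model, device, loader, optimizer):
    model.train()
    total_loss = 0.0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index, batch.batch)
        loss = F.cross_entropy(out, batch.y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * batch.num_graphs
    return total_loss / len(loader.dataset)

@torch.no_grad()
def test(model, device, loader):
    model.eval()
    correct = 0
    for batch in loader:
        batch = batch.to(device)
        pred = model(batch.x, batch.edge_index, batch.batch).argmax(dim=-1)
        correct += int((pred == batch.y).sum())
    return correct / len(loader.dataset)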
Example No. 46
def main():
    global best_prec1, args

    args.gpu = 0
    args.world_size = 1

    if args.distributed:
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    args.total_batch_size = args.world_size * args.batch_size

    if not os.path.isdir(args.checkpoint) and args.local_rank == 0:
        mkdir_p(args.checkpoint)

    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."

    if args.static_loss_scale != 1.0:
        if not args.fp16:
            print("Warning:  if --fp16 is not used, static_loss_scale will be ignored.")

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    model = model.cuda()
    if args.fp16:
        model = network_to_half(model)
    if args.distributed:
        # shared param/delay all reduce turns off bucketing in DDP, for lower latency runs this can improve perf
        # for the older version of APEX please use shared_param, for newer one it is delay_allreduce
        model = DDP(model, delay_allreduce=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.fp16:
        optimizer = FP16_Optimizer(optimizer,
                                   static_loss_scale=args.static_loss_scale,
                                   dynamic_loss_scale=args.dynamic_loss_scale)

    # optionally resume from a checkpoint
    title = 'ImageNet-' + args.arch
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu))
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            if args.local_rank == 0:
                logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        if args.local_rank == 0:
            logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
            logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.', 'Valid Top5.'])

    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')

    if(args.arch == "inception_v3"):
        crop_size = 299
        val_size = 320 # I chose this value arbitrarily, we can adjust.
    else:
        crop_size = 224
        val_size = 256

    pipe = HybridTrainPipe(batch_size=args.batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir, crop=crop_size, dali_cpu=args.dali_cpu)
    pipe.build()
    train_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    pipe = HybridValPipe(batch_size=args.batch_size, num_threads=8, device_id=args.local_rank, data_dir=valdir, crop=crop_size, size=val_size)
    pipe.build()
    val_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    total_time = AverageMeter()
    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch
        adjust_learning_rate(optimizer, epoch,args)

        if args.local_rank == 0:
            print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, optimizer.param_groups[0]['lr']))

        [train_loss, train_acc, avg_train_time] = train(train_loader, model, criterion, optimizer, epoch)
        total_time.update(avg_train_time)
        # evaluate on validation set
        [test_loss, prec1, prec5] = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        if args.local_rank == 0:
            # append logger file
            logger.append([optimizer.param_groups[0]['lr'], train_loss, test_loss, train_acc, prec1, prec5])

            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best,checkpoint=args.checkpoint)
            if epoch == args.epochs - 1:
                print('##Top-1 {0}\n'
                      '##Top-5 {1}\n'
                      '##Perf  {2}'.format(prec1, prec5, args.total_batch_size / total_time.avg))

        # reset DALI iterators
        train_loader.reset()
        val_loader.reset()

    if args.local_rank == 0:
        logger.close()
Example No. 47
def main(args):
    torch.manual_seed(args.rnd_seed)
    np.random.seed(args.rnd_seed)
    random.seed(args.rnd_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    multitask_data = set(['ppi'])
    multitask = args.dataset in multitask_data

    # load and preprocess dataset
    data = load_data(args)

    train_nid = np.nonzero(data.train_mask)[0].astype(np.int64)

    # Normalize features
    if args.normalize:
        train_feats = data.features[train_nid]
        scaler = sklearn.preprocessing.StandardScaler()
        scaler.fit(train_feats)
        features = scaler.transform(data.features)
    else:
        features = data.features

    features = torch.FloatTensor(features)
    if not multitask:
        labels = torch.LongTensor(data.labels)
    else:
        labels = torch.FloatTensor(data.labels)
    if hasattr(torch, 'BoolTensor'):
        train_mask = torch.BoolTensor(data.train_mask)
        val_mask = torch.BoolTensor(data.val_mask)
        test_mask = torch.BoolTensor(data.test_mask)
    else:
        train_mask = torch.ByteTensor(data.train_mask)
        val_mask = torch.ByteTensor(data.val_mask)
        test_mask = torch.ByteTensor(data.test_mask)
    in_feats = features.shape[1]
    n_classes = data.num_labels
    n_edges = data.graph.number_of_edges()

    n_train_samples = train_mask.int().sum().item()
    n_val_samples = val_mask.int().sum().item()
    n_test_samples = test_mask.int().sum().item()

    print("""----Data statistics------'
    #Edges %d
    #Classes %d
    #Train samples %d
    #Val samples %d
    #Test samples %d""" %
            (n_edges, n_classes,
            n_train_samples,
            n_val_samples,
            n_test_samples))
    # create GCN model
    g = data.graph
    if args.self_loop and not args.dataset.startswith('reddit'):
        g.remove_edges_from(nx.selfloop_edges(g))
        g.add_edges_from(zip(g.nodes(), g.nodes()))
        print("adding self-loop edges")
    g = DGLGraph(g, readonly=True)

    # set device for dataset tensors
    if args.gpu < 0:
        cuda = False
    else:
        cuda = True
        torch.cuda.set_device(args.gpu)
        features = features.cuda()
        labels = labels.cuda()
        train_mask = train_mask.cuda()
        val_mask = val_mask.cuda()
        test_mask = test_mask.cuda()

    if cuda:
        print(torch.cuda.get_device_name(args.gpu))

    g.ndata['features'] = features
    g.ndata['labels'] = labels
    g.ndata['train_mask'] = train_mask
    print('labels shape:', labels.shape)

    cluster_iterator = ClusterIter(
        args.dataset, g, args.psize, args.batch_size, train_nid, use_pp=args.use_pp)

    print("features shape, ", features.shape)

    model = GraphSAGE(in_feats,
                      args.n_hidden,
                      n_classes,
                      args.n_layers,
                      F.relu,
                      args.dropout,
                      args.use_pp)

    if cuda:
        model.cuda()

    # logger and so on
    log_dir = save_log_dir(args)
    writer = SummaryWriter(log_dir)
    logger = Logger(os.path.join(log_dir, 'loggings'))
    logger.write(args)

    # Loss function
    if multitask:
        print('Using multi-label loss')
        loss_f = nn.BCEWithLogitsLoss()
    else:
        print('Using multi-class loss')
        loss_f = nn.CrossEntropyLoss()

    # use optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # set train_nids to cuda tensor
    if cuda:
        train_nid = torch.from_numpy(train_nid).cuda()
    print("current memory after model before training",
          torch.cuda.memory_allocated(device=train_nid.device) / 1024 / 1024)
    start_time = time.time()
    best_f1 = -1

    for epoch in range(args.n_epochs):
        for j, cluster in enumerate(cluster_iterator):
            # sync with upper level training graph
            cluster.copy_from_parent()
            model.train()
            # forward
            pred = model(cluster)
            batch_labels = cluster.ndata['labels']
            batch_train_mask = cluster.ndata['train_mask']
            loss = loss_f(pred[batch_train_mask],
                          batch_labels[batch_train_mask])

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # in PPI case, `log_every` is chosen to log one time per epoch. 
            # Choose your log freq dynamically when you want more info within one epoch
            if j % args.log_every == 0:
                print(f"epoch:{epoch}/{args.n_epochs}, Iteration {j}/"
                      f"{len(cluster_iterator)}:training loss", loss.item())
                writer.add_scalar('train/loss', loss.item(),
                                  global_step=j + epoch * len(cluster_iterator))
        print("current memory:",
              torch.cuda.memory_allocated(device=pred.device) / 1024 / 1024)

        # evaluate
        if epoch % args.val_every == 0:
            val_f1_mic, val_f1_mac = evaluate(
                model, g, labels, val_mask, multitask)
            print("Val F1-mic {:.4f}, Val F1-mac {:.4f}".format(
                val_f1_mic, val_f1_mac))
            if val_f1_mic > best_f1:
                best_f1 = val_f1_mic
                print('new best val f1:', best_f1)
                torch.save(model.state_dict(), os.path.join(
                    log_dir, 'best_model.pkl'))
            writer.add_scalar('val/f1-mic', val_f1_mic, global_step=epoch)
            writer.add_scalar('val/f1-mac', val_f1_mac, global_step=epoch)

    end_time = time.time()
    print(f'training used time {end_time - start_time}')

    # test
    if args.use_val:
        model.load_state_dict(torch.load(os.path.join(
            log_dir, 'best_model.pkl')))
    test_f1_mic, test_f1_mac = evaluate(
        model, g, labels, test_mask, multitask)
    print("Test F1-mic{:.4f}, Test F1-mac{:.4f}". format(test_f1_mic, test_f1_mac))
    writer.add_scalar('test/f1-mic', test_f1_mic)
    writer.add_scalar('test/f1-mac', test_f1_mac)
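
evaluate() is called above but not shown; a sketch under assumed semantics (full-graph inference, micro/macro F1 via scikit-learn; the metrics import is not in the original):

import sklearn.metrics

@torch.no_grad()
def evaluate(model, g, labels, mask, multitask):
    model.eval()
    logits = model(g)
    if multitask:
        pred = (torch.sigmoid(logits[mask]) > 0.5).float()
    else:
        pred = logits[mask].argmax(dim=1)
    y_true = labels[mask].cpu().numpy()
    y_pred = pred.cpu().numpy()
    return (sklearn.metrics.f1_score(y_true, y_pred, average='micro'),
            sklearn.metrics.f1_score(y_true, y_pred, average='macro'))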
Example No. 48
model = modelC(args).to(device)

if pretrained_path:
    print('Loading model {} from {}'.format(model.modelName, pretrained_path))
    model.load_state_dict(torch.load(pretrained_path + '/model.rar'))
    model._pz_params = model._pz_params

if not args.experiment:
    args.experiment = model.modelName

# set up run path
runId = datetime.datetime.now().isoformat()
experiment_dir = Path('../experiments/' + args.experiment)
experiment_dir.mkdir(parents=True, exist_ok=True)
runPath = mkdtemp(prefix=runId, dir=str(experiment_dir))
sys.stdout = Logger('{}/run.log'.format(runPath))
print('Expt:', runPath)
print('RunID:', runId)

# save args to run
with open('{}/args.json'.format(runPath), 'w') as fp:
    json.dump(args.__dict__, fp)
# -- also save object because we want to recover these for other things
torch.save(args, '{}/args.rar'.format(runPath))

# preparation for training
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=1e-3,
                       amsgrad=True)
train_loader, test_loader = model.getDataLoaders(args.batch_size,
                                                 device=device)
Ejemplo n.º 49
0
                acc += 1
            cnt += 1
        acc = acc * 100.0 / cnt
        self.log.info('the top5 accuracy of the dataset is: {}%'.format(acc))
        self.log.info('====== testing over ======')

    def save_model(self, path):
        torch.save(
            {
                'length': self.length,
                'feature_prob': self.feature_prob,
                'transform': self.transform
            }, path)
        self.log.info('model saved in path:{}'.format(os.path.abspath(path)))

    def load_model(self, path):
        params = torch.load(path)
        self.length = params['length']
        self.feature_prob = params['feature_prob']
        self.transform = params['transform']
        self.log.info('NaiveBayesian model loaded')


if __name__ == '__main__':
    log = Logger('logs/bayesian/Bayesian.log', level='debug')
    my_net = NaiveBayes(log.logger)
    my_net.fit(data_loader('.\\data'))
    my_net.test(data_loader('.\\test'))
    my_net.top5rate(data_loader('.\\test'))
    my_net.save_model('.\\Model\\naivebayes.pkl')
Ejemplo n.º 50
0
def main():
    logger = Logger(name="train", log_level="INFO", file_log=True)
    logger.info("start")

    train = pd.concat(
        [
            pd.read_pickle(f"../features/{cls.name}_train.pkl")
            for cls in get_features(globals())
        ],
        axis=1,
    )

    logger.info("loaded train features")

    test = pd.concat(
        [
            pd.read_pickle(f"../features/{cls.name}_test.pkl")
            for cls in get_features(globals())
        ],
        axis=1,
    )
    logger.info("loaded test features")

    target = pd.read_pickle("../input/target.pkl")

    categorical_cols = [
        "species",
        "owner",
        "countryof_origin",
        "mill",
        "ico_number",
        "company",
        "region",
        "producer",
        "bag_weight",
        "in_country_partner",
        "harvest_year",
        "grading_date",
        "owner1",
        "variety",
        "processing_method",
        "color",
        "expiration",
        "unit_of_measurement",
    ]

    cv = get_validator("stratified", n_splits=10, random_state=42)

    target_bin = pd.qcut(
        target["total_cup_points"], 50, duplicates="drop", labels=False
    )
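    # For reference: pd.qcut(..., labels=False) maps the continuous target to
    # integer bin ids, which is what lets a stratified splitter run on a
    # regression target. Toy illustration (assumed values, not competition data):
    #   pd.qcut(pd.Series([1.0, 2.0, 2.0, 3.0, 7.5, 8.0, 9.0, 9.5]),
    #           4, duplicates="drop", labels=False).tolist()
    #   -> [0, 0, 0, 1, 2, 2, 3, 3]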

    # target encoding

    for target_col in target.columns:
        ce_te = ce.TargetEncoder(
            cols=categorical_cols,
            handle_missing="return_nan",
            handle_unknown="return_nan",
        )
        cv_ft = CVFeatureTransformer(cv=cv, transformer=ce_te, as_frame=True)
        cv_ft.fit(
            train.loc[:, categorical_cols], target[target_col], cv_target=target_bin,
        )
        train_te = cv_ft.transform(train.loc[:, categorical_cols]).add_prefix(
            f"te-{target_col}-"
        )
        train = pd.concat([train, train_te], axis=1)
        test_te = cv_ft.transform(test.loc[:, categorical_cols]).add_prefix(
            f"te-{target_col}-"
        )
        test = pd.concat([test, test_te], axis=1)

    logger.info("finish target encording")

    params = {
        "objective": "mae",
        "boosting_type": "gbdt",
        "num_leaves": 33,
        "cat_smooth": 5,
        "n_estimators": 10000,
        "learning_rate": 0.01,
        "random_state": 42,
        "importance_type": "gain",
    }

    fit_params = {
        "early_stopping_rounds": 20,
        "verbose": 500,
        "categorical_feature": categorical_cols,
    }

    fti = pd.DataFrame(index=train.columns, columns=target.columns)
    oof = pd.DataFrame(index=train.index, columns=target.columns)
    pred = pd.DataFrame(index=test.index, columns=target.columns)
    oof_scores = []
    for target_col in target.columns:
        logger.info(f"start train {target_col}")

        model = LgbModel(
            cv=cv,
            eval_metric=mae,
            params=params,
            fit_params=fit_params,
            type_of_target="regression",
            logger=logger,
            cv_target=target_bin,
        )
        model = model.fit(train, target[target_col])
        fti[target_col] = model.get_feature_importance()
        logger.info(model.oof_score)
        logger.info(model.oof_fold_scores)
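        # cupping scores are bounded in [0, 100], so clip OOF and test predictions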
        oof[target_col] = np.clip(model.oof, 0, 100)
        pred[target_col] = np.clip(model.predict(test), 0, 100)
        oof_scores.append(model.oof_score)

    score = np.mean(oof_scores)
    logger.info(target.columns)
    logger.info(f"oof scores: {oof_scores}")
    logger.info(f"mean mae score: {score}")

    oof.to_csv(f"../output/oof_{score}.csv")
    pred.to_csv(f"../output/sub_{score}.csv", index=False)
    fti.to_csv(f"../output/fti_{score}.csv")

    logger.info("finish")
Ejemplo n.º 51
0
def main():
    torch.set_num_threads(1)
    device = torch.device("cuda:0" if args.cuda else "cpu")

    run_id = "alpha{}".format(args.gcn_alpha)
    if args.use_logger:
        from utils import Logger
        folder = "{}/{}".format(args.folder, run_id)
        logger = Logger(algo_name=args.algo,
                        environment_name=args.env_name,
                        folder=folder,
                        seed=args.seed)
        logger.save_args(args)

        print("---------------------------------------")
        print('Saving to', logger.save_folder)
        print("---------------------------------------")

    else:
        print("---------------------------------------")
        print('NOTE : NOT SAVING RESULTS')
        print("---------------------------------------")
    all_rewards = []

    envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
                         args.gamma, args.log_dir, args.add_timestep, device,
                         False)

    actor_critic = Policy(envs.observation_space.shape,
                          envs.action_space,
                          args.env_name,
                          base_kwargs={'recurrent': args.recurrent_policy})
    actor_critic.to(device)

    if args.algo == 'a2c':
        agent = algo.A2C_ACKTR(actor_critic,
                               args.value_loss_coef,
                               args.entropy_coef,
                               lr=args.lr,
                               eps=args.eps,
                               alpha=args.alpha,
                               max_grad_norm=args.max_grad_norm)
    elif args.algo == 'ppo':
        agent = algo.PPO(actor_critic,
                         args.clip_param,
                         args.ppo_epoch,
                         args.num_mini_batch,
                         args.value_loss_coef,
                         args.entropy_coef,
                         lr=args.lr,
                         eps=args.eps,
                         max_grad_norm=args.max_grad_norm)
    elif args.algo == 'acktr':
        agent = algo.A2C_ACKTR(actor_critic,
                               args.value_loss_coef,
                               args.entropy_coef,
                               acktr=True)

    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              envs.observation_space.shape, envs.action_space,
                              actor_critic.recurrent_hidden_state_size,
                              actor_critic.base.output_size)

    obs = envs.reset()
    rollouts.obs[0].copy_(obs)
    rollouts.to(device)

    ############################
    # GCN Model and optimizer
    from pygcn.train import update_graph
    from pygcn.models import GCN
    gcn_model = GCN(nfeat=actor_critic.base.output_size, nhid=args.gcn_hidden)
    gcn_model.to(device)
    gcn_optimizer = optim.Adam(gcn_model.parameters(),
                               lr=args.gcn_lr,
                               weight_decay=args.gcn_weight_decay)
    gcn_loss = nn.NLLLoss()
    gcn_states = [[] for _ in range(args.num_processes)]
    Gs = [nx.Graph() for _ in range(args.num_processes)]
    node_ptrs = [0 for _ in range(args.num_processes)]
    rew_states = [[] for _ in range(args.num_processes)]
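    # Per-process episode state: node features (gcn_states), graph topology
    # (Gs), a node counter (node_ptrs) and reward-bearing nodes (rew_states);
    # all four are reset whenever that process's episode ends.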
    ############################

    episode_rewards = deque(maxlen=100)
    avg_fwdloss = deque(maxlen=100)
    rew_rms = RunningMeanStd(shape=())
    delay_rew = torch.zeros([args.num_processes, 1])
    delay_step = torch.zeros([args.num_processes])

    start = time.time()
    for j in range(num_updates):

        if args.use_linear_lr_decay:
            # decrease learning rate linearly
            update_linear_schedule(
                agent.optimizer, j, num_updates,
                agent.optimizer.lr if args.algo == "acktr" else args.lr)

        for step in range(args.num_steps):
            # Sample actions
            with torch.no_grad():
                (value, action, action_log_prob,
                 recurrent_hidden_states, hidden_states) = actor_critic.act(
                     rollouts.obs[step],
                     rollouts.recurrent_hidden_states[step],
                     rollouts.masks[step])

            # Observe reward and next obs
            obs, reward, done, infos = envs.step(action)
            delay_rew += reward
            delay_step += 1
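            # Delayed-reward bookkeeping: rewards accumulate in delay_rew and
            # are only released every args.reward_freq steps or at episode end;
            # in between, the agent observes a reward of 0.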

            for idx, (info, hid,
                      eps_done) in enumerate(zip(infos, hidden_states, done)):

                if eps_done or delay_step[idx] == args.reward_freq:
                    reward[idx] = delay_rew[idx]
                    delay_rew[idx] = delay_step[idx] = 0
                else:
                    reward[idx] = 0

                if 'episode' in info.keys():
                    episode_rewards.append(info['episode']['r'])

                if args.gcn_alpha < 1.0:
                    gcn_states[idx].append(hid)
                    node_ptrs[idx] += 1
                    if not eps_done:
                        Gs[idx].add_edge(node_ptrs[idx] - 1, node_ptrs[idx])
                    if reward[idx] != 0. or eps_done:
                        rew_states[idx].append(
                            [node_ptrs[idx] - 1, reward[idx]])
                    if eps_done:
                        adj = nx.adjacency_matrix(Gs[idx]) if len(Gs[idx].nodes) \
                            else sp.csr_matrix(np.eye(1, dtype='int64'))
                        update_graph(gcn_model, gcn_optimizer,
                                     torch.stack(gcn_states[idx]), adj,
                                     rew_states[idx], gcn_loss, args, envs)
                        gcn_states[idx] = []
                        Gs[idx] = nx.Graph()
                        node_ptrs[idx] = 0
                        rew_states[idx] = []

            # If done then clean the history of observations.
            masks = torch.FloatTensor([[0.0] if done_ else [1.0]
                                       for done_ in done])
            rollouts.insert(obs, recurrent_hidden_states, action,
                            action_log_prob, value, reward, masks,
                            hidden_states)

        with torch.no_grad():
            next_value = actor_critic.get_value(
                rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
                rollouts.masks[-1]).detach()

        rollouts.compute_returns(next_value, args.use_gae, args.gamma,
                                 args.tau, gcn_model, args.gcn_alpha)
        agent.update(rollouts)
        rollouts.after_update()

        ####################### Saving and book-keeping #######################
        if (j % int(num_updates / 5.) == 0
                or j == num_updates - 1) and args.save_dir != "":
            print('Saving model')
            print()

            save_dir = "{}/{}/{}".format(args.save_dir, args.folder, run_id)
            save_path = os.path.join(save_dir, args.algo, 'seed' +
                                     str(args.seed)) + '_iter' + str(j)
            try:
                os.makedirs(save_path)
            except OSError:
                pass

            # A really ugly way to save a model to CPU
            save_model = actor_critic
            save_gcn = gcn_model
            if args.cuda:
                save_model = copy.deepcopy(actor_critic).cpu()
                save_gcn = copy.deepcopy(gcn_model).cpu()

            save_model = [
                save_gcn, save_model,
                getattr(envs.venv, 'ob_rms', None)
            ]

            torch.save(save_model,
                       os.path.join(save_path, args.env_name + "ac.pt"))

        total_num_steps = (j + 1) * args.num_processes * args.num_steps

        if j % args.log_interval == 0 and len(episode_rewards) > 1:
            end = time.time()
            print("Updates {}, num timesteps {}, FPS {} \n Last {}\
             training episodes: mean/median reward {:.2f}/{:.2f},\
              min/max reward {:.2f}/{:.2f}, success rate {:.2f}, avg fwdloss {:.2f}\n"
                  .format(
                      j,
                      total_num_steps,
                      int(total_num_steps / (end - start)),
                      len(episode_rewards),
                      np.mean(episode_rewards),
                      np.median(episode_rewards),
                      np.min(episode_rewards),
                      np.max(episode_rewards),
                      np.count_nonzero(np.greater(episode_rewards, 0)) /
                      len(episode_rewards),
                      np.mean(avg_fwdloss),
                  ))

            all_rewards.append(np.mean(episode_rewards))
            if args.use_logger:
                logger.save_task_results(all_rewards)
        ####################### Saving and book-keeping #######################

    envs.close()
Ejemplo n.º 52
0
def main():
    # init model, ResNet18() can be also used here for training
    # model = WideResNet().to(device)
    if args.network == 'smallCNN':
        model = SmallCNN().to(device)
    elif args.network == 'wideResNet':
        model = WideResNet().to(device)
    elif args.network == 'resnet':
        model = ResNet().to(device)
    else:
        model = VGG(args.network, num_classes=10).to(device)

    sys.stdout = Logger(os.path.join(args.log_dir, args.log_file))
    print(model)
    criterion_tla = TripletLoss(10, args.feat_size)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    if args.fine_tune:
        base_dir = args.base_dir
        state_dict = torch.load("{}/{}_ep{}.pt".format(base_dir,
                                                       args.base_model,
                                                       args.checkpoint))
        opt = torch.load("{}/opt-{}_ep{}.tar".format(base_dir, args.base_model,
                                                     args.checkpoint))
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(opt)

    natural_acc = []
    robust_acc = []

    for epoch in range(1, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, epoch)

        start_time = time.time()

        # adversarial training
        train(model, device, train_loader, optimizer, criterion_tla, epoch)

        # evaluation on natural examples
        print(
            '================================================================')
        print("Current time: {}".format(
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        # eval_train(model, device, train_loader)
        # eval_test(model, device, test_loader)
        natural_err_total, robust_err_total = eval_adv_test_whitebox(
            model, device, test_loader)
        with open(os.path.join(stats_dir, '{}.txt'.format(args.save_model)),
                  "a") as f:
            f.write("{} {} {}\n".format(epoch, natural_err_total,
                                        robust_err_total))

        print('using time:',
              datetime.timedelta(seconds=round(time.time() - start_time)))

        natural_acc.append(natural_err_total)
        robust_acc.append(robust_err_total)

        file_name = os.path.join(
            stats_dir, '{}_stat{}.npy'.format(args.save_model, epoch))
        # np.save(file_name, np.stack((np.array(self.train_loss), np.array(self.test_loss),
        #                              np.array(self.train_acc), np.array(self.test_acc),
        #                              np.array(self.elasticity), np.array(self.x_grads),
        #                              np.array(self.fgsms), np.array(self.pgds),
        #                              np.array(self.cws))))
        np.save(file_name,
                np.stack((np.array(natural_acc), np.array(robust_acc))))

        # save checkpoint
        if epoch % args.save_freq == 0:
            torch.save(
                model.state_dict(),
                os.path.join(model_dir,
                             '{}_ep{}.pt'.format(args.save_model, epoch)))
            torch.save(
                optimizer.state_dict(),
                os.path.join(model_dir,
                             'opt-{}_ep{}.tar'.format(args.save_model, epoch)))
            print("Ep{}: Model saved as {}.".format(epoch, args.save_model))
        print(
            '================================================================')
Ejemplo n.º 53
0
import requests
from bs4 import BeautifulSoup as bs
import json
from faker import Faker
from random import randint, choice
import string

from distutils.util import strtobool
from utils import Logger

logger = Logger()
fake = Faker()

class Generator:

	def __init__(self):
		"""Load the config file and the user-entered emails."""
		with open('config.json') as file:
			self.config = json.load(file)

		with open('info.json') as file:
			self.info = json.load(file)
		self.count = 0
Ejemplo n.º 54
0
random.seed(2020)

# command-line arguments
parser = argparse.ArgumentParser(description='itemcf recall')
parser.add_argument('--mode', default='valid')
parser.add_argument('--logfile', default='test.log')

args = parser.parse_args()

mode = args.mode
logfile = args.logfile

# initialize logging
os.makedirs('user_data/log', exist_ok=True)
log = Logger(f'user_data/log/{logfile}').logger
log.info(f'itemcf recall, mode: {mode}')


def cal_sim(df):
    user_item_ = df.groupby('user_id')['click_article_id'].agg(
        lambda x: list(x)).reset_index()
    user_item_dict = dict(
        zip(user_item_['user_id'], user_item_['click_article_id']))

    item_cnt = defaultdict(int)
    sim_dict = {}

    # For each user's click sequence, count article pairs: this builds the
    # co-occurrence matrix over all clicked articles, where each entry is the
    # number of users who clicked both articles.
    for _, items in tqdm(user_item_dict.items()):
        for loc1, item in enumerate(items):
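
A rough sketch of how such a co-occurrence pass is typically completed (the snippet above is cut off mid-loop). Names follow the snippet, but the popularity-damped weighting and the normalization are assumptions, not necessarily what this script uses:

import math
from collections import defaultdict

def cal_sim_sketch(user_item_dict):
    # count how often each article was clicked, and how often each pair of
    # articles was clicked by the same user
    item_cnt = defaultdict(int)
    sim_dict = {}
    for items in user_item_dict.values():
        for item in items:
            item_cnt[item] += 1
            sim_dict.setdefault(item, {})
            for relate_item in items:
                if relate_item == item:
                    continue
                # damp contributions from very active users (assumed weighting)
                sim_dict[item].setdefault(relate_item, 0)
                sim_dict[item][relate_item] += 1 / math.log(1 + len(items))
    # cosine-style normalization by item popularity
    for item, relate_items in sim_dict.items():
        for relate_item, cij in relate_items.items():
            sim_dict[item][relate_item] = cij / math.sqrt(
                item_cnt[item] * item_cnt[relate_item])
    return sim_dict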
Ejemplo n.º 55
0
# =============================================================================
# This script is designed to be run periodically using crontab. The purpose
# of this script is perform maintenance tasks e.g. auto delete old data
# =============================================================================

import os
from config import settings
from crontab import CronTab
import click, arrow
from utils import Logger

log = Logger()


class Maintenance():
    @staticmethod
    def task_purge_logs():
        log.funcexec('Purging log data')
        log.purge(
            log=True,
            end=arrow.utcnow().shift(days=-settings.PURGE['days_to_keep_log']))

    @staticmethod
    def task_purge_unregistered_devices_data():
        log.funcexec('Auto deleting unregistered devices data')
        from utils import Devices
        devices = Devices()
        devices.purge(unregistered=True,
                      end=arrow.utcnow().shift(
                          minutes=-settings.PURGE['unreg_keep_for']))
Ejemplo n.º 56
0
def main():
    # init or load model
    print("init model with input shape", config["input_shape"])
    model = NvNet(config=config)
    parameters = model.parameters()
    optimizer = optim.Adam(parameters,
                           lr=config["initial_learning_rate"],
                           weight_decay=config["L2_norm"])
    start_epoch = 1
    if config["VAE_enable"]:
        loss_function = CombinedLoss(k1=config["loss_k1_weight"],
                                     k2=config["loss_k2_weight"])
    else:
        loss_function = SoftDiceLoss()
    # data_generator
    print("data generating")
    training_data = BratsDataset(phase="train", config=config)
    validation_data = BratsDataset(phase="validate", config=config)

    train_logger = Logger(model_name=config["model_file"],
                          header=['epoch', 'loss', 'acc', 'lr'])

    if config["cuda_devices"] is not None:
        # model = nn.DataParallel(model)  # multi-gpu training
        model = model.cuda()
        loss_function = loss_function.cuda()

    if not config["overwrite"] and config["saved_model_file"] is not None:
        if not os.path.exists(config["saved_model_file"]):
            raise Exception("Invalid model path!")
        model, start_epoch, optimizer = load_old_model(
            model, optimizer, saved_model_path=config["saved_model_file"])
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               factor=config["lr_decay"],
                                               patience=config["patience"])

    print("training on label:{}".format(config["labels"]))
    max_val_acc = 0.
    for i in range(start_epoch, config["epochs"]):
        train_epoch(epoch=i,
                    data_set=training_data,
                    model=model,
                    criterion=loss_function,
                    optimizer=optimizer,
                    opt=config,
                    logger=train_logger)

        val_loss, val_acc = val_epoch(epoch=i,
                                      data_set=validation_data,
                                      model=model,
                                      criterion=loss_function,
                                      opt=config,
                                      optimizer=optimizer,
                                      logger=train_logger)
        scheduler.step(val_loss)
        if config["checkpoint"] and val_acc > max_val_acc:
            max_val_acc = val_acc
            save_dir = os.path.join(
                config["result_path"],
                config["model_file"].split("/")[-1].split(".h5")[0])
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            save_states_path = os.path.join(
                save_dir, 'epoch_{0}_val_loss_{1:.4f}_acc_{2:.4f}.pth'.format(
                    i, val_loss, val_acc))
            states = {
                'epoch': i + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            torch.save(states, save_states_path)
            save_model_path = os.path.join(save_dir, "best_model_file.pth")
            if os.path.exists(save_model_path):
                os.remove(save_model_path)
            torch.save(model, save_model_path)
Ejemplo n.º 57
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'valf')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    data_aug_scale = (0.08, 1.0) if args.modelsize == 'large' else (0.92, 1.0)

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224, scale=data_aug_scale),
                transforms.RandomHorizontalFlip(),
                # transforms.ToTensor(),
                # normalize,
            ])),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=fast_collate)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                # transforms.ToTensor(),
                # normalize,
            ])),
        batch_size=args.test_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=fast_collate)
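    # Note: ToTensor/normalize are commented out above presumably because
    # fast_collate plus a GPU-side prefetcher (apex-style pipeline) is expected
    # to do the uint8-to-float conversion and normalization during training.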

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif 'resnext' in args.arch:
        model = models.__dict__[args.arch](
            baseWidth=args.base_width,
            cardinality=args.cardinality,
        )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    flops, params = get_model_complexity_info(model, (224, 224),
                                              as_strings=False,
                                              print_per_layer_stat=False)
    print('Flops:  %.3fG' % (flops / 1e9))
    print('Params: %.2fM' % (params / 1e6))

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..', args.resume)
        assert os.path.isfile(
            args.resume), 'Error: checkpoint file not found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        # model may have more keys
        t = model.state_dict()
        c = checkpoint['state_dict']
        flag = True
        for k in t:
            if k not in c:
                print('key missing from checkpoint, filling from model:', k)
                c[k] = t[k]
                flag = False
        model.load_state_dict(c)
        if flag:
            print('optimizer load old state')
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('new optimizer !')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()

    print('Best acc:')
    print(best_acc)
Ejemplo n.º 58
0
def main():
    chkpoint_name = args.checkpoint[args.checkpoint.rfind('/') + 1:]
    # import pdb; pdb.set_trace()
    global writer
    writer = SummaryWriter(log_dir='runs/' + chkpoint_name)
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root='./data',
                          train=True,
                          download=True,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root='./data',
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    # model = torch.nn.DataParallel(model).cuda()
    # model = model.cuda(torch.device('cuda:1'))
    model = model.cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    torch.save(model, 'temporary.pth')
    new_model = torch.load('temporary.pth')

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + args.arch

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: checkpoint file not found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])
    print(model)

    # import pdb; pdb.set_trace()
    look_up_table = get_look_up_table(model)

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))

        if DEBUG:
            # print(model)
            show_low_rank(model,
                          input_size=[32, 32],
                          criterion=ValueThreshold(t),
                          type=args.type)

        print(' Start decomposition:')

        # set different threshold for model compression and test accuracy
        all_channs_ = []
        thresholds = np.arange(0.1, 1.0, 0.01).tolist()
        # alternative: [5e-2] if args.type != 'ND' else [0.85]
        sigma_criterion = ValueThreshold if args.type != 'ND' else EnergyThreshold
        T = np.array(thresholds)
        cr = np.zeros(T.shape)
        acc = np.zeros(T.shape)
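        # The sweep below records, for each threshold t, the compression ratio
        # in cr[i] and the test accuracy of the decoupled model in acc[i].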

        model_path = 'net.pth'
        torch.save(model, model_path)
        result = 'result.pth' if not args.retrain else 'result-retrain.pth'

        for i, t in enumerate(thresholds):
            test_model = torch.load(model_path)
            print('=====================================Threshold is', t)
            cr[i], channs_ = show_low_rank(test_model,
                                           look_up_table,
                                           input_size=[32, 32],
                                           criterion=sigma_criterion(t),
                                           type=args.type)
            all_channs_.append(channs_)
            test_model = f_decouple(test_model,
                                    look_up_table,
                                    criterion=sigma_criterion(t),
                                    train=False,
                                    stride_1_only=args.stride_1_only)
            #print(model)
            print(' Done! test decoupled model')
            test_loss, test_acc = test(testloader, test_model, criterion,
                                       start_epoch, use_cuda)
            print(' Test Loss :  %.8f, Test Acc:  %.2f' %
                  (test_loss, test_acc))
            acc[i] = test_acc

            if args.retrain:
                # retrain model
                finetune_epoch = 4
                acc[i] = model_retrain(finetune_epoch, test_model, trainloader, \
                     testloader, criterion, look_up_table, use_cuda)
        torch.save(test_model, 'model.pth.tar')
        torch.save(OrderedDict([('acc', acc), ('cr', cr)]), result)
        print('compression ratio:')
        print(cr)
        print('accuracy:')
        print(acc)
        print(all_channs_)

        return

    # Train and val

    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(trainloader, model, criterion, optimizer,
                                      look_up_table, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
Ejemplo n.º 59
0
        #         if parse.metrics=="F1_score":
        #             results["metrics"]= calculate_F1(ind_actual,ind_pred)
        #         results["prediction"]=predict
        #     print(predict,"prediction")
        return results


if __name__ == "__main__":
    arg = ml_arg()
    argg = arg.parse_args()

    data = get_data(argg)
    directory = argg.log_file + "current/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    sys.stdout = Logger(directory + "log.txt")
    print(argg)
    re = itera_tree(data.X_train, data.label_train, parse=argg)
    tree_dec_np = conver2numpy(re)

    #         print(tree_dec_np)
    results = batch_predic(data.X_val, data.label_val, tree_dec_np, argg)
    print(results, "val")
    data_xl = pd.DataFrame.from_dict(results["prediction"])
    data_xl.to_csv(directory + "log" + str(datetime.now()) + ".csv")
    param = argg
    results = batch_predic(data.X_train, data.label_train, tree_dec_np, argg)
    print(results, "train")
    results = batch_predic(data.X_test, data.label_test, tree_dec_np, argg)
    print(results, "test")
    os.rename(
Ejemplo n.º 60
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.save_dir):
        mkdir_p(args.save_dir)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root='./data',
                          train=True,
                          download=True,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root='./data',
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: checkpoint file not found!'
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        logger = Logger(os.path.join(args.save_dir, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    print('\nEvaluation only')
    test_loss0, test_acc0 = test(testloader, model, criterion, start_epoch,
                                 use_cuda)
    print('Before pruning: Test Loss:  %.8f, Test Acc:  %.2f' %
          (test_loss0, test_acc0))

    # -------------------------------------------------------------
    # pruning: global magnitude-based pruning over all conv layers
    total = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            total += m.weight.data.numel()
    conv_weights = torch.zeros(total)
    index = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            size = m.weight.data.numel()
            conv_weights[index:(index +
                                size)] = m.weight.data.view(-1).abs().clone()
            index += size

    y, i = torch.sort(conv_weights)
    thre_index = int(total * args.percent)
    thre = y[thre_index]
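    # `thre` is the args.percent quantile of all conv-weight magnitudes across
    # the whole network; every weight below it is zeroed, i.e. this is global
    # (not per-layer) unstructured magnitude pruning.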
    pruned = 0
    print('Pruning threshold: {}'.format(thre))
    zero_flag = False
    for k, m in enumerate(model.modules()):
        if isinstance(m, nn.Conv2d):
            weight_copy = m.weight.data.abs().clone()
            mask = weight_copy.gt(thre).float().cuda()
            pruned = pruned + mask.numel() - torch.sum(mask)
            m.weight.data.mul_(mask)
            if int(torch.sum(mask)) == 0:
                zero_flag = True
            print(
                'layer index: {:d} \t total params: {:d} \t remaining params: {:d}'
                .format(k, mask.numel(), int(torch.sum(mask))))
    print('Total conv params: {}, Pruned conv params: {}, Pruned ratio: {}'.
          format(total, pruned, pruned / total))
    # -------------------------------------------------------------

    print('\nTesting')
    test_loss1, test_acc1 = test(testloader, model, criterion, start_epoch,
                                 use_cuda)
    print('After Pruning: Test Loss:  %.8f, Test Acc:  %.2f' %
          (test_loss1, test_acc1))
    save_checkpoint(
        {
            'epoch': 0,
            'state_dict': model.state_dict(),
            'acc': test_acc1,
            'best_acc': 0.,
        },
        False,
        checkpoint=args.save_dir)

    with open(os.path.join(args.save_dir, 'prune.txt'), 'w') as f:
        f.write('Before pruning: Test Loss:  %.8f, Test Acc:  %.2f\n' %
                (test_loss0, test_acc0))
        f.write(
            'Total conv params: {}, Pruned conv params: {}, Pruned ratio: {}\n'
            .format(total, pruned, pruned / total))
        f.write('After Pruning: Test Loss:  %.8f, Test Acc:  %.2f\n' %
                (test_loss1, test_acc1))

        if zero_flag:
            f.write("There exists a layer with 0 parameters left.")
    return