Example #1
    def __init__(self,
                 dinput,
                 nstates,
                 sigma=0.1,
                 fbias=0.0,
                 last_state_only=False):
        # stacked weights for the four LSTM gate blocks, scaled by sigma
        W = random(nstates * 4, dinput + nstates + 1) * sigma
        # orthogonal initialization for each gate's recurrent block
        W[0 * nstates:1 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[1 * nstates:2 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[2 * nstates:3 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[3 * nstates:4 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[:, -1] = 0  # initialize all biases to zero
        W[2 * nstates:3 * nstates, -1] = fbias  # forget bias
        self.W = W

        self.c_0 = np.zeros((nstates, 1))
        self.Y_0 = np.zeros((nstates, 1))

        self.dinput = dinput
        self.nstates = nstates
        self.last_state_only = last_state_only

        self.forget()
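
These NumPy examples rely on two helpers the page never shows, `random(rows, cols)` and `orthogonalize(W)`. A minimal sketch of plausible definitions, assuming zero-centred uniform noise and an SVD-based projection (the originals may differ):

import numpy as np

def random(rows, cols):
    # assumption: zero-centred uniform noise in [-1, 1)
    return np.random.uniform(-1.0, 1.0, (rows, cols))

def orthogonalize(W):
    # project a square matrix onto a nearby orthogonal matrix via SVD
    u, _, vt = np.linalg.svd(W, full_matrices=False)
    return u.dot(vt)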
Example #2
	def __init__(self, dinput, nstates, ngroups, doutput, sigma=0.1):
		Wi = random(nstates, dinput + 1) * sigma
		Wh = random(nstates, nstates + 1) * sigma
		Wo = random(doutput, nstates + 1) * sigma

		base = np.zeros((ngroups, 1))
		tick = np.random.random((ngroups, 1)) - 1.5
		
		#for n in range(ngroups):
		#	tick[n] = 137.0 / (2.0 ** n)

		# store it all
		self.dinput = dinput
		self.nstates = nstates
		self.ngroups = ngroups
		self.doutput = doutput

		self.Wi = Wi
		self.Wh = Wh
		self.Wo = Wo

		self.base = base
		self.tick = tick

		self.H_last = None
Example #3
	def __init__(self, dinput, nstates, doutput, periods, sigma=0.1, first_layer=False, last_state_only=False):
		'''

			Clockwork Recurrent Neural Network
			This follows the variant described in the paper by Koutnik et al.

			dinput: 
				dimension of the input (per time step)
			
			nstates: 
				number of states per module/clock
			
			doutput: 
				required dimension of the output
			
			periods: 
				the periods of clocks (order is maintained and not sorted)
			
			first_layer:
				True: this is the first layer of the network, so gradients w.r.t. the inputs
						are not calculated (they are useless for training); saves time
				False: gradients w.r.t. the inputs are calculated and returned
		''' 
		nclocks = len(periods)
		
		Wi = random(nstates, dinput + 1) * sigma
		Wh = random(nclocks * nstates, nclocks * nstates + 1) * sigma
		#Wh = np.zeros((nclocks * nstates, nclocks * nstates + 1))
		#for i in range(nclocks):
		#	for j in range(nclocks):
		#		Wh[i * nstates: (i + 1) * nstates, j * nstates: (j + 1) * nstates] = orthogonalize(random(nstates, nstates))

		Wo = random(doutput, nclocks * nstates + 1) * sigma
		
		H_0 = np.zeros((nclocks * nstates, 1))
	
		mask = recurrent_mask(nclocks, nstates)
		Wh[:,:-1] *= mask

		# column vector to selectively activate rows based on time
		schedules = make_schedule(periods, nstates)
		schedules = np.array(schedules).reshape(-1, 1)

		# store it all
		self.dinput = dinput
		self.nstates = nstates
		self.doutput = doutput
		self.periods = periods
		self.nclocks = nclocks
		self.Wi = Wi
		self.Wh = Wh
		self.Wo = Wo
		self.H_0 = H_0
		self.mask = mask
		self.schedules = schedules
		self.sigma = sigma
		self.first_layer = first_layer
		self.last_state_only = last_state_only

		self.forget()
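
The constructor above also assumes `recurrent_mask` and `make_schedule`, neither of which is shown. A plausible sketch that matches the shapes used here: the mask makes Wh block upper-triangular (faster modules can read slower modules, per Koutnik et al.), and the schedule repeats each clock period once per state so it lines up row-for-row with the stacked hidden vector:

import numpy as np

def recurrent_mask(nclocks, nstates):
    # 1s in the block upper triangle: module i only connects to modules j >= i
    mask = np.zeros((nclocks * nstates, nclocks * nstates))
    for i in range(nclocks):
        mask[i * nstates:(i + 1) * nstates, i * nstates:] = 1.0
    return mask

def make_schedule(periods, nstates):
    # e.g. periods=[1, 2], nstates=3 -> [1, 1, 1, 2, 2, 2]
    return [p for p in periods for _ in range(nstates)]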
Example #4
	def __init__(self, dinput, nstates, ngroups, doutput, sigma=0.1):
		Wi = random(nstates, dinput + 1) * sigma
		Wh = random(nstates, nstates + 1) * sigma
		Wo = random(doutput, nstates + 1) * sigma
		
		H_0 = np.zeros((nstates, 1))
	
		# column vector to selectively activate rows based on time
		base = np.random.random((ngroups, 1))
		tick = np.random.random((ngroups, 1))
		
		# store it all
		self.dinput = dinput
		self.nstates = nstates
		self.ngroups = ngroups
		self.doutput = doutput

		self.Wi = Wi
		self.Wh = Wh
		self.Wo = Wo
		self.H_0 = H_0

		self.base = base
		self.tick = tick

		self.H_last = None
Example #5
    def appendchild(self, node):
        node.halfmoveno = self.halfmoveno + 1
        node.build()
        self.childs.append(node)
        self.containerdiv.a(node)
        # once there are multiple children, mark the container with a random light colour
        if len(self.childs) > 1:
            rgb = "rgb({},{},{})".format(int(random() * 128 + 127),
                                         int(random() * 128 + 127),
                                         int(random() * 128 + 127))
            self.containerdiv.bc(rgb).bds("solid").bdw(10).bdr(20).bdc(rgb)
Example #6
def train(net, perturb, optimizer, testloader, device, epoch):
    correct = 0
    total = 0
    # confidence = []
    # predictions = []
    total_loss = 0
    for batch_index, (images, labels) in enumerate(testloader):
        images, labels = images.to(device), labels.to(device)
        images = images + perturb * random(images.shape, device)
        outputs = net(images)
        prob, predicted = softmax(outputs).max(1)
        optimizer.zero_grad()
        # confidence loss: mean of p*(1-p) over the batch, minimized when
        # the predicted class probabilities are confident
        loss = torch.dot(prob, 1 - prob) / (labels.size(0))
        loss.backward()
        optimizer.step()
        # confidence.extend(prob.detach().cpu().numpy())
        # predictions.extend(predicted.detach().cpu().numpy())
        total += labels.size(0)
        correct += predicted.eq(labels).sum().item()
        total_loss += loss.item()

    print("Epoch {}-- Accuracy: {:.4f}, Loss: {:.4f}".format(
        epoch, correct / total, total_loss / (batch_index + 1)))
    log_metric("Accuracy_target", 1.0 * correct / total, epoch)
    log_metric("Loss_target", total_loss / (batch_index + 1), epoch)
Example #7
    async def post(self):
        url = self.get_argument('url')
        code = self.get_argument('code', None)

        if code is not None:
            try:
                await store.sCreate(url, code)
                self._send_response(code)
            except store.IntegrityError as e:
                reason = ('code "%s" already exists' % code)
                self.send_error(409, reason=reason)
        else:
            retry = 0
            while retry < MAX_RETRY:
                code = random(RANDOM_LEN)
                try:
                    await store.sCreate(url, code)
                except store.IntegrityError:
                    retry += 1
                    continue
                self._send_response(code)
                return

            reason = ('cannot find a proper code for "%s"' % url)
            self.send_error(508, reason=reason)
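
The handler assumes a `random(length)` helper that draws a fresh short code (the same helper reappears in Example #16 below). A sketch using the standard library; the real alphabet and RNG are assumptions:

import string
import secrets

ALPHABET = string.ascii_letters + string.digits  # assumed alphabet

def random(length):
    # cryptographically random short code of the given length
    return ''.join(secrets.choice(ALPHABET) for _ in range(length))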
Example #8
	def __init__(self, dinput, doutput):
		W = random(doutput, dinput + 1)
		W = glorotize(W)
		self.W = W

		self.dinput = dinput
		self.doutput = doutput
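
`glorotize` is another helper these layers take for granted. A sketch of a Glorot (Xavier) rescaling that fits the usage here, assuming the input matrix holds zero-centred uniform entries in [-1, 1):

import numpy as np

def glorotize(W):
    # rescale so entries have variance ~ 2 / (fan_in + fan_out);
    # a sketch -- the original may treat the bias column separately
    return W * np.sqrt(6.0 / (W.shape[0] + W.shape[1]))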
Example #9
    def __init__(self, dinput, doutput):
        W = random(doutput, dinput + 1)
        W = glorotize(W)
        self.W = W

        self.dinput = dinput
        self.doutput = doutput
Example #10
def train(epochs):
    for epoch in range(epochs):
        correct = 0
        total = 0
        confidence = []
        predictions = []
        noise = random(args.batch_size)
        total_loss = 0
        for batch_index, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            inputs = inputs + args.perturb * noise[: len(targets)]
            # inputs = inputs + args.perturb * random(len(targets))
            # weighted blend of the pretrained network's logits and the trainable net's
            outputs = args.weight * pretrained(inputs) + (1 - args.weight) * net(inputs)
            prob, predicted = softmax(outputs).max(1)
            optimizer.zero_grad()
            loss = torch.dot(prob, 1 - prob) / (targets.size(0))
            loss.backward()
            optimizer.step()
            confidence.extend(prob.detach().cpu().numpy())
            predictions.extend(predicted.detach().cpu().numpy())
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            total_loss += loss.item()

        # print("Epoch {}: Loss: {:.4f}".format(epoch + 1, total_loss/(batch_index + 1)))
        # print("Epoch {}: Accuracy on Test data {:.4f} ({}/{})".format(epoch + 1, correct / total, correct, total))
        print(
            "Epoch {}-- Accuracy: {:.4f}, Loss: {:.4f}".format(
                epoch + 1, correct / total, total_loss / (batch_index + 1)
            )
        )
Example #11
    def __init__(self, dinput, nstates, sigma=0.1, fbias=0.0, last_state_only=False):
        # stacked weights for the four LSTM gate blocks, scaled by sigma
        W = random(nstates * 4, dinput + nstates + 1) * sigma
        W[0 * nstates : 1 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[1 * nstates : 2 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[2 * nstates : 3 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[3 * nstates : 4 * nstates, dinput:-1] = orthogonalize(random(nstates, nstates))
        W[:, -1] = 0  # initialize all biases to zero
        W[2 * nstates : 3 * nstates, -1] = fbias  # forget bias
        self.W = W

        self.c_0 = np.zeros((nstates, 1))
        self.Y_0 = np.zeros((nstates, 1))

        self.dinput = dinput
        self.nstates = nstates
        self.last_state_only = last_state_only

        self.forget()
Example #12
    def resetAdmin(self, time):
        code = utils.random(time, 100000, 999999)

        data = {"operation": "write", "vercode": code}

        json = self.post("login?form=vercode", data).json()
        if json["success"] == True:
            print "Found code %d, admin password reset" % code
            return True
        return False
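
The snippet leans on `utils.random(seed, lo, hi)`; the attack works because the verification code is drawn from an RNG seeded with a guessable timestamp. A sketch of what that helper presumably looks like:

import random as _random

def random(seed, lo, hi):
    # deterministic: the same seed (e.g. a timestamp) yields the same code,
    # which is what makes the vercode guessable
    return _random.Random(seed).randint(lo, hi)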
Example #13
    def __init__(self,
                 dinput,
                 nstates,
                 doutput,
                 clock_periods,
                 full_recurrence=False,
                 learn_state=True,
                 first_layer=False):
        super(CRNN, self).__init__()
        nclocks = len(clock_periods)

        Wi = random(nclocks * nstates, dinput + 1)
        Wh = random(nclocks * nstates, nclocks * nstates + 1)
        Wo = random(doutput, nclocks * nstates + 1)

        H_0 = np.zeros((nclocks * nstates, 1))

        Wi = glorotize(Wi)
        Wh[:, :-1] = orthogonalize(Wh[:, :-1])
        Wo = glorotize(Wo)

        utri_mask = recurrent_mask(nclocks, nstates)
        if not full_recurrence:
            Wh[:, :-1] *= utri_mask

        schedules = make_schedule(clock_periods, nstates)

        self.dinput = dinput
        self.nstates = nstates
        self.doutput = doutput
        self.clock_periods = clock_periods
        self.nclocks = nclocks
        self.Wi = nn.Parameter(torch.from_numpy(Wi).float())
        self.Wh = nn.Parameter(torch.from_numpy(Wh).float())
        self.Wo = nn.Parameter(torch.from_numpy(Wo).float())
        self.H_0 = torch.from_numpy(H_0).float()
        self.utri_mask = utri_mask
        self.schedules = schedules
        self.full_recurrence = full_recurrence
        self.learn_state = learn_state
        self.first_layer = first_layer
        self.H_last = None
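
A minimal usage sketch for the module above, with illustrative sizes (the forward pass is not part of this excerpt):

net = CRNN(dinput=4, nstates=8, doutput=2, clock_periods=[1, 2, 4, 8])
print(net.Wi.shape)  # torch.Size([32, 5])  : nclocks*nstates rows, dinput+1 cols
print(net.Wh.shape)  # torch.Size([32, 33]) : square recurrence plus a bias column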
Example #14
	def __init__(self, dinput, nstates, clocks, doutput, sigma=0.1):
		ngroups = len(clocks)
		Wi = random(ngroups * nstates, dinput + 1) * sigma
		Wh = random(ngroups * nstates, ngroups * nstates + 1) * sigma
		Wo = random(doutput, ngroups * nstates + 1) * sigma

		connection_matrix = np.random.random((ngroups, ngroups)) - 0.5

		schedules = make_schedule(clocks, nstates)
		schedules = np.array(schedules).reshape(-1, 1)
		
		# store it all
		self.dinput = dinput
		self.nstates = nstates
		self.ngroups = ngroups
		self.doutput = doutput

		self.Wi = Wi
		self.Wh = Wh
		self.Wo = Wo
		self.connection_matrix = connection_matrix
		self.schedules = schedules

		self.H_last = None
Example #15
    def __init__(self, dinput, nstates, clocks, doutput, sigma=0.1):
        ngroups = len(clocks)
        Wi = random(ngroups * nstates, dinput + 1) * sigma
        Wh = random(ngroups * nstates, ngroups * nstates + 1) * sigma
        Wo = random(doutput, ngroups * nstates + 1) * sigma

        connection_matrix = np.random.random((ngroups, ngroups)) - 0.5

        schedules = make_schedule(clocks, nstates)
        schedules = np.array(schedules).reshape(-1, 1)

        # store it all
        self.dinput = dinput
        self.nstates = nstates
        self.ngroups = ngroups
        self.doutput = doutput

        self.Wi = Wi
        self.Wh = Wh
        self.Wo = Wo
        self.connection_matrix = connection_matrix
        self.schedules = schedules

        self.H_last = None
Example #16
    async def post(self):
        body = self.request.body
        if len(body) == 0:
            self.send_error(400)
            return

        retry = 0
        while retry < MAX_RETRY:
            code = random(RANDOM_LEN)
            try:
                await store.pCreate(body, code)
            except store.IntegrityError:
                retry += 1
                continue
            self.set_status(201)
            self.set_header('content-type', 'text/plain')
            self.write('%s://%s/p/%s\n' %
                       (self.request.protocol, self.request.host, code))
            return

        self.send_error(508)
Example #17
	def __init__(self, dinput, nstates, doutput, clock_periods, full_recurrence=False, learn_state=True, first_layer=False):
		'''

			Clockwork Recurrent Neural Network
			This follows the variant described in the paper by Koutnik et al.

			dinput: 
				dimension of the input (per time step)
			
			nstates: 
				number of states per module/clock
			
			doutput: 
				required dimension of the output
			
			clock_periods: 
				the periods of clocks (order is maintained and not sorted)
			
			full_recurrence:
				True: every module can 'see' the hidden states of every other module
				False: as per the original paper - only faster modules can see slower modules

			learn_state:
				True: initial state is randomly initialized and learned during training
				False: start with an all-zero initial state and don't learn it

			first_layer:
				True: this is the first layer of the network, so gradients w.r.t. the inputs
						are not calculated (they are useless for training); saves time
				False: gradients w.r.t. the inputs are calculated and returned
		''' 

		nclocks = len(clock_periods)
		
		Wi = random(nclocks * nstates, dinput + 1)
		Wh = random(nclocks * nstates, nclocks * nstates + 1)
		Wo = random(doutput, nclocks * nstates + 1)
		
		if learn_state:
			H_0 = random(nclocks * nstates, 1)
		else:
			H_0 = np.zeros((nclocks * nstates, 1))

		# some fancy inits
		Wi = glorotize(Wi)
		Wh[:, :-1] = orthogonalize(Wh[:, :-1])
		Wo = glorotize(Wo)
	
		# mask to make Wh a block upper triangle matrix
		utri_mask = recurrent_mask(nclocks, nstates)
		if not full_recurrence:
			Wh[:,:-1] *= utri_mask

		# column vector to selectively activate rows based on time
		schedules = make_schedule(clock_periods, nstates)
		schedules = np.array(schedules).reshape(-1, 1)

		# store it all
		self.dinput = dinput
		self.nstates = nstates
		self.doutput = doutput
		self.clock_periods = clock_periods
		self.nclocks = nclocks
		self.Wi = Wi
		self.Wh = Wh
		self.Wo = Wo
		self.H_0 = H_0
		self.utri_mask = utri_mask
		self.schedules = schedules
		self.full_recurrence = full_recurrence
		self.learn_state = learn_state
		self.first_layer = first_layer

		self.forget()
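
The `schedules` column built above is what implements the clockwork update: at time step t, only rows whose period divides t get a new value. A hypothetical fragment of the forward step's gating, assuming 1-indexed time as in Koutnik et al.:

# hypothetical fragment of a forward step at time t (input term omitted for brevity)
active = (t % self.schedules) == 0                 # one boolean per hidden row
H_new = np.tanh(np.dot(self.Wh, np.vstack([H_prev, [[1.0]]])))
H = np.where(active, H_new, H_prev)                # inactive rows keep their old state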
Example #18
    def __init__(self,
                 dinput,
                 nstates,
                 doutput,
                 clock_periods,
                 full_recurrence=False,
                 learn_state=True,
                 first_layer=False):
        '''

			Clockwork Recurrent Neural Network
			This follows the variant described in the paper by Koutnik et al.

			dinput: 
				dimension of the input (per time step)
			
			nstates: 
				number of states per module/clock
			
			doutput: 
				required dimension of the output
			
			clock_periods: 
				the periods of clocks (order is maintained and not sorted)
			
			full_recurrence:
				True: every module can 'see' the hidden states of every other module
				False: as per the original paper - only faster modules can see slower modules

			learn_state:
				True: initial state is randomly initialized and learned during training
				False: start with an all-zero initial state and don't learn it

			first_layer:
				True: this is the first layer of the network, so gradients w.r.t. the inputs
						are not calculated (they are useless for training); saves time
				False: gradients w.r.t. the inputs are calculated and returned
		'''

        nclocks = len(clock_periods)

        Wi = random(nclocks * nstates, dinput + 1)
        Wh = random(nclocks * nstates, nclocks * nstates + 1)
        Wo = random(doutput, nclocks * nstates + 1)

        if learn_state:
            H_0 = random(nclocks * nstates, 1)
        else:
            H_0 = np.zeros((nclocks * nstates, 1))

        # some fancy inits
        Wi = glorotize(Wi)
        Wh[:, :-1] = orthogonalize(Wh[:, :-1])
        Wo = glorotize(Wo)

        # mask to make Wh a block upper triangle matrix
        utri_mask = recurrent_mask(nclocks, nstates)
        if not full_recurrence:
            Wh[:, :-1] *= utri_mask

        # column vector to selectively activate rows based on time
        schedules = make_schedule(clock_periods, nstates)
        schedules = np.array(schedules).reshape(-1, 1)

        # store it all
        self.dinput = dinput
        self.nstates = nstates
        self.doutput = doutput
        self.clock_periods = clock_periods
        self.nclocks = nclocks
        self.Wi = Wi
        self.Wh = Wh
        self.Wo = Wo
        self.H_0 = H_0
        self.utri_mask = utri_mask
        self.schedules = schedules
        self.full_recurrence = full_recurrence
        self.learn_state = learn_state
        self.first_layer = first_layer

        self.forget()
Example #19
    def train(self, epoch, alpha):
        self.embedding.train()
        self.classifier.train()
        classifier_loss = 0
        discriminator_loss = 0
        net_loss = 0
        correct_d0 = 0
        correct_d1 = 0
        correct_domain = 0
        total = 0

        for batch_idx, (images, labels) in enumerate(self.trainloader):

            images_d0 = images.to(self.device)
            labels = labels.to(self.device)
            images_d1 = images_d0 + self.perturb * random(
                images.shape, self.device)

            embeddings_d0 = self.embedding(images_d0)
            embeddings_d1 = self.embedding(images_d1)

            outputs_d0_class, outputs_d0 = self.classifier(
                embeddings_d0, alpha)
            outputs_d1_class, outputs_d1 = self.classifier(
                embeddings_d1, alpha)

            # 0 for original, 1 for noisy
            domains = torch.cat((
                torch.zeros(images.size(0), device=self.device),
                torch.ones(images.size(0), device=self.device),
            )).long()
            outputs_domains = torch.cat((outputs_d0, outputs_d1))

            loss_class, loss_dis = self.calc_loss(outputs_d0_class,
                                                  outputs_domains, labels,
                                                  domains)
            loss = loss_class + loss_dis
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()

            classifier_loss += loss_class.item()
            discriminator_loss += loss_dis.item()
            net_loss += loss.item()

            predicted_d0 = outputs_d0_class.argmax(1)
            correct_d0 += predicted_d0.eq(labels).sum().item()

            predicted_d1 = outputs_d1_class.argmax(1)
            correct_d1 += predicted_d1.eq(labels).sum().item()

            predicted_domain = outputs_domains.argmax(1)
            correct_domain += domains.eq(predicted_domain).sum().item()

            total += labels.size(0)

        log_metric("Train/Net_loss", net_loss / (batch_idx + 1), epoch)
        log_metric("Train/Acc_clean", 1.0 * correct_d0 / total, epoch)
        log_metric("Train/Acc_noise", 1.0 * correct_d1 / total, epoch)
        log_metric("Train/Classifier_loss", classifier_loss / (batch_idx + 1),
                   epoch)
        log_metric("Train/Discriminator_loss",
                   discriminator_loss / (batch_idx + 1), epoch)
        log_metric("Train/Acc_domain", 1.0 * correct_domain / (2 * total),
                   epoch)