def generate_N(self, p, n_generate=20, max_length=MAX_LEN):
    """Greedily generate ``n_generate`` passwords.

    Each password is seeded with the first letter of a randomly sampled
    real password from ``p.passwords_string``; subsequent characters are
    chosen by argmax until the EOS index or ``max_length`` steps.

    Args:
        p: dataset wrapper exposing ``passwords_string`` (list of str).
        n_generate: number of passwords to generate.
        max_length: maximum characters appended after the seed letter.

    Returns:
        list[str]: the generated passwords (seed letters included).
    """
    generate_list = []
    samples = random.sample(p.passwords_string, n_generate)

    for i in range(n_generate):
        start_letter = samples[i][0]
        # BUG FIX: the seed tensor was left on the CPU while `hidden`
        # lives on `device` (later steps already call .to(device));
        # move it so the first GRU step also works on GPU.
        input_tensor = P.passwordToInputTensor(start_letter).to(device)
        with torch.no_grad():
            # Random initial hidden state diversifies outputs that
            # share the same seed letter.
            hidden = torch.rand(self.layers, 1,
                                self.hidden_size).to(device)
            output_password = start_letter

            for c in range(max_length):
                output, hidden = self.gru(self.embedding(input_tensor),
                                          hidden)
                output = self.h2o(output)
                output = output.view(1, -1)
                topv, topi = output.topk(1)  # greedy: most likely char
                topi = topi[0][0]
                if topi == CHARMAP_LEN - 1:  # EOS index ends generation
                    break
                letter = P.all_letters[topi]
                output_password += letter
                input_tensor = P.passwordToInputTensor(letter).to(device)

        generate_list.append(output_password)

    return generate_list
# 예제 #2 (Example #2)
    def generate_rand_N(self, p, n_generate=20, max_length=MAX_LEN):
        """Stochastically generate ``n_generate`` passwords.

        Like ``generate_N`` but the next character is *sampled* from the
        softmax distribution over the output logits instead of argmax.

        Args:
            p: dataset wrapper exposing ``passwords_string`` (list of str).
            n_generate: number of passwords to generate.
            max_length: maximum characters appended after the seed letter.

        Returns:
            list[str]: the generated passwords (seed letters included).
        """
        generate_list = []
        samples = random.sample(p.passwords_string, n_generate)

        for i in range(n_generate):
            start_letter = samples[i][0]
            # BUG FIX: move the seed tensor to `device` — later steps
            # already do, and `hidden` lives on `device`.
            input_tensor = P.passwordToInputTensor(start_letter).to(device)
            with torch.no_grad():
                hidden = torch.rand(self.layers, 1,
                                    self.hidden_size).to(device)
                output_password = start_letter

                for c in range(max_length):
                    output, hidden = self.gru(self.embedding(input_tensor),
                                              hidden)
                    output = self.h2o(output)
                    probs = F.softmax(output.view(-1), dim=0).cpu().numpy()
                    # FIX: renormalize — float32 softmax can sum to a value
                    # slightly different from 1, which makes
                    # np.random.choice raise "probabilities do not sum to 1".
                    probs = probs / probs.sum()
                    index = np.random.choice(len(probs), p=probs)
                    if index == CHARMAP_LEN - 1:  # EOS sampled
                        break
                    letter = P.all_letters[index]
                    output_password += letter
                    input_tensor = P.passwordToInputTensor(letter).to(device)

            generate_list.append(output_password)

        return generate_list
# 예제 #3 (Example #3)
 def test(self, password):
     """Score a password with the discriminator.

     Embeds the pre-train tensor for `password`, runs it through the GRU
     from a zero hidden state, and projects the final timestep through
     ``h2o``. Returns that projection.
     """
     encoded = self.embedding(P.passwordToPretrainTensor(password).float())
     h0 = torch.zeros(self.layers, 1, self.hidden_size).to(device)
     gru_out, _ = self.gru(encoded, h0)
     return self.h2o(gru_out[:, -1, :])
# 예제 #4 (Example #4)
def get_real(strings_in, seq_len):
    # One-hot encode a batch of real passwords into a
    # (BATCH_SIZE, MAX_LEN + 1, CHARMAP_LEN) float tensor, then append a
    # copy of every prefix (padded out with EOS one-hots) along the batch
    # dimension.  NOTE(review): assumes seq_len <= MAX_LEN — confirm.
    real = torch.FloatTensor(BATCH_SIZE, MAX_LEN + 1,
                             CHARMAP_LEN).zero_().to(device)
    for i in range(BATCH_SIZE):
        # l = len(strings_in[i]) if len(strings_in[i]) <= seq_len else seq_len
        if len(strings_in[i]) <= seq_len:
            l = len(strings_in[i])
            # Password fits: one-hot EOS right after its last character.
            real[i][l][CHARMAP_LEN - 1] = 1
        else:
            l = seq_len  # truncate over-long passwords; no EOS marker
        for j in range(l):
            real[i][j][P.letterToIndex(strings_in[i][j])] = 1

    #realreal = torch.zeros(0, MAX_LEN+1, CHARMAP_LEN).to(device)

    # For each prefix length i, append the original batch truncated to
    # i+1 timesteps and padded back to MAX_LEN+1 with EOS one-hots.
    # real[:BATCH_SIZE] always addresses the original rows even though
    # `real` itself grows on every iteration — this ordering is critical.
    for i in range(1, seq_len):
        real = torch.cat(
            (real,
             torch.cat(
                 (real[:BATCH_SIZE, :i + 1, :],
                  torch.zeros(BATCH_SIZE, MAX_LEN - i, CHARMAP_LEN).scatter_(
                      -1,
                      torch.full([BATCH_SIZE, MAX_LEN - i, 1],
                                 CHARMAP_LEN - 1).long(), 1).to(device)),
                 dim=1)),
            dim=0)

    return real
    def pre_train(self, p):
        """Teacher-force the generator on PRE_GEN_ITERS real passwords.

        For each sampled password, feeds characters one at a time and
        trains the network to predict the following character, with an
        EOS index appended as the final target.

        Args:
            p: dataset wrapper exposing ``passwords_string`` (list of str).
        """
        passwords = random.sample(p.passwords_string, PRE_GEN_ITERS)
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
        for password in passwords:
            if not password:
                continue  # nothing to teacher-force on an empty string
            # Target sequence: character indices plus a trailing EOS.
            index = [P.letterToIndex(letter) for letter in password]
            index.append(CHARMAP_LEN - 1)
            hidden = torch.rand(self.layers, 1, self.hidden_size).to(device)

            self.zero_grad()
            # FIX: accumulate from plain 0 — the original seeded the sum
            # with a requires_grad=True leaf tensor that needlessly
            # collected gradients of its own.
            loss = 0
            for j in range(len(password)):
                tensor_in = torch.LongTensor(1, 1).zero_().to(device)
                tensor_in[0][0] = index[j]
                tensor_in = self.embedding(tensor_in)

                tensor_out, hidden = self.gru(tensor_in, hidden)
                tensor_out = self.h2o(tensor_out)

                # The next character (or EOS) is the prediction target.
                expected_out = torch.tensor([index[j + 1]]).to(device)

                loss = loss + criterion(tensor_out[0], expected_out)

            loss.backward()
            optimizer.step()
# 예제 #6 (Example #6)
    def pre_train(self, p, G):
        """Pre-train the discriminator on PRE_DISC_ITERS (real, fake) pairs.

        Each fake sample comes from generator ``G`` seeded with the real
        password's first letter.  The loss is the fake score minus the
        real score, so minimizing it lowers fake scores and raises real
        scores.

        Args:
            p: dataset wrapper exposing ``passwords_string`` (list of str).
            G: generator exposing ``generate_from(start_letter)``.
        """
        passwords = random.sample(p.passwords_string, PRE_DISC_ITERS)
        # (removed: an unused `nn.LogSoftmax` local from the original)
        optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
        for real in passwords:
            fake = G.generate_from(real[0])
            fake = P.passwordToPretrainTensor(fake).float()
            fake = self.embedding(fake)
            real = P.passwordToPretrainTensor(real).float()
            real = self.embedding(real)
            hidden = torch.zeros(self.layers, 1, self.hidden_size).to(device)

            fake_out, _ = self.gru(fake, hidden)
            real_out, _ = self.gru(real, hidden)
            fake_loss = self.h2o(fake_out[:, -1, :])[0][0]
            real_loss = -self.h2o(real_out[:, -1, :])[0][0]

            loss = fake_loss + real_loss

            # BUG FIX: gradients were never cleared, so each backward()
            # accumulated on top of all previous iterations' gradients.
            self.zero_grad()
            loss.backward()
            optimizer.step()
# 예제 #7 (Example #7)
    def generate_from(self, start_letter, max_length=MAX_LEN):
        """Greedily generate one password starting with ``start_letter``.

        Characters are chosen by argmax at each step; generation stops at
        the EOS index or after ``max_length`` steps.

        Args:
            start_letter: single seed character.
            max_length: maximum characters appended after the seed.

        Returns:
            str: the generated password (seed letter included).
        """
        # BUG FIX: move the seed tensor to `device` — later iterations
        # already do, and `hidden` lives on `device`.
        input_tensor = P.passwordToInputTensor(start_letter).to(device)
        with torch.no_grad():
            # Random initial hidden state diversifies outputs per seed.
            hidden = torch.rand(self.layers, 1, self.hidden_size).to(device)
            output_password = start_letter

            for c in range(max_length):
                output, hidden = self.gru(self.embedding(input_tensor), hidden)
                output = self.h2o(output)
                output = output.view(1, -1)
                topv, topi = output.topk(1)  # greedy argmax
                topi = topi[0][0]
                if topi == CHARMAP_LEN - 1:  # EOS
                    break
                letter = P.all_letters[topi]
                output_password += letter
                input_tensor = P.passwordToInputTensor(letter).to(device)

        return output_password
    def generatePassTensor(self, max_length=18):
        """Sample one password greedily and return it as an input tensor.

        The seed is the first letter of a randomly chosen real password;
        each following character is the argmax of the network output,
        stopping at the EOS index or after ``max_length`` steps.
        """
        seed = p.passwords_string[random.randint(0, len(p.passwords_string) - 1)][0]
        with torch.no_grad():
            input_tensor = P.passwordToInputTensor(seed).to(device)
            self.hidden = self.initHiddenZeros()
            password = seed

            for _ in range(max_length):
                scores = self(input_tensor[0]).view(1, -1)
                _, best = scores.topk(1)
                best = best[0][0]
                if best == P.n_letters - 1:
                    # EOS predicted: stop extending the password.
                    break
                letter = P.all_letters[best]
                password += letter
                input_tensor = P.passwordToInputTensor(letter).to(device)

        return P.passwordToInputTensor(password)
def toTensor(strings_in, seq_len, embedding):
    """Encode a batch of password strings as an embedded, packed sequence.

    Each row holds the character indices of one password (truncated to
    ``seq_len``) followed by one EOS index, which is why the tensor has
    ``seq_len + 1`` columns.  NOTE(review): the packed lengths exclude the
    EOS slot — confirm the downstream consumer expects that.
    """
    tensor_in = torch.LongTensor(BATCH_SIZE, seq_len + 1).zero_().to(device)
    lengths = []
    for row, pwd in enumerate(strings_in):
        n = min(len(pwd), seq_len)
        lengths.append(n)
        for col in range(n):
            tensor_in[row][col] = P.letterToIndex(pwd[col])
        tensor_in[row][n] = CHARMAP_LEN - 1  # EOS terminator
    return nn.utils.rnn.pack_padded_sequence(embedding(tensor_in),
                                             lengths,
                                             batch_first=True,
                                             enforce_sorted=False)
# 예제 #10 (Example #10)
def get_real(strings_in, seq_len):
    """One-hot encode a batch into a (BATCH_SIZE, seq_len, CHARMAP_LEN)
    tensor, filling every position past the password's end with the EOS
    one-hot.  Passwords of length >= seq_len are truncated to seq_len and
    get no EOS padding."""
    real = torch.zeros(BATCH_SIZE, seq_len, CHARMAP_LEN).to(device)
    for row in range(BATCH_SIZE):
        pwd = strings_in[row]
        used = len(pwd) if len(pwd) <= seq_len - 1 else seq_len
        for col in range(used):
            real[row][col][P.letterToIndex(pwd[col])] = 1
        # Pad the remainder (if any) with EOS one-hots.
        for col in range(used, seq_len):
            real[row][col][CHARMAP_LEN - 1] = 1
    return real
def get_real(strings_in, seq_len):
    # One-hot encode a batch of real passwords into a Long tensor, then
    # build a float tensor of every prefix, right-aligned and front-padded
    # with all-zero timesteps.  Returns (full encoding, prefix stack).
    real = torch.LongTensor(BATCH_SIZE, seq_len + 1,
                            CHARMAP_LEN).zero_().to(device)
    # real_sub starts with zero rows; prefixes are concatenated along
    # dim 0 in the loop below.
    real_sub = torch.FloatTensor(0, seq_len + 1,
                                 CHARMAP_LEN).zero_().to(device)
    for i in range(BATCH_SIZE):
        # l = len(strings_in[i]) if len(strings_in[i]) <= seq_len else seq_len
        if len(strings_in[i]) <= seq_len:
            l = len(strings_in[i])
            # Password fits: one-hot EOS right after its last character.
            real[i][l][CHARMAP_LEN - 1] = 1
        else:
            l = seq_len  # truncate over-long passwords; no EOS marker
        for j in range(l):
            real[i][j][P.letterToIndex(strings_in[i][j])] = 1

    # For each prefix length i, append the batch's first i+1 timesteps,
    # front-padded with (seq_len - i) zero steps so every slice keeps the
    # full (seq_len + 1)-step width; Long -> float cast for the GAN input.
    for i in range(1, seq_len + 1):
        real_sub = torch.cat(
            (real_sub,
             torch.cat(
                 (torch.zeros(BATCH_SIZE, seq_len - i, CHARMAP_LEN).to(device),
                  real[:, :i + 1, :].to(torch.float)),
                 dim=1)),
            dim=0)
    return real, real_sub