Example #1
def link_extract(query, number):
    try:
        print "Extracting URLs from Google for following dork: " + query
        discovery_id = id_generator()
        results = google_search(query,
                                GOOGLE_API_KEY,
                                GOOGLE_CSE_ID,
                                num=number)
        i = 0
        l = len(results)
        for result in results:
            printProgressBar(i,
                             l,
                             prefix='Progress:',
                             suffix='Complete',
                             length=50)
            filename = os.path.join(BASE_DIR, 'assets', 'discovered',
                                    'google_web_' + str(discovery_id) + '.txt')
            with open(filename, "a") as links:
                links.write(result['link'])
                links.write("\n")
            i += 1
            printProgressBar(i,
                             l,
                             prefix='Progress:',
                             suffix='Complete',
                             length=50)
            time.sleep(0.1)
            sys.stdout.flush()
        print "\nFinished"
    except Exception as e:
        print "Link extraction failed! Probably your API limit exceeded"
        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e),
              e)
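Every example on this page leans on a printProgressBar helper that the snippets themselves never define. For reference, here is a minimal sketch along the lines of the widely circulated recipe; the exact helper each project ships may differ, and the signature defaults below are assumptions.

import sys

def printProgressBar(iteration, total, prefix='', suffix='', decimals=1,
                     length=100, fill='#'):
    # Render an in-place terminal progress bar; call once per iteration.
    percent = ('{0:.' + str(decimals) + 'f}').format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    sys.stdout.write('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix))
    sys.stdout.flush()
    # Print a newline once the bar is complete.
    if iteration == total:
        sys.stdout.write('\n')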
Example #2
def censys_search(query, protocol):
    print "Extracting IPs for following query: " + query
    print "Please wait.."
    discovery_id = id_generator()
    pages = 2
    output_file = ""
    page = 1
    while page <= pages:
        print "Extracting IPs from page %s" % str(page)
        params = {'query': query, 'page': page}
        try:
            res = requests.post(CENSYS_API_URL + "/search/ipv4",
                                json=params,
                                auth=(CENSYS_UID, CENSYS_SECRET))
        except requests.RequestException:
            print("Cannot communicate with Censys.io")
            return 0
        payload = res.json()
        ip_list = []

        i = 0
        l = len(payload['results'])

        # Initial call to print 0% progress
        printProgressBar(i,
                         l,
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
        for r in payload['results']:
            ip = r["ip"]
            if ip in ip_list:
                continue
            ip_list.append(ip)
            output_file = get_output_file_by_scanner('censys', discovery_id,
                                                     protocol)
            try:
                with open(output_file, "a") as ips:
                    ips.write(ip)
                    ips.write("\n")
            except IOError:
                print "There is no such file: %s" % output_file
                return 0

            # Update Progress Bar
            i += 1
            printProgressBar(i,
                             l,
                             prefix='Progress:',
                             suffix='Complete',
                             length=50)
            sleep(0.1)
            sys.stdout.flush()
        print ""
        if page == 1:
            pages = payload['metadata']['pages']
        page += 1
    print "Results saved under: %s" % output_file
    print "Finished"
    return 1
Example #3
    def mark(self, img, mask=None, img_name="Img"):
        reconstructed = np.zeros((img.shape[0], img.shape[1]))
        if img.max() > 1:
            img = img / 255
        if mask is not None:
            points = LearnData.get_possible_points(mask)
        else:
            points = [[x, y] for x in range(0, img.shape[0] - MASK_SIZE - 1)
                      for y in range(0, img.shape[1] - MASK_SIZE - 1)]
        total = len(points)
        printProgressBar(0,
                         total,
                         prefix='Progress (' + img_name + '):',
                         suffix='Complete',
                         length=self.progress_length)
        for i, p in enumerate(points):
            x, y = p
            centerX = x + int(MASK_SIZE / 2)
            centerY = y + int(MASK_SIZE / 2)
            sample = img[x:(x + MASK_SIZE), y:(y + MASK_SIZE)]
            sample = array(sample).reshape(MASK_SIZE, MASK_SIZE, 1)
            prediction = self.predict(sample)
            result = prediction.T[0]

            if result > MARK_ACCEPT_PROP:
                reconstructed[centerX][centerY] = 255
            else:
                reconstructed[centerX][centerY] = 0
            printProgressBar(i + 1,
                             total,
                             prefix='Progress (' + img_name + '):',
                             suffix='Complete',
                             length=self.progress_length)
        return array(reconstructed)
Example #4
def popModelsAndCheckQuery(modelStrings, query, lockR, lockW, validModels):
    tempDirPath = state.get('tempDirPath')
    tempFilePath = join(tempDirPath, uuid.uuid4().hex + '.las')
    totalNumOfModels = state.get('numOfInputModels')
    labelledModelIds = state.get('labelledModelIds')
    maxModelsAtOnce = MODELS_PER_PROC

    while True:
        currModels = list()
        lockR.acquire()
        if (not len(modelStrings)):
            lockR.release()
            return

        numOfModels = min(maxModelsAtOnce, len(modelStrings))
        # print(numOfModels)
        for idx in range(numOfModels):
            currModels.append(modelStrings.pop())
        # print(len(modelStrings))
        lockR.release()

        modelObjs = list(map(utils.computeModelObjFromModelStr, currModels))
        validCurrModels = getValidModels(modelObjs, query, tempFilePath)
        nonLabelledModels = list(
            filter(lambda m: m.modelId not in labelledModelIds,
                   validCurrModels))

        lockW.acquire()
        validModels.extend(nonLabelledModels)
        utils.printProgressBar(totalNumOfModels, numOfIterations=numOfModels)
        lockW.release()
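Example #4 implements a lock-guarded work-stealing loop: each worker pops a batch of model strings under lockR, validates them, and appends the survivors under lockW. A hypothetical driver, assuming multiprocessing primitives and an illustrative query (none of these names come from the original project):

from multiprocessing import Manager, Lock, Process

manager = Manager()
modelStrings = manager.list(open('models.txt').read().splitlines())
validModels = manager.list()
lockR, lockW = Lock(), Lock()
query = 'example_query'  # placeholder

workers = [Process(target=popModelsAndCheckQuery,
                   args=(modelStrings, query, lockR, lockW, validModels))
           for _ in range(4)]
for w in workers:
    w.start()
for w in workers:
    w.join()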
Example #5
def get_slices(slice_len=512):
    """Slice audiofiles into slices of size 'slice_len'

    In addition to slicing, each slice is normalized between -1 and 1.

    :param slice_len: number of samples of one slice

    :return: a tuple containing:
                -a 2-D array with the shape (num_slices, slice_len),
                 where num_slices is determined by slice_len and the number of files specified in the
                 global variables section
                -a 1-D array of length num_slices containing the label of each slice
    """

    features = None
    labels = None

    num_files = len(filenames)
    progress = 0

    printProgressBar(progress, num_files, prefix='Progress',
                     suffix='Complete', length=50)

    for f in filenames:

        # read file
        _, data = wav.read(f)
        data = data[:, 0]

        num_slices = len(data) // slice_len
        assert num_slices > 0, 'slice_len is too big'
        num_samples = num_slices * slice_len

        new_features = np.array(
            np.split(data[:num_samples], num_slices), dtype=np.float16)

        if features is None:
            features = new_features
        else:
            features = np.vstack((features, new_features))

        label = get_label(f)
        num_labels = np.shape(new_features)[0]
        new_labels = np.repeat(label, num_labels)

        if labels is None:
            labels = new_labels
        else:
            labels = np.append(labels, new_labels)

        progress += 1
        printProgressBar(progress, num_files, prefix='Progress',
                         suffix='Complete', length=50)

    for feature in features:
        feature_max = np.max(np.abs(feature))
        if feature_max != 0:
            feature /= feature_max

    return features, labels
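A hypothetical call, assuming the module-level filenames list has been populated with WAV paths:

features, labels = get_slices(slice_len=512)
print(features.shape)              # (num_slices, 512): one row per slice
print(abs(features).max() <= 1.0)  # True: each slice is scaled into [-1, 1]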
Example #6
def create_dataframe_domain():
    """Creates a dataframe with the specified features 

    The specified features are extracted from the specified files and scaled using the StandardScaler.

    :return: pandas dataframe with the corresponding label as the last column
    """

    features = None
    labels = None

    num_files = len(filenames)
    progress = 0

    printProgressBar(progress, num_files, prefix='Progress',
                     suffix='Complete', length=50)

    for f in filenames:

        new_features = extract_features_file(f)
        if features is None:
            features = new_features
        else:
            features = np.vstack((features, new_features))

        label = get_label(f)
        num_labels = np.shape(new_features)[0]
        new_labels = np.repeat(label, num_labels)

        if labels is None:
            labels = new_labels
        else:
            labels = np.append(labels, new_labels)

        progress += 1
        printProgressBar(progress, num_files, prefix='Progress',
                         suffix='Complete', length=50)

    # scale data
    features_scaled = StandardScaler().fit_transform(features)

    df_features = pd.DataFrame(features_scaled)
    df_features.columns = feature_names

    # df['Label'] = labels NOTE: Old version simple string versions instead of one hot

    labels_enc = label_encoder.transform(labels)
    labels_one_hot = to_categorical(labels_enc)


    df_labels = pd.DataFrame(labels_one_hot)
    df_labels.columns = instruments

    assert len(df_features.index) == len(df_labels.index)

    df = pd.concat([df_features, df_labels], axis=1, join='inner')

    print(df.head())

    return df
Example #7
def dat_to_arrDict(filename):
    """
    Return dict from beamformer dat file. Each dict entry is an
    array of either width 1 (CW) or sweep-width with a buffer.

    Parameters
    ----------
    filename : string
    """
    global DATfile
    DATfile = filename

    dump = 8229
    # dataLength = 8192  # not needed, just interesting

    frequency = du.get_frequency()
    data_width, padding = du.get_dataWidth_padding()

    chan = 4096 - frequency  # channels start @ 0 @ 4096

    arrDict = {}

    antDict = du.get_antennaDict()

    with open(filename, 'rb') as f:
        current = 0

        while current < os.path.getsize(filename):
            raw = f.read(dump)  # avoid shadowing the built-in str
            utils.printProgressBar(current + dump,
                                   os.path.getsize(filename),
                                   prefix='  Building arrDict:',
                                   length=50)

            topic, sts, data = raw.split(b' ', 2)
            dummy, row, col = topic.split(b':')
            row = int(row)
            col = int(col)

            if row in antDict.keys() and col in antDict.keys():

                data = np.frombuffer(data, dtype=np.complex128)
                chanData = np.zeros(
                    (1, data_width + 2 * padding)).astype(np.complex128)
                chanData[0, :] = data[chan - padding:chan + data_width +
                                      padding]

                antStr = 'ant{}{}'.format(antDict[row], antDict[col])

                if antStr not in arrDict:
                    arrDict[antStr] = np.zeros(
                        (0, data_width + 2 * padding)).astype(np.complex128)

                arrDict[antStr] = np.append(arrDict[antStr], chanData, 0)

            current = current + dump

    return arrDict
Example #8
    def run(self):
        self.problem.net.train()
        for it in range(self.max_iter):
            ind1 = torch.randperm(self.problem.input.shape[0])
            self.problem.input[:] = self.problem.input[ind1,:,:,:]
            # np.random.shuffle(self.problem.input)
            for ib in range(self.num_batches):
                utils.printProgressBar(ib,self.num_batches)

                start = ib*self.batch_size
                end = (ib+1)*self.batch_size
                # batchIn = np.expand_dims(self.problem.input[start:end,0,:,:],axis=1)
                # batchTarget = np.expand_dims(self.problem.input[start:end,1,:,:],axis=1)
                batchIn = self.problem.input[start:end,0,:,:].unsqueeze(1)
                batchOut = self.problem.input[start:end,1,:,:].unsqueeze(1)

                self.optimizer.zero_grad()
                out = self.problem.forward(batchIn)
                loss = self.problem.loss(out,batchOut)
                self.loss[0,it] += loss.item() / self.num_batches
                loss.backward()
                self.optimizer.step()

            loss = self.problem.loss_valid()
            self.loss[1,it] = loss.item()

            print("Iteration %s: train_loss = %f, valid_loss = %f" % (it, self.loss[0,it], self.loss[1,it]))
            if(it % self.flush_res==0):
                torch.save(self.problem.net, self.flush_file + '%s' % (it))
                np.save('cnn_loss.npy',self.loss)
            self.lr_scheduler.step()
Example #9
def testNN():
    correct = 0
    traincorrect = 0
    for x in range(10000):
        utils.printProgressBar(x,
                               10000,
                               prefix='Progress:',
                               suffix='Complete',
                               length=50)
        predicted = nn.predict(test[x])
        trainpredict = nn.predict(train[x])
        actual = testY[x]
        trainact = trainY[x]
        # print("predicted: " + str(predicted) + " actual: " + str(actual))
        # image = Image.fromarray(testX[x])
        # image.show()
        # print("predicted: " + str(predicted) + " actual: " + str(actual))
        if predicted == actual:
            correct += 1
        # else:
        #     image = Image.fromarray(testX[x])
        #     image.show()
        #     print("predicted: " + str(predicted) + " actual: " + str(actual))
        if trainpredict == trainact:
            traincorrect += 1

    print("Training data accuracy: " + str(traincorrect / 100) + "%")
    print("Test data accuracy: " + str(correct / 100) + "%")
Example #10
def testXOR():

    nn = NerualNetwork(2, 3, 1, LEARNING_RATE)

    # show a progress bar
    printProgressBar(0,
                     ITERATIONS,
                     prefix='Progress:',
                     suffix='Complete | 0 Iterations',
                     length=50)

    for i in range(ITERATIONS):

        printProgressBar(i + 1,
                         ITERATIONS,
                         prefix='Progress:',
                         suffix='Complete | ' + str(i + 1) + ' Iterations',
                         length=50)

        data = random.choice(training_data)
        nn.train(data['inputs'], data['targets'])

    print(nn.feedforward([0, 0]))
    print(nn.feedforward([0, 1]))
    print(nn.feedforward([1, 0]))
    print(nn.feedforward([1, 1]))
Example #11
    def test(self, test_loader):

        loss_total = 0
        all_p = []
        all_t = []
        length = len(test_loader)
        print('Testing')
        num = 0
        for inc, data in enumerate(test_loader):
            num += 1
            loss, predicted, true = self.model(
                data,
                std=self.std,
                mean=self.mean,
                C_SGEN_layers=self.C_SGEN_layers)
            all_p += list(predicted.flatten())
            all_t += list(true.flatten())
            loss_total += loss.to('cpu').data.numpy()

            printProgressBar(inc + 1,
                             length,
                             prefix='Progress',
                             suffix='Complete')

        loss_mean = loss_total / num
        RMSE = rms_score(all_t, all_p)
        return loss_mean, RMSE, all_p, all_t
Example #12
    def train(self, train_loader):
        loss_total = 0
        num = 0
        print('Training')
        all_p = []
        all_t = []
        length = len(train_loader)
        for inc, data in enumerate(train_loader):
            num += 1
            self.optimizer.zero_grad()
            loss, pred, true = self.model(data,
                                          C_SGEN_layers=self.C_SGEN_layers)

            all_p += list(pred.flatten())
            all_t += list(true.flatten())

            loss.backward()
            self.optimizer.step()
            loss_total += loss.to('cpu').data.numpy()

            printProgressBar(inc + 1,
                             length,
                             prefix='Progress',
                             suffix='Complete')

        loss_mean = loss_total / num
        return loss_mean, all_p, all_t
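Examples #11 and #12 are the two halves of one trainer; a hypothetical epoch loop tying them together (the trainer object, loaders, and epoch count are assumptions, not from the original source):

for epoch in range(30):
    train_loss, train_p, train_t = trainer.train(train_loader)
    test_loss, rmse, test_p, test_t = trainer.test(test_loader)
    print('epoch %d: train %.4f, test %.4f (RMSE %.4f)'
          % (epoch, train_loss, test_loss, rmse))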
Example #13
def BoostDataset(X, y, n_samples=0):
    # Techniques from
    # https://medium.com/@thimblot/data-augmentation-boost-your-image-dataset-with-few-lines-of-python-155c2dc1baec

    orig_shape = X.shape[0]
    index = orig_shape
    print("Boosting Dataset...")
    for i in range(n_samples):
        if i % 5 == 0 or i + 1 == n_samples:
            u.printProgressBar(i + 1, n_samples)
        num_sample = random.randint(0, orig_shape - 1)  # randint is inclusive at both ends
        patch = X[num_sample, :, :, :]
        #print(patch.shape)
        num = random.randint(0, 4)
        if (num == 0):
            new_patch = np.flipud(patch)

        if (num == 1):
            new_patch = np.fliplr(patch)

        if (num == 2):
            new_patch = sk.util.random_noise(patch)

        if (num == 3 or num == 4):
            random_degree = random.uniform(-25, 25)
            new_patch = sk.transform.rotate(patch, random_degree)

        #print(new_patch.shape)
        #time.sleep(5)

        X = np.append(X, [new_patch], axis=0)
        y = np.append(y, y[num_sample])

    return X, y
Example #14
    def run_cycles(self, should_print=False):
        for i in range(self.cycles):
            new_cycle_grid = [[[['.' for _ in range(self.size)]
                                for _ in range(self.size)]
                               for _ in range(self.size)]
                              for _ in range(self.size)]

            for w in range(1, self.size - 1):
                for z in range(1, self.size - 1):
                    for x in range(1, self.size - 1):
                        for y in range(1, self.size - 1):
                            cell = self.grid[w][z][y][x]
                            neighbours = self.count_neighbours(x, y, z, w)
                            if cell == '#' and neighbours not in [2, 3]:
                                new_cycle_grid[w][z][y][x] = '.'
                            elif cell == '.' and neighbours == 3:
                                new_cycle_grid[w][z][y][x] = '#'
                            else:
                                new_cycle_grid[w][z][y][x] = cell
                printProgressBar(i * (self.size - 1) + w,
                                 (self.cycles) * (self.size - 1), 'Progress:')
            self.grid = deepcopy(new_cycle_grid)
        printProgressBar(100, 100, 'Progress:')
        count = 0
        for w in range(1, self.size - 1):
            for z in range(1, self.size - 1):
                for x in range(1, self.size - 1):
                    for y in range(1, self.size - 1):
                        if self.grid[w][z][y][x] == '#':
                            count += 1
        return count
Example #15
def fg(model, x, y, mask, target):

    x_adv = np.zeros(x.shape, dtype=np.float32)
    grad_fn = gradient_fn(model)

    for i, x_in in enumerate(x):
        utils.printProgressBar(i + 50,
                               100,
                               prefix='Progress ITERATIVE TARGET ATTACK:',
                               suffix='Complete',
                               length=50)

        if target == True:
            grad = -1 * grad_fn([x_in.reshape(INPUT_SHAPE), y[i], 0])[0][0]
        else:
            grad = grad_fn([x_in.reshape(INPUT_SHAPE), y[i], 0])[0][0]

        mask_rep = np.repeat(mask[i, :, :, np.newaxis], N_CHANNEL, axis=2)
        grad *= mask_rep

        # NumPy division by zero does not raise ZeroDivisionError, so guard explicitly
        norm = np.linalg.norm(grad)
        if norm > 0:
            grad /= norm

        x_adv[i] = x_in + grad * 3.5

    x_adv = np.clip(x_adv, 0, 1)

    return x_adv
Example #16
def train(epoch):
    model.train()
    print("({}) LR: {:.2e}".format(epoch, optimizer.param_groups[0]['lr']))
    train_batch_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        loss = criterion(output, target)

        optimizer.zero_grad()
        loss.backward()

        # 2: Do optimization step
        optimizer.step()

        # 3: Make sure to clip weights / biases between -1, 1.
        model.back_clamp()

        train_batch_loss = 0.9 * train_batch_loss + 0.1 * loss.item()
        printProgressBar((batch_idx + 1) * len(data),
                         len(train_loader.dataset),
                         length=50)
        # if batch_idx % args.log_interval == 0:
        #     print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
        #         epoch, batch_idx * len(data), len(train_loader.dataset),
        #         100. * batch_idx / len(train_loader), loss.data[0]))

    # Decay LR
    optimizer.param_groups[0]['lr'] *= LR_decay
Example #17
def process_all():
    output_file = 'stat.txt'
    output_f = open(output_file, 'w')

    # get processed
    tab = pd.read_table('processed.txt', header=None, names=['id'])
    processed = {k: 1 for k in tab['id'].values}
    g = open('processed.txt', 'a')

    with open('../../../chain_seq.json') as f:
        dump = json.load(f)

    keys = list(dump.keys())  # dict_keys is not indexable in Python 3
    for i in range(len(keys)):
        pdbID = keys[i]

        if pdbID in processed.keys(): continue
        # pdbID = '4FF1'
        print(pdbID)
        # pdbID = '4BB9'
        # if pdbID in ['3SX7']: continue

        input_file = '../../../pdb/%s.pdb.gz' % pdbID  # avoid shadowing the built-in input()
        process_pdb_file(input_file, output_f)

        g.write(pdbID + '\n')

        printProgressBar(i, len(keys), length=50, fill=chr(219))
        # break
    output_f.close()
    g.close()
Example #18
def iterative(model, x, y, mask, target):
    x_adv = np.zeros(x.shape, dtype=np.float32)
    grad_fn = gradient_fn(model)

    for i, x_in in enumerate(x):

        utils.printProgressBar(i+50, 100, prefix = 'Progress ITERATIVE TARGET ATTACK:', suffix = 'Complete', length = 50)

        x_cur = np.copy(x_in)
        mask_rep = np.repeat(mask[i, :, :, np.newaxis], N_CHANNEL, axis=2)

        for _ in range(60):
            
            if target == True:
                grad = -1 * gradient_input(grad_fn, x_cur, y[i])
            else:
                grad = gradient_input(grad_fn, x_cur, y[i])

            # NumPy division by zero does not raise ZeroDivisionError, so guard explicitly
            norm = np.linalg.norm(grad)
            if norm > 0:
                grad /= norm

            grad *= mask_rep

            x_cur += grad * 0.09
            x_cur = np.clip(x_cur, 0, 1)
        
        x_adv[i] = np.copy(x_cur)

    return x_adv
Example #19
def verify_experiment(ex, params=None):
	std_dev_percent = get_std_dev_percent(ex)
	print('Verifying ' + str(std_dev_percent) + '% training noise')

	if params is None:
		params = get_best_params(ex)

	test_percentages = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
	# test_percentages = [22, 24, 26, 28, 30]

	results = {}

	for test_percentage in test_percentages:
		print('Testing on ' + str(test_percentage) + '% test noise')
		successful = []
		fails = []

		variation_paths = generate_variations(ex['default_morphology'], test_percentage)

		test_id = 0
		for path in variation_paths:
			printProgressBar(test_id, 100, prefix = 'Progress:', suffix = 'Complete', length = 50)
			succes, st, d, ec, ah, sh = sim.evaluate(path, False, params, [], False, False)
			if succes and d > 0:
				successful.append((d/10, ec/10))
			else:
				fails.append((d/10, ec/10, st))
			test_id += 1
		printProgressBar(test_id, 100, prefix = 'Progress:', suffix = 'Complete', length = 50)
		print()
		results[test_percentage] = {'successful': successful, 'fails': fails}

	return (std_dev_percent, results)
Example #20
def iterate_rows(rows):
    mashup = AudioSegment.empty()
    ding = AudioSegment.from_mp3("ding_sound.mp3")
    less_loud_ding = ding - 20
    half_second_of_silence = AudioSegment.silent(duration=500)

    i = 0
    l = len(rows)
    # Initial call to print 0% progress
    utils.printProgressBar(i,
                           l,
                           prefix='Progress:',
                           suffix='Complete',
                           length=50)

    for row in rows:
        word_file_1 = generate_sound_file(row[0].value, lang_from)
        word_file_2 = generate_sound_file(row[1].value, lang_to)
        word1 = AudioSegment.from_mp3(word_file_1)
        word2 = AudioSegment.from_mp3(word_file_2)
        # the word in lang_to first, then the word in lang_from
        mashup = mashup + word2 + half_second_of_silence + word1 + less_loud_ding
        os.remove(word_file_1)
        os.remove(word_file_2)
        i += 1
        utils.printProgressBar(i,
                               l,
                               prefix='Progress:',
                               suffix='Complete',
                               length=50)

    return mashup
Example #21
def resumeTraining():
    with open('models/model1.pkl', 'rb') as model_file:  # avoid shadowing the built-in input()
        nn = pickle.load(model_file)

    # show a progress bar
    printProgressBar(0,
                     ITERATIONS,
                     prefix='Progress:',
                     suffix='Complete | 0 Iterations',
                     length=50)

    for i in range(nn.iterations, ITERATIONS):

        printProgressBar(i + 1,
                         ITERATIONS,
                         prefix='Progress:',
                         suffix='Complete | ' + str(i + 1) + ' Iterations',
                         length=50)
        index = randrange(5999)
        inputs = [x / 255 for x in data[0][index]]

        targets = data[1][index]
        t = []
        for j in range(10):
            if (j == targets):
                t.append(1)
            else:
                t.append(0)
        nn.train(inputs, t)
        if keyboard.is_pressed('x'):
            break

    with open('models/model1.pkl', 'wb') as output:

        pickle.dump(nn, output, pickle.HIGHEST_PROTOCOL)
Example #22
def censys_search(query, protocol):
    print("Extracting IPs for the following query: " + query)
    print("Please wait...")
    discovery_id = id_generator()
    pages = 2
    output_file = ""
    page = 1
    while page <= pages:
        try:
            print "Extracting IPs from page %s" % str(page)
            params = {'query': query, 'page': page}
            try:
                res = requests.post(CENSYS_API_URL + "/search/ipv4", json=params, auth=(CENSYS_UID, CENSYS_SECRET))
            except requests.RequestException:
                print("Cannot communicate with Censys.io")
                return
            payload = res.json()
            ip_list = []
            if 'results' in payload.keys():
                i = 0
                l = len(payload['results'])

                # Initial call to print 0% progress
                printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
                for r in payload['results']:
                    ip = r["ip"]
                    if ip in ip_list:
                        continue
                    ip_list.append(ip)
                    output_file = get_output_file_by_scanner('censys', discovery_id, protocol)
                    try:
                        with open(output_file, "a") as ips:
                            ips.write(ip)
                            ips.write("\n")
                    except IOError:
                        print "There is no such file: %s" % output_file
                        return 0
                    except Exception as e:
                        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e)
                        return 0
                        

                    # Update Progress Bar
                    i += 1
                    printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
                    sleep(0.1)
                    sys.stdout.flush()
                print ""
                if page == 1:
                    pages = payload['metadata']['pages']
                page += 1
            else:
                print "Can not communicate with Censys"
                return 0
        except KeyboardInterrupt:
            break
    print "Results saved under: %s" % output_file
    print "Finished"
    return 1
Example #23
def popModelsAndClassify(modelsStrings, labelsFile, labelsCounter, lockR,
                         lockW):
    tempDirPath = state.get('tempDirPath')
    tempFilePath = join(tempDirPath, uuid.uuid4().hex + '.las')
    output = state.get('classifOutput')
    totalNumOfModels = state.get('numOfInputModels')
    maxModelsAtOnce = MODELS_PER_PROC

    while True:
        currModels = list()
        lockR.acquire()
        if (not len(modelsStrings)):
            lockR.release()
            return

        numOfModels = min(maxModelsAtOnce, len(modelsStrings))
        # print(numOfModels)
        for idx in range(numOfModels):
            currModels.append(modelsStrings.pop())
        # print(len(modelsStrings))
        lockR.release()

        modelObjs = list(map(utils.computeModelObjFromModelStr, currModels))
        labelPredsForModels = utils.computeLabelPredsForModels(
            modelObjs, tempFilePath)
        modelLabelsMap = utils.getModelLabelsMap(labelPredsForModels)

        lockW.acquire()

        output = state.get('classifOutput')
        for label in labelPredsForModels:
            output += label + '.\n'
        state.set('classifOutput', output)

        for model in list(modelLabelsMap.keys()):
            if len(modelLabelsMap[model]) == 1:
                labelsCounter[modelLabelsMap[model][0]] += 1
            else:
                labelsCounter[MULTIPLE_LABELS_STRING] += 1

        labelsCounter[NO_LABEL_STRING] += numOfModels - len(
            list(modelLabelsMap.keys()))

        for model in modelObjs:
            mId = model.modelId
            if (mId not in list(modelLabelsMap.keys())):
                noLabelMustLabels = state.get(
                    'mustLabelModels')[NO_LABEL_STRING]
                if (len(noLabelMustLabels) < MUST_LABEL_SIZE):
                    noLabelMustLabels.append(model)
            elif (len(modelLabelsMap[mId]) > 1):
                multipleLabelsMustLabels = state.get(
                    'mustLabelModels')[MULTIPLE_LABELS_STRING]
                if (len(multipleLabelsMustLabels) < MUST_LABEL_SIZE):
                    multipleLabelsMustLabels.append(model)

        utils.printProgressBar(totalNumOfModels, numOfIterations=numOfModels)
        lockW.release()
Example #24
def main(argv):
    team = argv[0] if argv[0] != 'overall' else {'$exists': True}
    teamName = argv[0] if argv[0] != 'overall' else 'overall'

    print('Connecting to Mongo')

    client = pymongo.MongoClient(os.getenv('MONGO_URI'),
                                 ssl_cert_reqs=ssl.CERT_NONE)
    collection = client.epilog.data

    print('fetching metadata')

    doc_count = collection.estimated_document_count()

    print('loading all data from database')

    printProgressBar(0,
                     doc_count,
                     prefix='Progress:',
                     suffix='Complete',
                     length=50)

    data = defaultdict(def_value)

    index = 0
    for doc in collection.find({
            'experimentLabel': team,
            'x': {
                '$exists': True
            },
            'y': {
                '$exists': True
            },
            'z': {
                '$exists': True
            }
    }).sort('time', pymongo.ASCENDING):
        printProgressBar(index,
                         doc_count,
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
        index = index + 1

        x = int(doc['x'])
        y = int(doc['z'])

        data[x, y] += 1

    data = sorted(data.items(), key=lambda key: key[0])

    print('')

    print('Writing data to file')
    writeToFile(data, teamName, 'csv')

    return 0
Example #25
def main():
    global noGuess

    conn = sqlite3.connect('out/data.db')

    prepareDatabase(conn)

    rawItems = conn.cursor().execute("SELECT path,id,link,owner FROM file WHERE type='file'").fetchall()
    parsedItems = []

    coursesIdByName = conn.cursor().execute(f"SELECT name,id FROM course")
    coursesIdByName = dict(coursesIdByName)

    for i in range(0, len(rawItems)):
        printProgressBar(i+1, len(rawItems), prefix = 'Progress:', suffix = 'Complete', length = 50)

        rawItem = rawItems[i]

        parsedItem = {
            'path': unidecode(rawItem[0].lower()), 'id': rawItem[1], 'link': rawItem[2], 'owner': rawItem[3],
            'types': None, 'courses': None, 'date': None
        }

        parseTypes(parsedItem)

        parseCourses(parsedItem, coursesIdByName)

        parseDate(parsedItem)

        parsedItems.append(parsedItem)

        itemName = parsedItem['path'].split('/')[-1]

        conn.cursor().execute(
            "INSERT INTO parsed_content(name,id,link,owner,date) VALUES(?,?,?,?,?)"
            +" ON CONFLICT(id) DO NOTHING",
            (itemName, parsedItem['id'], parsedItem['link'], parsedItem['owner'], parsedItem['date'])
        )

        for content_type in parsedItem['types']:  # avoid shadowing the built-in type()
            conn.cursor().execute(
                "INSERT INTO parsed_content_type VALUES(?,?)"
                +" ON CONFLICT(content_id, type) DO NOTHING",
                (parsedItem['id'], content_type)
            )

        for course in parsedItem['courses']:
            conn.cursor().execute(
                "INSERT INTO parsed_content_course VALUES(?,?)"
                +" ON CONFLICT(content_id, course_id) DO NOTHING",
                (parsedItem['id'], course)
            )

        conn.commit()

    print(f"No guesses {noGuess}")
Example #26
def download_image_file(meta):
    download_image(meta)

    path, dirs, files = next(os.walk(base_path + 'images/logs'))
    file_count = len(files)

    printProgressBar(file_count,
                     14653,
                     prefix='Progress:',
                     suffix='Complete',
                     decimals=2,
                     length=50)
Example #27
def detectCirclesVideo(videoPath, initialFrame=0, lastFrame='max', thresh=20, display_intermediate_steps=False, opening_kernel=5):
    if lastFrame=='max':
        # Find number of frames in the video
        v = pims.Cine(videoPath)
        lastFrame = v.len()-1
    #TODO: the 'size' column still needs to be filled (e.g. with the mean radius detected over the first 10 frames)
    # We first create an empty dataframe to store the circles in the correct format
    A = pd.DataFrame(np.zeros((1, 2), dtype=np.float64), index=('-1',), columns=('x', 'y'))
    B = pd.DataFrame(np.full((1, 1), 0, dtype=np.int64), index=('-1',), columns=('frame',))
    C = pd.DataFrame(np.full((1, 1), 0, dtype=np.float64), index=('-1',), columns=('size',))
    circles_tp = pd.concat((A, C, B), axis=1)
# =============================================================================
#     try:
#         meanRadius = findMeanRadius(videoPath, n_frames=10)
#     except:
#         meanRadius = 30    
# =============================================================================
    meanRadius = 29 
    video = cv2.VideoCapture(videoPath)
    
    n = 1  # Simple counter to keep track of the current frame number
    while video.isOpened():
        # Read the current frame and assign it to the variable frame
        frameExists, frame = video.read()
        
        if n < initialFrame + 1:
            n += 1
        elif n > lastFrame + 1:
            break
        else:
            # Detect circles for current frame and append them to the general dataframe
            new_circles = alternative_detectCirclesImage(frame, frame_number=n, meanRadius=meanRadius, 
                                             display_intermediate_steps=display_intermediate_steps, thresh=thresh)
            
# =============================================================================
#             new_circles = detectCircles_watershed(frame, frame_number=n, meanRadius=meanRadius, 
#                                                   display_intermediate_steps=display_intermediate_steps, thresh=thresh)
# =============================================================================
            
            circles_tp = pd.concat((circles_tp, new_circles), axis=0)
            n+=1
    
        printProgressBar(n, lastFrame+2-initialFrame, prefix='Detecting particles:', suffix='frames searched')
    # Close the video stream
    video.release()
    # We delete the first row of circles_tp, since it was only used for 
    # initialization and is no longer needed.
    circles_tp = circles_tp.drop('-1')
    #TODO: reset indexes
    circles_tp = circles_tp.reset_index(drop=True)
    
    return circles_tp
Example #28
    def generateProgressBar(self):
        sleepTime = 0.5  # in seconds (a variable makes the refresh rate easy to tune)
        prevDoneSize = 0
        while True:
            #print(str(self.donesize)+str(self.fragsize))
            curDoneSize = sum(self.donesize)
            utils.printProgressBar(curDoneSize * 100.0 / self.length,
                                   speed=(curDoneSize - prevDoneSize) /
                                   sleepTime / 1024)
            if self.donesize == self.fragsize:
                break
            time.sleep(sleepTime)
            prevDoneSize = curDoneSize
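Unlike the other snippets, Example #28 passes a completed percentage plus a transfer speed instead of iteration counts, so it assumes a different helper. A minimal sketch of such a variant (signature and formatting are assumptions):

import sys

def printProgressBar(percent, speed=0.0, length=50):
    # Render a bar from a 0-100 percentage and report speed in KB/s.
    filled = int(length * min(percent, 100) / 100)
    bar = '#' * filled + '-' * (length - filled)
    sys.stdout.write('\r|%s| %5.1f%% (%.1f KB/s)' % (bar, percent, speed))
    sys.stdout.flush()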
Example #29
def computeAllModelObjects(models):
    allModels = list()
    utils.initProgressBar()
    canPrintProgressBar = state.get('prenamedComponents') or (
        not state.get('nameComponents'))

    for modelStr in models:
        newModelObj = utils.computeModelObjFromModelStr(modelStr)
        allModels.append(newModelObj)
        if canPrintProgressBar:
            utils.printProgressBar(len(models))

    return allModels
Example #30
    def update(pltStart):
        utils.printProgressBar(pltStart, numIndexes, prefix='  Streamed:', length=50)
        plt.cla()
        zValues = np.repeat(0, pltLen)  # Reference phase
        for key in plotDictSortedKeys:  # Array of phases
            if key[0] != 'd':
                zValues = np.append(zValues, np.angle(
                    plotDict[key][pltStart:pltStart+pltLen])*180/np.pi)
        t = np.linspace(pltStart, pltStart+pltLen, pltLen)*timeStep
        yValues = np.hstack((t, t, t, t))  # Array of times
        ax.set_zlim(-180, 180)
        ax.plot_trisurf(xValues, yValues, zValues, cmap=cm.inferno,
                        linewidth=0)
        return ax
Example #31
def send_command_ssh(discovery_id, command, credentials_file=None):
    if credentials_file is None:
        if discovery_id:
            try:
                credentials_files = os.path.join(BASE_DIR, 'assets',
                                                 'compromised',
                                                 '*%s.txt' % discovery_id)
                protocols, cf = get_possible_protocols_files(credentials_files)
                credentials_file = cf[0]
            except Exception as e:
                print "There is no such discovery id!!"
                print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno),
                      type(e), e)
                return 0
        else:
            return 0
    try:

        with open(credentials_file, "r") as credentials:
            lines = [line.strip() for line in credentials if line.strip()]
            i = 0
            l = len(lines)
            for line in lines:
                printProgressBar(i,
                                 l,
                                 prefix='Progress:',
                                 suffix='Complete',
                                 length=50)
                tokens = line.split(":")
                ip = tokens[0]
                username = tokens[1]
                password = tokens[2]
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(ip, username=username, password=password)
                #print "Sending command to: ", ip, "....."
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
                i += 1
                printProgressBar(i,
                                 l,
                                 prefix='Progress:',
                                 suffix='Complete',
                                 length=50)
                time.sleep(0.1)
                sys.stdout.flush()
            print "\nFinished"
    except IOError:
        print "There is no such file: %s" % credentials_file
Example #32
def link_extract(query, number):
    try:
        print "Extracting URLs from Google for following dork: " + query
        discovery_id = id_generator()
        results = google_search(query, GOOGLE_API_KEY, GOOGLE_CSE_ID, num=number)
        i = 0
        l = len(results)
        for result in results:
            printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
            filename = os.path.join(BASE_DIR, 'assets', 'discovered', 'google_web_' + str(discovery_id) + '.txt')
            with open(filename, "a") as links:
                links.write(result['link'])
                links.write("\n")
            i += 1
            printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
            time.sleep(0.1)
            sys.stdout.flush()
        print "\nFinished"
    except Exception as e:
        print "Link extraction failed! Probably your API limit exceeded"
        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e)
Example #33
def shodan_search(query, protocol):
    print("Extracting IPs for the following query: " + query)
    print("Please wait...")
    discovery_id = id_generator()
    api = shodan.Shodan(SHODAN_API_KEY)
    try:
        results = api.search(query)
    except shodan.APIError:
        print("Cannot communicate with Shodan.io")
        return 0
    ip_list = []
    output_file = ''
    i = 0
    l = len(results['matches'])
    for result in results['matches']:
        try:
            # Initial call to print 0% progress
            printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
            ip = result['ip_str']
            if ip in ip_list:
                continue
            ip_list.append(ip)
            output_file = get_output_file_by_scanner('shodan', discovery_id, protocol)
            try:
                with open(output_file, "a") as ips:
                    ips.write(ip)
                    ips.write("\n")
            except IOError:
                print "There is no such file: %s" % output_file
                return 0
            i += 1
            printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
            sleep(0.1)
            sys.stdout.flush()    
        except KeyboardInterrupt:
            break
    print "\nResults saved under: %s" % output_file
    print "Finished"
    return 1
Example #34
def send_command_ssh(discovery_id, command, credentials_file=None):
    if credentials_file is None:
        if discovery_id:
            try:
                credentials_files = os.path.join(BASE_DIR, 'assets', 'compromised', '*%s.txt' % discovery_id)
                protocols, cf = get_possible_protocols_files(credentials_files)
                credentials_file = cf[0]
            except Exception as e:
                print "There is no such discovery id!!"
                print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e)
                return 0
        else:
            return 0
    try:
       
        with open(credentials_file, "r") as credentials:
            lines = [line.strip() for line in credentials if line.strip()]
            i = 0
            l = len(lines)
            for line in lines:
                printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
                tokens = line.split(":")
                ip = tokens[0]
                username = tokens[1]
                password = tokens[2]
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(ip, username=username, password=password)
                #print "Sending command to: ", ip, "....."
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
                i += 1
                printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
                time.sleep(0.1)
                sys.stdout.flush()
            print "\nFinished"
    except IOError:
        print "There is no such file: %s" % credentials_file
Example #35
def rectification(to_process, configuration):
    n = len(to_process)
    rootfly_foldername = get_to_rootfly_foldername()
    
    for k,(tube,date,last_session,image_name,filename) in enumerate(to_process):
        #print tube,date,image_name,filename
        
        year = int(date[0:4])
        month = int(date[4:6])
        day = int(date[6:8])
        
        #extract window number from name
        frame_part = image_name.split("-")[1]
        windows_in_image = int(frame_part.split(".")[0])
        
        im = cv2.imread(filename)
        rectified, circles, matches = rectify(im,configuration.rectify.iterations,pad=configuration.rootfly.pad)
        
        h, w, colors = im.shape
        # check if resize is needed
        if configuration.rectify.image_width != w or configuration.rectify.image_height != h:
            rectified = cv2.resize(rectified, (configuration.rectify.image_width, configuration.rectify.image_height))

        #check if tube folder exists
        outputfolder = os.path.join(rootfly_foldername,tube)
        if not os.path.exists(outputfolder):
            os.mkdir(outputfolder)

        out_filename_template = os.path.join(configuration.rootfly.to_rootfly_path_template,configuration.rootfly.template)
        out_filename = utils.get_rootfly_filename(out_filename_template,tube,windows_in_image,year,month,day,last_session+1)
        
        cv2.imwrite(out_filename, rectified)

        utils.printProgressBar(k,n)

    utils.printProgressBar(n,n)
Example #36
        for filename in ret:
            cols = filename.split("/")
            list_to_process += [(tube,date,cols[-1],filename)]

    n = len(list_to_process)
    logging.info("Number of images to copy: [%d]",n)

    for k,(tube,date,filename,fullpath) in enumerate(list_to_process):
        #copy window image to destination folder
        pathname = configuration.rootfly.to_copy_from.format(tube=tube,date=date,year=date[0:4])
        #print(k,tube,date,filename,fullpath)
        
        inputfilename = fullpath
        destination = configuration.rootfly.to_rectify_path_template.format(tube=tube,date=date,year=date[0:4])
        #create folder if needed
        os.makedirs(destination, exist_ok=True)
            
        shutil.copyfile(inputfilename,os.path.join(destination,filename))
        
        #copy_images(images, tube, date, outputfolder, configuration)
        #origin = images, tube, date, outputfolder, configuration
        
        utils.printProgressBar(k,n)
        
    if n > 0: utils.printProgressBar(n,n)

    logging.info("All images have been copied to: [%s]", get_to_rectify_foldername())