Example #1
    def __init__(self, width, height, recursiondepth):
        """Constructor for the Generator class.

        Caps the desired space to a size that will
        fit the algorithm.
        """
        pg.init()
        # STEP 1: Limit the map to fit the necessary specs
        self.width_px = round_down(width, TILESIZE)
        self.height_px = round_down(height, TILESIZE)
        if not (self.width_px//TILESIZE) % 2:
            self.width_px += TILESIZE
        if not (self.height_px//TILESIZE) % 2:
            self.height_px += TILESIZE
        print(f"Width: {self.width_px} Height: {self.height_px}")
        self.rec_depth = recursiondepth
        min_space_in_px = min(self.width_px//ROOMFRAC, self.height_px//ROOMFRAC)
        # minimum number of tiles that the side of a space can have.
        self.min_space = round_down(min_space_in_px//TILESIZE, TILESIZE)
        # Setup the screen etc.
        self.screen = pg.display.set_mode((self.width_px, self.height_px))
        self.width_tiles = self.width_px//TILESIZE
        self.height_tiles = self.height_px//TILESIZE
        print(f"Width: {self.width_tiles} Height: {self.height_tiles}")
        pg.display.set_caption("Dungeon Generator")
        self.screen.fill(pg.color.THECOLORS["black"])
        self.done = False
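
All of these examples import round_down from a project-local utils module whose source is not shown here. A minimal sketch of what such a helper presumably looks like, assuming it rounds a number down to the nearest multiple of a given base (an assumption, not the project's actual code):

def round_down(num, base):
    """Round num down to the nearest multiple of base."""
    return num - (num % base)
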
Example #2
    def slicing(self, start, distance, padding):
        """Calculate a spot at which to slice along
        the given distance. Return the number of the tile."""
        if random.randint(0, 1):
            slicing = start + (distance // 2) - padding
        else:
            slicing = start + (distance // 2) + padding
        return round_down(slicing, 1)
Example #3
import glob

import numpy as np
from utils import round_down

segment_size = 500
fileCutoff = 4500

X = []
X_labels = np.array([])

# Grab all the waveforms
files = glob.glob("/home/nsbruce/RFI/data/waveforms/*")

for file in files[:fileCutoff]:
    data = np.fromfile(file)
    # Get the length rounded down to the nearest multiple of the segment size
    new_len = round_down(len(data), segment_size)
    if new_len > 0:
        data = data[:new_len]
        i = 0
        while len(data) - i > 0:
            X.append(data[i:i + segment_size])
            i += segment_size
            X_labels = np.append(X_labels, str(file))
    # print("File: {}, len(X): {}, len(X_labels): {}".format(file, len(X), len(X_labels)))

X = np.array(X)
print("Training/testing | X.shape: {}, X_labels.shape: {}".format(
    X.shape, X_labels.shape))

np.save('/home/nsbruce/RFI/data/training_testing_waveforms.npy', X)
np.save('/home/nsbruce/RFI/data/training_testing_waveform_labels.npy',
        X_labels)
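
A small design note on the segmentation above: because new_len is already rounded down to a multiple of segment_size, the inner while loop could equally be written as a single reshape. A sketch of that alternative, reusing the names from the loop (not part of the original script):

segments = data[:new_len].reshape(-1, segment_size)  # one row per segment
X.extend(segments)                                   # same slices as the while loop produces
X_labels = np.append(X_labels, [str(file)] * len(segments))
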
Example #4
def predict(model, test, test_targets, test_coords, test_shape, input_shape,
            vnet, bayesian, batch_size, mc_samples, num_percentiles):
    """Uses given model to predict on test data."""

    # Ensures MC samples are at least one batch and a multiple of the batch size.
    if mc_samples < batch_size:
        raise ValueError("Not enough MC samples.")
    old_mc_samples = mc_samples
    mc_samples = round_down(old_mc_samples, batch_size)
    if old_mc_samples != mc_samples:
        print("MC samples rounded from {} to {}".format(
            old_mc_samples, mc_samples))

    # Initializes prediction variables.
    sigmoid = None
    percentiles = None
    scale = 100 / (num_percentiles - 1)
    percentile_points = [scale * k for k in range(num_percentiles)]

    if vnet:
        # Initializes V-Net specific prediction variables.
        sigmoid = np.zeros(test_shape)
        counts = np.zeros(test_shape)
        percentiles = [np.zeros(test_shape) for i in range(num_percentiles)]

        # Predicts on individual chunks.
        print()
        for i, (chunk, coords) in enumerate(zip(test, test_coords)):
            print("Chunk {}/{}".format(i + 1, test.shape[0]))
            chunk_samples = np.empty((mc_samples, ) + input_shape)

            chunk = np.expand_dims(chunk, axis=0)
            batch = np.repeat(chunk, batch_size, axis=0)

            # Performs Monte Carlo sampling.
            for j in range(0, mc_samples, batch_size):
                chunk_samples[j:j + batch_size] = model.predict_on_batch(batch)

            # Discards poor edge predictions.
            # I use 5% but this can be changed.
            trimmed_shape = input_shape
            border1 = ceil(input_shape[0] * 0.05)
            border2 = ceil(input_shape[1] * 0.05)
            border3 = ceil(input_shape[2] * 0.05)

            # Checks edge cases on edge discarding.
            # For example, we don't want to throw away an edge
            # if it is the very edge of the volume, because that
            # edge may only get predicted on once.
            if coords[0] != 0 and coords[0] != test_shape[0] - input_shape[0]:
                chunk_samples = chunk_samples[:, border1:-border1, :, :, :]
                coords = [coords[0] + border1, coords[1], coords[2]]
                trimmed_shape = [
                    trimmed_shape[0] - (2 * border1), trimmed_shape[1],
                    trimmed_shape[2], 1
                ]
            elif coords[0] != 0:
                chunk_samples = chunk_samples[:, border1:, :, :, :]
                coords = [coords[0] + border1, coords[1], coords[2]]
                trimmed_shape = [
                    trimmed_shape[0] - border1, trimmed_shape[1],
                    trimmed_shape[2], 1
                ]
            elif coords[0] != test_shape[0] - input_shape[0]:
                chunk_samples = chunk_samples[:, :-border1, :, :, :]
                trimmed_shape = [
                    trimmed_shape[0] - border1, trimmed_shape[1],
                    trimmed_shape[2], 1
                ]

            if coords[1] != 0 and coords[1] != test_shape[1] - input_shape[1]:
                chunk_samples = chunk_samples[:, :, border2:-border2, :, :]
                coords = [coords[0], coords[1] + border2, coords[2]]
                trimmed_shape = [
                    trimmed_shape[0], trimmed_shape[1] - (2 * border2),
                    trimmed_shape[2], 1
                ]
            elif coords[1] != 0:
                chunk_samples = chunk_samples[:, :, border2:, :, :]
                coords = [coords[0], coords[1] + border2, coords[2]]
                trimmed_shape = [
                    trimmed_shape[0], trimmed_shape[1] - border2,
                    trimmed_shape[2], 1
                ]
            elif coords[1] != test_shape[1] - input_shape[1]:
                chunk_samples = chunk_samples[:, :, :-border2, :, :]
                trimmed_shape = [
                    trimmed_shape[0], trimmed_shape[1] - border2,
                    trimmed_shape[2], 1
                ]

            if coords[2] != 0 and coords[2] != test_shape[2] - input_shape[2]:
                chunk_samples = chunk_samples[:, :, :, border3:-border3, :]
                coords = [coords[0], coords[1], coords[2] + border3]
                trimmed_shape = [
                    trimmed_shape[0], trimmed_shape[1],
                    trimmed_shape[2] - (2 * border3), 1
                ]
            elif coords[2] != 0:
                chunk_samples = chunk_samples[:, :, :, border3:, :]
                coords = [coords[0], coords[1], coords[2] + border3]
                trimmed_shape = [
                    trimmed_shape[0], trimmed_shape[1],
                    trimmed_shape[2] - border3, 1
                ]
            elif coords[2] != test_shape[2] - input_shape[2]:
                chunk_samples = chunk_samples[:, :, :, :-border3, :]
                trimmed_shape = [
                    trimmed_shape[0], trimmed_shape[1],
                    trimmed_shape[2] - border3, 1
                ]

            # Increments each voxel in the counts array.
            counts = add_chunk_to_arr(counts, np.ones(trimmed_shape), coords,
                                      trimmed_shape)

            # Updates the sigmoid volume with the voxel means.
            chunk_mean = np.mean(chunk_samples, axis=0)
            sigmoid = add_chunk_to_arr(sigmoid, chunk_mean, coords,
                                       trimmed_shape)

            # Updates the percentile volumes.
            percentile_samples = np.percentile(chunk_samples,
                                               percentile_points,
                                               axis=0)
            percentiles = [
                add_chunk_to_arr(p, s, coords, trimmed_shape)
                for p, s in zip(percentiles, percentile_samples)
            ]

        # Divides each voxel by the number of times it was predicted.
        sigmoid = sigmoid / counts

        # Note that division automatically broadcasts across axis 0.
        percentiles = percentiles / counts

    else:
        # Predicts on entire slices.
        print()
        samples = np.zeros((mc_samples, ) + test_shape)

        # Performs Monte Carlo sampling.
        for i in range(mc_samples):
            samples[i] = model.predict(test, batch_size=batch_size)

        sigmoid = np.mean(samples, axis=0)
        percentiles = np.percentile(samples, percentile_points, axis=0)

    # Calculates prediction and uncertainty.
    pred = sigmoid.copy()
    pred[pred > 0.5] = 1.
    pred[pred <= 0.5] = 0.

    twenty = percentiles[num_percentiles // 5]
    eighty = percentiles[num_percentiles - ((num_percentiles // 5) + 1)]
    unc = eighty - twenty

    # If data was chunked, turn it back into the original size.
    if vnet and test_coords is not None and test_shape is not None:
        test = reconstruct(test, test_coords, test_shape)
        test_targets = reconstruct(test_targets, test_coords, test_shape)

    # Saves predictions.
    save_predictions(sigmoid, pred, percentiles, unc, test, test_targets)
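
This snippet also depends on project helpers (add_chunk_to_arr, reconstruct, save_predictions) and on ceil from the math module, none of which are shown. Judging only from the call sites above, add_chunk_to_arr accumulates a chunk into the larger volume at the given coordinates; a hypothetical sketch of that behaviour, not the project's actual implementation:

import numpy as np

def add_chunk_to_arr(arr, chunk, coords, shape):
    # Hypothetical: add chunk into arr at offset coords over the given spatial shape.
    x, y, z = coords
    arr[x:x + shape[0], y:y + shape[1], z:z + shape[2]] += np.squeeze(chunk)
    return arr
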
Example #5
    def calc_padding(self, distance):
        """Calculate an offset from a slice point.
        Return the offset as a number of tiles."""
        percent = random.randint(0, MAX_SLICE_OFFSET) / 100
        padding = int(distance * percent)
        return round_down(padding, 1)
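
Examples #2 and #5 come from the same dungeon generator: calc_padding draws a random offset of up to MAX_SLICE_OFFSET percent of a span, and slicing then moves the span's midpoint left or right by that offset. A self-contained sketch of how the two steps compose, with the class context and the constant's value assumed purely for illustration:

import random

MAX_SLICE_OFFSET = 25  # assumed value; the real constant lives in the project's settings

def calc_padding(distance):
    # Random offset of 0..MAX_SLICE_OFFSET percent of the distance, in tiles.
    return int(distance * random.randint(0, MAX_SLICE_OFFSET) / 100)

def slicing(start, distance, padding):
    # Midpoint of the span, nudged left or right by the padding.
    mid = start + distance // 2
    return mid - padding if random.randint(0, 1) else mid + padding

padding = calc_padding(40)       # 0..10 tiles for a 40-tile span
print(slicing(10, 40, padding))  # midpoint is tile 30, shifted by +/- padding
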
Example #6
query = SimpleStatement(
    'SELECT attribute,value FROM resource_attributes WHERE context = %s AND resource = %s'
)
print("Resources on %s:%s :" % (foreign_source, foreign_id))
for resource in resources:
    resource.attributes = []
    for row in session.execute(query, (CONTEXT, resource.key)):
        resource.attributes.append((row.attribute, row.value))
    print("%s: %s" % (resource.key, resource.attributes))

# Convert the times to timestamps expressed in seconds
start_ts = time.mktime(start.timetuple())
end_ts = time.mktime(end.timetuple())

# Compute the partition keys
first_partition = int(round_down(start_ts, SHARD))
last_partition = int(round_down(end_ts, SHARD)) + SHARD

partitions = []
for partition in range(first_partition, last_partition, SHARD):
    print("Partition: %d includes data from: %s, to %s" %
          (partition, df(partition), df(partition + SHARD)))
    partitions.append(partition)

# Gather the samples for every resource in every partition
query = SimpleStatement(
    'SELECT * from samples WHERE context = %s AND partition = %s and resource = %s'
)
for resource in resources:
    # Execute the query for each partition
    for partition in partitions:
Example #7
    tempX = build_dataset('./waveforms/', indexes, featureFile,
                          args.segment_size)
    X = np.append(X, tempX)

    print("Length of X so far: {}".format(X.shape))

# Restructure the dataset
X = FloatTensor(X)
X = X.view(-1, segment_size)

# Normalize the data in amplitude
X = (X - X.mean(dim=-1).unsqueeze(1)) / X.std(dim=-1).unsqueeze(1)

# Split training and validation sets from the same dataset
splitSize = round_down(int(len(X) * 0.8), segment_size)
print("Shape of X: {}".format(X.shape))
X_train = X[:splitSize]
X_test = X[splitSize:]
print("X_train length: {}".format(len(X_train)))
print("X_test length: {}".format(len(X_test)))

# number of epochs and training set batch size
num_epochs = args.num_epochs
batch_size = args.batch_size

########## Define the model ##########
model = convautoencoder()
if has_cuda:
    model = model.cuda()
model.train()