Example #1
    def __init__(self):
        input_reader = InputReader()
        self.state = input_reader.read(0)
        self.output_writer = OutputWriter('naive')
        self.num_rows = self.state['numRows']
        self.num_cols = self.state['numCols']
        self.num_drones = self.state['numDrones']
        self.num_turns = self.state['numTurns']
        self.max_payload = self.state['maxPayload']
        self.num_product_types = self.state['numProductTypes']
        self.product_weights = self.state['productWeights']
        self.num_warehouses = self.state['numWarehouses']
        # warehouse data: list of dicts with keys x, y, items (per-product stock)
        self.warehouse_data = self.state['warehousesData']
        # order data: list of dicts with keys x, y, numorders, items
        self.order_data = self.state['orderData']

        # build per-order item counts, indexed by product type
        self.item_lists = numpy.zeros(
            (len(self.order_data), len(self.product_weights)), dtype=int)

        self.turns_used = numpy.zeros(self.num_drones)
        self.drone_location = numpy.zeros((self.num_drones, 2))

        for order_idx in numpy.arange(len(self.order_data)):
            for item in self.order_data[order_idx]['items']:
                self.item_lists[order_idx][item] += 1

        # start every drone at cell (0, 0); redundant with the zeros() init above
        for n in numpy.arange(self.num_drones):
            self.drone_location[n] = [0, 0]
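This constructor assumes import numpy plus project-local InputReader/OutputWriter classes that are not shown. Hypothetical stubs with the expected shape (the keys come from the code above; the values are placeholders, not from the original dataset):

import numpy

class InputReader:
    """Hypothetical stub returning a state dict with the keys used above."""
    def read(self, dataset_id):
        return {
            'numRows': 100, 'numCols': 100, 'numDrones': 3, 'numTurns': 50,
            'maxPayload': 500, 'numProductTypes': 2, 'productWeights': [10, 20],
            'numWarehouses': 1,
            'warehousesData': [{'x': 0, 'y': 0, 'items': [5, 5]}],
            'orderData': [{'x': 1, 'y': 1, 'numorders': 2, 'items': [0, 1]}],
        }

class OutputWriter:
    """Hypothetical stub; the real writer serialises the computed commands."""
    def __init__(self, strategy_name):
        self.strategy_name = strategy_name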
Example #2
    def initialize(self, conf, context):
        try:
            self.counter = 0
            self.modname = conf["dispel4py.module"]
            self.scriptname = conf["dispel4py.script"]

            scriptconfig = pickle.loads(str(conf['dispel4py.config'])) if 'dispel4py.config' in conf else {}

            storm.log("Dispel4Py ------> loading script %s" % self.scriptname)
            mod = import_module(self.modname)
            self.script = getattr(mod, self.scriptname)()
            for key, value in scriptconfig.iteritems():
                storm.log("Dispel4Py ------> %s: setting attribute %s" % (self.scriptname, key))
                setattr(self.script, key, value)
            storm.log("Dispel4Py ------> loaded script %s" % self.scriptname)

            # attach an output writer to each output connection
            for outputname, output in self.script.outputconnections.iteritems():
                output['writer'] = OutputWriter(self.scriptname, outputname)

            # pre-processing if required
            self.script.preprocess()
            storm.log("Dispel4Py ------> %s: preprocess() completed." % (self.scriptname,))
        except:
            storm.log("Dispel4Py ------> %s: %s" % (self.scriptname, traceback.format_exc(),))
            raise
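The iteritems() calls and the storm module mark this as Python 2. For reference, a minimal Python 3 sketch of the same load-and-configure step (a hypothetical helper; it assumes the same conf keys and that the pickled payload arrives as bytes rather than a JSON string):

import pickle
from importlib import import_module

def load_script(conf):
    modname = conf["dispel4py.module"]
    scriptname = conf["dispel4py.script"]
    raw = conf.get("dispel4py.config")
    scriptconfig = pickle.loads(raw) if raw else {}
    # instantiate the named class and apply the configured attributes
    script = getattr(import_module(modname), scriptname)()
    for key, value in scriptconfig.items():  # .iteritems() in the Python 2 original
        setattr(script, key, value)
    return script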
Example #3
def execute_orders(orders_path, output_path):
    output_writer = OutputWriter(output_path)
    reader = FileReader(orders_path)
    while reader.has_next_line():
        args = reader.next_line().split(",")
        order = None
        if len(args) == 3:  # Receive order
            order = ReceiveOrder(args[0], args[1], args[2], output_writer)

        elif len(args) == 2:  # Send order
            order = SendOrder(args[0], args[1], output_writer)

        if order:
            order.execute()
        else:
            print("ERROR: unrecognised order line: %s" % ",".join(args))
Example #4
NUM_WORKERS = 1

total_images_count = 0

# COMMAND ----------

batch_classifier = BatchClassifier(
    can_use_cuda=CAN_USE_CUDA,
    classes_file_location=IMAGENET_CLASSES_LOCATION,
    batch_size=BATCH_SIZE,
    max_labels=MAX_LABELS,
    num_workers=NUM_WORKERS)

# COMMAND ----------

output_writer = OutputWriter(OUTPUT_CLASSIFIED_PARQUET)

# COMMAND ----------


def process_dataframe(dataframe):
    global total_images_count
    start_time_batch_processing = time.time()
    batch_size = dataframe.size  # element count; for a row count use len(dataframe)
    print("Classifying a Batch of size: %d" % batch_size)
    matched_labels = batch_classifier.classify_images(dataframe)
    size_matched_labels = len(matched_labels)
    print("Classified a batch of %d valid elements from %d possible elements" %
          (size_matched_labels, batch_size))
    output_writer.write_to_parquet(matched_labels)
    duration_batch_processing = time.time() - start_time_batch_processing
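The cell is truncated before duration_batch_processing and total_images_count are used, but the writer's interface is clear from the call above. A hypothetical pandas-based stand-in (an assumption, not the notebook's class; to_parquet needs pyarrow or fastparquet installed):

import os
import pandas as pd

class OutputWriter:
    """Hypothetical stand-in matching write_to_parquet() above."""
    def __init__(self, parquet_path):
        self.parquet_path = parquet_path
        self._part = 0
        os.makedirs(parquet_path, exist_ok=True)

    def write_to_parquet(self, records):
        # each classified batch becomes its own parquet part file
        pd.DataFrame(records).to_parquet(
            os.path.join(self.parquet_path, "part-%05d.parquet" % self._part))
        self._part += 1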
Example #5
from error_handler import ErrorHandler
from input_reader import InputReader
from output_writer import OutputWriter
from trace_manager import TraceManager
from trace_parser import TraceParser
from threading import Thread

error_handler = ErrorHandler()
writer = OutputWriter(error_handler)
trace_manager = TraceManager(writer)
parser = TraceParser(error_handler, trace_manager)
reader = InputReader(parser)

print("Start parsing")
clean_thread = Thread(target=trace_manager.clean_traces)
clean_thread.start()

reader.read()
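Nothing here stops the cleaner thread once reader.read() returns, so the process only exits if clean_traces() does. One conventional fix is a daemon thread plus an explicit stop event; the sketch below is an assumption (it presumes clean_traces() can be called repeatedly in a polling loop, which the real TraceManager may not support):

from threading import Event, Thread

stop_cleaning = Event()

def clean_periodically(manager, interval=1.0):
    # hypothetical polling wrapper around trace_manager.clean_traces()
    while not stop_cleaning.wait(interval):
        manager.clean_traces()

cleaner = Thread(target=clean_periodically, args=(trace_manager,), daemon=True)
cleaner.start()
reader.read()
stop_cleaning.set()
cleaner.join()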
Example #6
               "min_samples_split=0.1, n_jobs=1)"

    crf2_str = "RandomForestClassifier(n_estimators=n_trees, max_features=1, random_state=2, " \
               "min_samples_split=0.1, n_jobs=1)"

    prf1 = eval(prf1_str)
    prf2 = eval(prf2_str)
    crf1 = eval(crf1_str)
    crf2 = eval(crf2_str)

    return [prf1, prf2, crf1, crf2], [prf1_str, prf2_str, crf1_str, crf2_str]


for file_name in file_list:
    print(file_name)
    output_writer = OutputWriter('vlpso_result' + '_' + str(n_trees) +
                                 '_trees/' + file_name)

    # ---------------------- Prepare Data ---------------------- #

    D_train = np.loadtxt(data_folder + 'train1/' + file_name + '_train1.dat',
                         delimiter=',')
    D_val = np.loadtxt(data_folder + 'val/' + file_name + '_val.dat',
                       delimiter=',')
    D_test = np.loadtxt(data_folder + 'test/' + file_name + '_test.dat',
                        delimiter=',')

    X_train = D_train[:, :-1]
    Y_train = D_train[:, -1].astype(np.int32)
    X_val = D_val[:, :-1]
    Y_val = D_val[:, -1].astype(np.int32)
    X_test = D_test[:, :-1]
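eval() is used here only so the exact configuration string can be logged next to each fitted model. The same pairing works without eval (a sketch, assuming scikit-learn's RandomForestClassifier; repr() of an estimator prints its non-default parameters, so it doubles as the loggable string):

from sklearn.ensemble import RandomForestClassifier

def make_rf(n_trees, max_features, random_state):
    # build the classifier directly instead of eval-ing a string
    clf = RandomForestClassifier(n_estimators=n_trees, max_features=max_features,
                                 random_state=random_state,
                                 min_samples_split=0.1, n_jobs=1)
    return clf, repr(clf)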
Example #7
# ------------------------ Parameters ---------------------- #


def init_classifier():
    rf_str = "RandomForestClassifier(n_estimators=n_trees, max_features='sqrt', random_state=1, " \
             "min_samples_split=0.1, n_jobs=1)"

    rf = eval(rf_str)

    return rf, rf_str


for i_file in range(from_id, to_id):
    file_name = file_list[i_file]
    print(datetime.datetime.now(), ' File {}: '.format(i_file), file_name)
    output_writer = OutputWriter('result/{}/'.format(NAME) + file_name)

    # ---------------------- Prepare Data ---------------------- #
    D_train = np.loadtxt(data_folder + 'train1/' + file_name + '_train1.dat',
                         delimiter=',')
    D_val = np.loadtxt(data_folder + 'val/' + file_name + '_val.dat',
                       delimiter=',')
    D_test = np.loadtxt(data_folder + 'test/' + file_name + '_test.dat',
                        delimiter=',')

    X_train = D_train[:, :-1]
    Y_train = D_train[:, -1].astype(np.int32)
    X_val = D_val[:, :-1]
    Y_val = D_val[:, -1].astype(np.int32)
    X_test = D_test[:, :-1]
    Y_test = D_test[:, -1].astype(np.int32)
Example #8
import torch
import pandas as pd
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from output_writer import OutputWriter

ow = OutputWriter(5)
ow.write_to_csv(verbose=True)

# Create Tensors to hold dependent/independent variable data
train_df = pd.read_csv(ow.get_cached_csv("train"))  # read the cached CSV once
train_ind = train_df[["f1", "f2", "f3", "f4", "f5"]]
train_dep = train_df[["phone_class_index"]]

x = torch.from_numpy(train_ind.values).float()
y = torch.from_numpy(train_dep.values).long()
print(x)
print(y)

# Create a TensorDataset and DataLoader to provide the model with batches of data
train_ds = TensorDataset(x, y)
train_dl = DataLoader(train_ds, batch_size=32)

##### Set model layer dimensions
### D_in is the input dimension (5, one for each estimated formant)
D_in = x.shape[1]
### H is the hidden layer dimension
H = 16
### C is the number of final categories (there are 14 monophthongs)
C = 14
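The dimensions set up a straightforward classifier; a minimal sketch of the network they imply (an assumption: the original model definition is not shown, and a single hidden layer is presumed):

import torch.nn as nn

# hypothetical model matching D_in, H and C above
model = nn.Sequential(
    nn.Linear(D_in, H),  # 5 formant features -> 16 hidden units
    nn.ReLU(),
    nn.Linear(H, C),     # 16 hidden units -> 14 monophthong classes
)
loss_fn = nn.CrossEntropyLoss()  # targets are the class indices held in y
# Note: y above has shape (N, 1); CrossEntropyLoss expects (N,), so squeeze the
# target batch (yb.squeeze(1)) inside the training loop.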