def missing_message_data(path, channels=CHANNELS):
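    """Return True if any channel in any record under `path` has zero messages."""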
    for record in list_records(path):
        glog.info("reading records %s" % record)
        reader = RecordReader(record)
        for channel in channels:
            message_count = reader.get_messagenumber(channel)
            glog.info("%s has %d messages" % (channel, message_count))
            if message_count == 0:
                return True
    return False


def list_records(path):
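    """Recursively collect record files under `path`."""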
    glog.info("in list_records:%s" % path)
    records = []
    for (dirpath, _, filenames) in os.walk(path):
        glog.info('filenames: %s' % filenames)
        glog.info('dirpath %s' % dirpath)
        for filename in filenames:
            file_path = os.path.join(dirpath, filename)
            glog.info("candidate file: %s" % file_path)
            if record_utils.is_record_file(file_path):
                records.append(file_path)
    return records


def sanity_check(input_folder, email_receivers=None):
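    """Run all sanity checks on `input_folder`; log, and optionally email, the first failure."""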
    err_msg = None
    if missing_file(input_folder):
        err_msg = "One or more files are missing in %s" % input_folder
    elif parse_error(input_folder):
        err_msg = "Confige file cannot be parsed in %s" % input_folder
    elif missing_field(input_folder):
        err_msg = "One or more field is missing in Confige file %s" % input_folder
    elif missing_message_data(input_folder):
        err_msg = "Messages are missing in records of %s" % input_folder
    else:
        glog.info("%s Passed sanity check." % input_folder)
        return True

    if email_receivers:
        title = 'Error occurred during data sanity check'
        email_utils.send_email_error(title, {'Error': err_msg}, email_receivers)

    glog.error(err_msg)
    return False
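
# A minimal usage sketch (hypothetical folder path and receiver address, not
# part of the original snippet):
#
#     ok = sanity_check('/path/to/task_folder', email_receivers=['ops@example.com'])
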
def missing_file(path):
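    """Return True if any vehicle under `path` lacks a config file or record files."""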
    vehicles = multi_vehicle_utils.get_vehicle(path)
    glog.info("vehicles %s" % vehicles)
    for vehicle in vehicles:
        # config file
        conf = os.path.join(path, vehicle, ConfFile)
        glog.info("vehicles conf %s" % conf)
        if not os.path.exists(conf):
            glog.error('Missing configuration file in %s' % vehicle)
            return True
        # record file
        glog.info("list of records:" % list_records(os.path.join(path, vehicle)))
        if len(list_records(os.path.join(path, vehicle))) == 0:
            glog.error('No record files in %s' % vehicle)
            return True
    return False


def missing_field(path):
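    """Return True if a vehicle config lacks a vehicle ID or any required field is NaN."""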
    vehicles = multi_vehicle_utils.get_vehicle(path)
    glog.info("vehicles in missing field: %s" % vehicles)
    for vehicle in vehicles:
        conf_file = os.path.join(path, vehicle, ConfFile)
        glog.info("conf_file: %s" % conf_file)
        # reset for each vehicle to avoid overwrite
        pb_value = vehicle_config_pb2.VehicleConfig()
        conf = proto_utils.get_pb_from_text_file(conf_file, pb_value)
        glog.info("vehicles conf %s" % conf)
        if not check_vehicle_id(conf):
            return True
        # required field
        fields = [conf.vehicle_param.brake_deadzone,
                  conf.vehicle_param.throttle_deadzone,
                  conf.vehicle_param.max_acceleration,
                  conf.vehicle_param.max_deceleration]
        for field in fields:
            if math.isnan(field):
                return True
    return False
Example #6
if __name__ == '__main__':
    config = read_config()

    ham = HAM().to('cuda')
    flow_dataset = FlowDataset()
    flow_dataloader = DataLoader(
        flow_dataset,
        batch_size=eval(config['train']['BatchSize']),
        shuffle=eval(config['train']['Shuffle'])
    )

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(ham.parameters(), lr=1e-3, weight_decay=1e-5)

    for epoch in range(eval(config['train']['NumEpochs'])):
        log.info('Running Epoch #{}'.format(epoch + 1))
        total_loss = 0
        with tqdm(total=len(flow_dataset) // eval(config['train']['BatchSize'])) as pbar:
            for batch_id, sample in enumerate(flow_dataloader):
                pbar.update(1)
                optimizer.zero_grad()
                X, y = sample
                prediction = ham(X.type(torch.long).to('cuda'))

                loss = criterion(prediction, y.to('cuda'))
                total_loss += loss.item()

                loss.backward()
                optimizer.step()

        log.info('Epoch #{} ==> {:.4f}'.format(epoch+1, total_loss))
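
# The loop above never persists the trained weights, yet Example #8 below loads
# them from 'ham.pt'. A minimal checkpoint-saving sketch (an assumption, not
# part of the original snippet; the filename matches Example #8's load_model):
torch.save(ham.state_dict(), 'ham.pt')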
Example #7
    #flow_embeddings = np.zeros((10000, 256))

    model = HierarchicalAttentionModel().to('cuda')
    model.load_state_dict(
        torch.load(
            wrap_path(config['ModelSavePath']).format(epoch=config['Checkpoint'])
        )
    )

    criterion = nn.CrossEntropyLoss()

    X, y = fetch_dataset()
    test_dataset = TensorDataset(torch.Tensor(X), torch.Tensor(y))
    test_dataloader = DataLoader(test_dataset, batch_size=128)

    log.info('Running inference...')
    hit, all_preds = 0, []  # accumulators used inside the loop below
    with torch.no_grad():
        # len(test_dataloader) already counts batches, so no further division is needed
        num_iteration = len(test_dataloader)
        with tqdm(total=num_iteration) as pbar:
            for batch_id, (test_X, test_y) in enumerate(test_dataloader):
                preds, flow_embedding = model(test_X.type(torch.long).to('cuda'))
                loss = criterion(preds, test_y.type(torch.long).to('cuda'))

                hit += (preds.argmax(dim=1) == test_y.type(torch.long).to('cuda')).sum().item()
                all_preds += [i.item() for i in preds.argmax(dim=1)]
                #flow_embeddings[batch_id*128:batch_id*128+flow_embedding.shape[0],] = flow_embedding.data.cpu().numpy()

                pbar.update(1)

    accuracy = 100 * (hit / len(test_dataset))
    log.info('Accuracy : {:.2f}'.format(accuracy))
Example #8
def read_args():
    # Hedged reconstruction: the original definition is truncated here; only a
    # `flow` argument (a path to a saved .npy flow sample) is used below.
    parser = argparse.ArgumentParser()
    parser.add_argument('--flow', help='path to an .npy flow sample')
    return parser.parse_args()


def load_model():
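    """Load trained HAM weights from 'ham.pt' and return the model in eval mode."""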
    ham = HAM().to('cuda')
    ham.load_state_dict(torch.load('ham.pt'))

    return ham.eval()


if __name__ == '__main__':
    args = read_args()
    ham = load_model()

    with open('label_map.pkl', 'rb') as f:
        label_map = pickle.load(f)

    data = torch.Tensor(np.load(args.flow)).type(
        torch.long).unsqueeze(0).to('cuda')
    prediction = ham(data)

    scores = F.softmax(prediction, dim=1).squeeze()

    item_id = prediction.argmax().item()

    predicted_item_name = label_map[item_id]
    confidence = scores[item_id].item()

    log.info('Predicted => {} (Confidence {:.2f}%)'.format(
        predicted_item_name, confidence * 100))
Example #9
if __name__ == '__main__':
    config = read_config('train')
    history = []

    model = HierarchicalAttentionModel().to('cuda')
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=eval(config['LearningRate']))

    X, y = fetch_dataset()
    num_split = eval(config['KFoldSplit'])
    kfold = StratifiedKFold(n_splits=num_split)

    for epoch in range(eval(config['NumEpoch'])):
        log.info(f'Running Epoch #{epoch+1}')
        total_train_loss, total_valid_loss = 0, 0

        train_dataset = TensorDataset(torch.Tensor(X), torch.Tensor(y))
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=eval(config['BatchSize']))

        train_loss = 0
        num_train_iteration = len(train_dataset) // eval(config['BatchSize'])

        with tqdm(total=num_train_iteration) as pbar:
            for batch_id, (train_X, train_y) in enumerate(train_dataloader):
                pbar.update(1)
                optimizer.zero_grad()

                preds = model(train_X.type(torch.long).to('cuda'))