Code example #1
0
 def __init__(self):
     """Attach a fresh run helper instance to this object.

     NOTE(review): `run` is presumably a project-local module providing
     the `Run` class -- confirm against the imports of the full file.
     """
     # A bare `return` at the end of __init__ is redundant (Python returns
     # None implicitly), so it has been removed.
     self.r = run.Run()
Code example #2
0
import net
import traci
import rou
import environment
import sumocfg as sc
import run

# Create nod.xml; the road length is 1005.
net.Simple.nod(1005)
# Create edg.xml; the road speed limit is 100.
net.Simple.edg(100)
# Merge nod.xml and edg.xml into net.xml (path points at the SUMO bin dir).
net.Simple.netconvert(r'D:\APPs\SUMO\bin')
# Generate the sumocfg file with a simulation step of 0.1.
sc.cfg(0.1)
# Check the SUMO_HOME environment and build the SUMO command line.
sumoCmd = environment.main()
# Start the simulation via TraCI.
traci.start(sumoCmd)
# Instantiate the vehicle object.
# NOTE(review): the original comment said "vehicle length 4 m" but 5 is
# passed -- confirm the argument's meaning and intended value.
veh0 = rou.HeadwayDistribution(5)
# Vehicles follow a uniform distribution: 10 vehicles, arguments (20, 20).
# NOTE(review): the original comment claimed initial spacing 100 m and
# speed 20 m/s, but the call passes (20, 20, 10) -- verify which argument
# is spacing and which is speed.
veh0.uniform(20, 20, 10)
# Instantiate the simulation object: duration 1000 s, step 0.1,
# periodic boundary, DataFrame output.
run0 = run.Run(1000, 0.1, 1000, 0, output_df=True)
# Run the FVD car-following model.
# NOTE(review): the original comment listed IDM parameters alpha=3, beta=2,
# v0=50, s0=5, T=3, but the call is FVD(3, 2, 20, 15) -- the parameter
# mapping should be verified against run.Run.FVD.
run0.FVD(3, 2, 20, 15)
# End the simulation and close the SUMO GUI.
traci.close()
Code example #3
0
def train_loop(
    data_obj: data.Data,
    test_datafile_name: str,
    model_class=cnn_feature_model.CNNModel,
    kfold: bool = False,
    fold: Union[int, None] = None,
    desc: bool = True,
):
    """Train `model_class` on the data provided by `data_obj`.

    Builds the train/valid/test split, pickles the test split to disk,
    constructs datasets and dataloaders, then trains for `config.epochs`
    epochs, checkpointing the model state whenever the validation
    ROC-AUC score improves.

    Args:
        data_obj: project data wrapper; must expose `get_data(...)`.
        test_datafile_name: file name (inside `config.data_dir`) to which
            the held-out test split is pickled.
        model_class: the model CLASS to instantiate (not an instance).
        kfold: whether K-fold cross-validation indexing is in use.
        fold: fold index in range [0, config.folds); required when `kfold`
            is True, ignored (reset to None) otherwise.
        desc: when True, log a model summary for the first fold.

    Raises:
        ValueError: if `kfold` is True but `fold` is None.
    """
    # TODO: Implement K-fold cross-validation
    if kfold and (fold is None):
        # ValueError is more precise than the original bare Exception and
        # remains backward compatible with callers catching Exception.
        raise ValueError(
            f"K-fold cross validation is passed but fold index in range [0, {config.folds}) is not specified."
        )
    if (not kfold) and (fold is not None):
        LOGGER.info(
            f"K-fold is set to {kfold} but fold index is passed!"
            " Proceeding without using K-fold."
        )
        fold = None

    # test data file path
    test_data_file = os.path.join(config.data_dir, test_datafile_name)

    LOGGER.info("-" * 60)
    if config.balance_classes:
        LOGGER.info("Training with balanced classes.")
    else:
        LOGGER.info("Training using unbalanced (original) classes.")

    LOGGER.info(f"Test data will be saved to: {test_data_file}")
    LOGGER.info("-" * 30)
    LOGGER.info(f"       Training fold: {fold}       ")
    LOGGER.info("-" * 30)

    # turn off model details for subsequent folds/epochs
    if fold is not None and fold >= 1:
        desc = False

    # create train, valid and test data
    train_data, valid_data, test_data = data_obj.get_data(
        shuffle_sample_indices=True, fold=fold
    )

    # dump test data into a file so evaluation can be run separately later
    with open(test_data_file, "wb") as f:
        pickle.dump(
            {
                "signals": test_data[0],
                "labels": test_data[1],
                "sample_indices": test_data[2],
                "window_start": test_data[3],
            },
            f,
        )

    # create image transforms
    # BUG FIX: `model_class` is itself a class, so `type(model_class).__name__`
    # evaluated to the metaclass name ("type") and never matched this list,
    # meaning transforms were always applied. Compare the class's own name.
    if model_class.__name__ in ["FeatureModel", "CNNModel"]:
        transforms = None
    else:
        transforms = data.get_transforms()

    # create datasets
    train_dataset = data.ELMDataset(
        *train_data,
        config.signal_window_size,
        config.label_look_ahead,
        stack_elm_events=config.stack_elm_events,
        transform=transforms,
    )

    valid_dataset = data.ELMDataset(
        *valid_data,
        config.signal_window_size,
        config.label_look_ahead,
        stack_elm_events=config.stack_elm_events,
        transform=transforms,
    )

    # training and validation dataloaders
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
        pin_memory=True,
        drop_last=True,
    )

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        pin_memory=True,
        drop_last=True,
    )

    # model
    model = model_class()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model_name = type(model).__name__
    LOGGER.info("-" * 50)
    LOGGER.info(f"       Training with model: {model_name}       ")
    LOGGER.info("-" * 50)

    # display model details (summary of layers/params) with a dummy input
    if desc:
        if config.stack_elm_events and model_name == "StackedELMModel":
            input_size = (config.batch_size, 1, config.size, config.size)
        else:
            # NOTE(review): 8x8 spatial extent appears hard-coded to the BES
            # channel layout -- confirm against the model definition.
            input_size = (
                config.batch_size,
                1,
                config.signal_window_size,
                8,
                8,
            )
        x = torch.rand(*input_size)
        x = x.to(device)
        cnn_feature_model.model_details(model, x, input_size)

    # optimizer
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=config.learning_rate,
        weight_decay=config.weight_decay,
        amsgrad=False,
    )

    # get the lr scheduler
    scheduler = get_lr_scheduler(
        optimizer, scheduler_name=config.scheduler, dataloader=train_loader
    )

    # loss function; per-sample losses (reduction="none") are consumed by Run
    criterion = nn.BCEWithLogitsLoss(reduction="none")

    # define variables for ROC and loss
    best_score = 0
    best_loss = np.inf

    # instantiate training object
    engine = run.Run(
        model,
        device=device,
        criterion=criterion,
        optimizer=optimizer,
        use_focal_loss=True,
    )

    # checkpoint path is loop-invariant: hoist it out of the epoch loop
    model_save_path = os.path.join(
        config.model_dir,
        f"{model_name}_fold{fold}_best_roc_{config.data_mode}.pth",
    )

    # iterate through all the epochs
    for epoch in range(config.epochs):
        start_time = time.time()

        if config.scheduler in [
            "CosineAnnealingLR",
            "CyclicLR",
            "CyclicLR2",
            "OneCycleLR",
        ]:
            # these schedulers are stepped per-batch inside engine.train
            avg_loss = engine.train(
                train_loader, epoch, scheduler=scheduler, print_every=5000
            )

            # evaluate
            avg_val_loss, preds, valid_labels = engine.evaluate(
                valid_loader, print_every=2000
            )
            # recreate the scheduler so its cycle restarts each epoch
            scheduler = get_lr_scheduler(
                optimizer,
                scheduler_name=config.scheduler,
                dataloader=train_loader,
            )
        else:
            # train
            avg_loss = engine.train(train_loader, epoch, print_every=5000)

            # evaluate
            avg_val_loss, preds, valid_labels = engine.evaluate(
                valid_loader, print_every=2000
            )

            # step the scheduler once per epoch
            if config.scheduler == "ReduceLROnPlateau":
                scheduler.step(avg_val_loss)
            else:
                scheduler.step()

        # scoring
        roc_score = roc_auc_score(valid_labels, preds)
        elapsed = time.time() - start_time

        LOGGER.info(
            f"Epoch: {epoch + 1}, \tavg train loss: {avg_loss:.4f}, \tavg validation loss: {avg_val_loss:.4f}"
        )
        LOGGER.info(
            f"Epoch: {epoch +1}, \tROC-AUC score: {roc_score:.4f}, \ttime elapsed: {elapsed}"
        )

        # save the model if best ROC is found
        if roc_score > best_score:
            best_score = roc_score
            LOGGER.info(
                f"Epoch: {epoch+1}, \tSave Best Score: {best_score:.4f} Model"
            )
            torch.save(
                {"model": model.state_dict(), "preds": preds},
                model_save_path,
            )

        # best-loss tracking is log-only; no checkpoint is written here
        if avg_val_loss < best_loss:
            best_loss = avg_val_loss
            LOGGER.info(
                f"Epoch: {epoch+1}, \tSave Best Loss: {best_loss:.4f} Model"
            )
        LOGGER.info(f"Model saved to: {model_save_path}")
Code example #4
0
File: main.py  Project: Anton-Sl/QC
# Create a test-instance entity via the generic entity endpoint.
cc = TestInstance(testId=214, cycleId=0)
print(client.CreateEntity("test-instances", cc))

# Create a manual test definition.
cc = Test("Basic", "1-Low", "Reviewed", "1175", "Test#!1", "MANUAL")
print(client.CreateEntity("tests", cc))

# Create a test configuration linked to test id 214.
cc = TestConfigs("Test1", "214")
print(client.CreateEntity("test-config", cc))

# Create a test-set folder.
cc = TestSetFolders("ExampleFolder1")
print(client.CreateEntity("test-set-folder", cc))

# Create a release cycle (dates and id as positional arguments).
cc = ReleaseCycle("2018-07-29", "ExampleCycle1", 1001, "2018-07-25")
print(client.CreateEntity("release-cycle", cc))

# Create a run entity.
cc = Runs("Test1", 214, 1001, "")
print(client.CreateEntity("run", cc))

# Create a test instance via the dedicated helper (contrast with the
# generic CreateEntity call above).
cc = TestInstance(testId=214, cycleId=0)
print(client.CreateTestInstance(cc))

# Create a test set.
cc = TestSet("FirstSet1")
print(client.CreateTestSet(cc))

# Create a test run.
# NOTE(review): `a` is not defined anywhere in this snippet -- it must be
# created earlier in the full file; confirm before reuse.
v = run.Run()
client.CreateTestRun(run=v, status="1-Low", instanceDataToUpdate=a)

# Look up the test object by test-case id, then list available fields.
print(client.GetTestObjByTcId(214))

print(client.GetFields(entityType="test-set-folder"))
Code example #5
0
File: ioreg.py  Project: qushe-me/VGTabMerge
 def __init__(self):
     """Initialize with a run helper and an empty ioreg cache dict."""
     self.r = run.Run()
     self.ioreg = {}
Code example #6
0
 def checkForNewXTCs(self):
     """Register a run.Run object in self.R for each newly seen run id."""
     known = self.R
     run_ids, xtc_lists = self._getRunsWithXTCs()
     for run_id, xtc in zip(run_ids, xtc_lists):
         if run_id in known:
             continue
         known[run_id] = run.Run(run_id, self.C, xtc)
Code example #7
0
def train_loop(df: pd.DataFrame, fold: int, desc: bool = False) -> pd.DataFrame:
    """Train a SETI model on one cross-validation fold of `df`.

    Splits `df` by its "fold" column, trains for `config.EPOCHS` epochs,
    checkpoints the model whenever the validation ROC-AUC improves, and
    returns the validation dataframe augmented with a "preds" column
    loaded back from the best-ROC checkpoint.

    Args:
        df: dataframe with at least "fold" and "target" columns.
        fold: fold index used as the validation split.
        desc: when True (first fold only), log a model summary.
    """
    LOGGER.info("-" * 30)
    LOGGER.info(f"       Training fold: {fold}       ")
    LOGGER.info("-" * 30)

    # turn off model details for subsequent folds
    if fold >= 1:
        desc = False

    # divide the data into training and validation dataframes based on folds
    train_idx = df[df["fold"] != fold].index
    valid_idx = df[df["fold"] == fold].index

    train_folds = df.loc[train_idx].reset_index(drop=True)
    valid_folds = df.loc[valid_idx].reset_index(drop=True)

    valid_labels = valid_folds["target"].values

    # get the image augmentations
    train_transforms = dataset.get_train_transforms()
    valid_transforms = dataset.get_valid_transforms()

    # create training and validation datasets
    train_data = dataset.SETIDataset(train_folds, transform=train_transforms)
    valid_data = dataset.SETIDataset(valid_folds, transform=valid_transforms)

    # training and validation dataloaders
    # drop_last=False on the validation loader so `preds` lines up 1:1
    # with `valid_labels` for the ROC computation below
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=config.BATCH_SIZE,
        shuffle=True,
        num_workers=config.NUM_WORKERS,
        pin_memory=True,
        drop_last=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=config.BATCH_SIZE,
        shuffle=False,
        num_workers=config.NUM_WORKERS,
        pin_memory=True,
        drop_last=False,
    )

    # model
    model = seti_model.SETIModel(model_name=config.MODEL_NAME, pretrained=True)
    # model = test_model.CNNModel()
    LOGGER.info(f"Training with model: {config.MODEL_NAME}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # print model details with a dummy single-channel input
    if desc:
        x = torch.rand(config.BATCH_SIZE, 1, config.SIZE, config.SIZE)
        x = x.to(device)
        seti_model.model_details(model, x)

    # optimizer
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=config.LEARNING_RATE,
        weight_decay=config.WEIGHT_DECAY,
        amsgrad=False,
    )

    # learning rate scheduler
    scheduler = get_lr_scheduler(optimizer, scheduler_name=config.SCHEDULER)

    # define the loss function
    criterion = nn.BCEWithLogitsLoss()

    # define variables for ROC and loss
    best_score = 0
    best_loss = np.inf

    # instantiate training object
    engine = run.Run(
        model,
        device=device,
        criterion=criterion,
        optimizer=optimizer,
        use_mixup=config.USE_MIXUP,
    )

    # iterate through all the epochs
    for epoch in range(config.EPOCHS):
        start_time = time.time()

        if config.SCHEDULER in ["CosineAnnealingLR", "CyclicLR", "CyclicLR2"]:
            # these schedulers are stepped per-batch inside engine.train;
            # the scheduler is then recreated so its cycle restarts each epoch
            avg_loss = engine.train(train_loader,
                                    epoch,
                                    scheduler=scheduler,
                                    print_every=100)

            # evaluate
            avg_val_loss, preds = engine.evaluate(valid_loader, print_every=50)
            scheduler = get_lr_scheduler(optimizer,
                                         scheduler_name=config.SCHEDULER)
        else:
            # train
            avg_loss = engine.train(train_loader, epoch, print_every=100)

            # evaluate
            avg_val_loss, preds = engine.evaluate(valid_loader, print_every=50)

            # step the scheduler once per epoch
            if config.SCHEDULER == "ReduceLROnPlateau":
                scheduler.step(avg_val_loss)
            else:
                scheduler.step()

        # scoring
        roc_score = metrics.roc_auc_score(valid_labels, preds)
        elapsed = time.time() - start_time

        LOGGER.info(
            f"Epoch: {epoch + 1}, \tavg train loss: {avg_loss:.4f}, \tavg validation loss: {avg_val_loss:.4f}"
        )
        LOGGER.info(
            f"Epoch: {epoch +1}, \tROC-AUC score: {roc_score:.4f}, \ttime elapsed: {elapsed}"
        )

        # checkpoint model state and predictions on ROC improvement
        if roc_score > best_score:
            best_score = roc_score
            LOGGER.info(
                f"Epoch: {epoch+1}, \tSave Best Score: {best_score:.4f} Model")
            torch.save(
                {
                    "model": model.state_dict(),
                    "preds": preds
                },
                os.path.join(
                    config.MODEL_DIR,
                    f"{config.MODEL_NAME}_fold{fold}_best_roc.pth",
                ),
            )

        # NOTE: best-loss tracking is log-only; the corresponding save is
        # deliberately commented out below
        if avg_val_loss < best_loss:
            best_loss = avg_val_loss
            LOGGER.info(
                f"Epoch: {epoch+1}, \tSave Best Loss: {best_loss:.4f} Model")
            # torch.save(
            #     {"model": model.state_dict(), "preds": preds},
            #     os.path.join(
            #         config.MODEL_DIR,
            #         f"{config.MODEL_NAME}_fold{fold}_best_loss.pth",
            #     ),
            # )
    # save the predictions in the valid dataframe
    # NOTE(review): this assumes at least one epoch improved the ROC so the
    # checkpoint file exists (roc_score > 0 normally guarantees this on the
    # first epoch, but EPOCHS == 0 or a 0.0 score would raise here) -- confirm.
    valid_folds["preds"] = torch.load(
        os.path.join(config.MODEL_DIR,
                     f"{config.MODEL_NAME}_fold{fold}_best_roc.pth"),
        map_location=torch.device("cpu"),
    )["preds"]

    return valid_folds