Code example #1
def backup_devices(self, testbed):
    for device in testbed:
        try:
            # create_backup()/get_backup_path() are project helpers that
            # are assumed to take a pyATS device object
            create_backup(device)
            backup_path = get_backup_path(device)
            device.backup_path = backup_path
        except Exception as e:
            self.errored(
                reason=f"Failed to back up {device.name}",
                goto=['exit'],
                from_exception=e)
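A hypothetical harness for the method above, assuming it lives on a pyATS aetest common-setup class; `create_backup` and `get_backup_path` are project helpers that are not shown in the snippet:

from pyats import aetest

class CommonSetup(aetest.CommonSetup):

    @aetest.subsection
    def backup_devices(self, testbed):
        ...  # body as in the example above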
Code example #2
import json
import os
import subprocess

import utils  # project-local module providing create_backup()


def add_root(root, input_json, input_folder):
    """Prepend the root to the json

    This method modifies the json and (optionally) the folder, such that:
        - `root` will be prepended to the filepath keys in `input_json`
        - the `input_json` file will be renamed to `root`.json
        - `input_folder` will be renamed to `root`

    # Arguments
        root [str]: the root name
        input_json [str]: path to the input json
        input_folder [str]: path to the input folder

    # Returns
        [str]: target output folder
        [str]: target output json
    """
    input_folder = (input_folder[:-1]
                    if input_folder[-1] == '/' else input_folder)
    _ = utils.create_backup(input_folder, input_json,
                            'dataversioning_backup_add_root')

    with open(input_json, 'r') as f_in:
        features = json.load(f_in)
        features_new = {}

    print('Adding the root to json keys...')
    for each_file in features.keys():
        filename_new = os.path.join(root, each_file)
        features_new[filename_new] = features[each_file]

    # rename the data folder to <root> alongside its current location
    target_folder = os.path.join(os.path.dirname(input_folder), root)
    subprocess.run(['mv', input_folder, target_folder])
    print('Moved the folder into {}'.format(target_folder))

    target_json = os.path.join(os.path.dirname(input_json),
                               '{}.json'.format(root))

    with open(target_json, 'w') as f_out:
        json.dump(features_new,
                  f_out,
                  ensure_ascii=False,
                  indent=4,
                  separators=(',', ': '),
                  sort_keys=True)
    _ = subprocess.run(['rm', input_json])
    print('Dumped the new json file into {}'.format(target_json))

    return target_folder, target_json
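A minimal usage sketch, assuming a dataset folder `images/` and its metadata file `labels.json` sit side by side (paths and root name are illustrative):

# hypothetical call: renames images/ -> train/, labels.json -> train.json,
# and prefixes every filepath key in the json with 'train'
new_folder, new_json = add_root('train', 'labels.json', 'images')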
Code example #3
def _main_(args):

    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if config['backup']['create_backup']:
        config = create_backup(config)

    keras.backend.tensorflow_backend.set_session(get_session())

    #paths for the training and validation datasets
    datasetTrainPath = os.path.join(args.folder, "train")
    datasetValPath = os.path.join(args.folder, "val")

    for folder in [datasetTrainPath, datasetValPath]:
        if not os.path.isdir(folder):
            raise Exception("{} doesn't exist!".format(folder))

    # the immediate subdirectories of each split are the class names;
    # sort them so the comparison and class ordering are deterministic
    classesTrain = sorted(next(os.walk(datasetTrainPath))[1])
    classesVal = sorted(next(os.walk(datasetValPath))[1])

    if classesVal != classesTrain:
        raise Exception(
            "The training and validation classes must be the same!")
    else:
        folders = classesTrain

    #training configuration
    epochs = config['train']['nb_epochs']
    batchSize = config['train']['batch_size']
    width = config['model']['input_size_w']
    height = config['model']['input_size_h']
    depth = 1 if config['model']['gray_mode'] else 3

    #config keras generators
    # with just 2 classes the model will have a single binary output
    if len(folders) == 2:
        classes = 1
    else:
        classes = len(folders)

    #count all samples
    imagesTrainPaths = []
    imagesValPaths = []
    for folder in folders:
        imagesTrainPaths += list(
            list_images(os.path.join(datasetTrainPath, folder)))
        imagesValPaths += list(
            list_images(os.path.join(datasetValPath, folder)))

    generator_config = {
        'IMAGE_H': height,
        'IMAGE_W': width,
        'IMAGE_C': depth,
        'BATCH_SIZE': batchSize
    }

    #callbacks
    model_name = config['train']['saved_weights_name']
    checkPointSaverBest = ModelCheckpoint(model_name,
                                          monitor='val_acc',
                                          verbose=1,
                                          save_best_only=True,
                                          save_weights_only=False,
                                          mode='auto',
                                          period=1)
    # splitext()[0] is the model name without its extension
    ckp_model_name = os.path.splitext(model_name)[0] + "_ckp.h5"
    checkPointSaver = ModelCheckpoint(ckp_model_name,
                                      verbose=1,
                                      save_best_only=False,
                                      save_weights_only=False,
                                      period=10)

    tb = TensorBoard(log_dir=config['train']['tensorboard_log_dir'],
                     histogram_freq=0,
                     batch_size=batchSize,
                     write_graph=True,
                     write_grads=False,
                     write_images=False,
                     embeddings_freq=0,
                     embeddings_layer_names=None,
                     embeddings_metadata=None)

    #create the classification model
    # make the feature extractor layers
    # depth is 1 (grayscale) or 3 (RGB), so both cases collapse to this
    input_size = (height, width, depth)
    input_image = Input(shape=input_size)

    feature_extractor = import_feature_extractor(config['model']['backend'],
                                                 input_size)

    train_generator = BatchGenerator(imagesTrainPaths,
                                     generator_config,
                                     norm=feature_extractor.normalize,
                                     jitter=True)

    val_generator = BatchGenerator(imagesValPaths,
                                   generator_config,
                                   norm=feature_extractor.normalize,
                                   jitter=False)

    features = feature_extractor.extract(input_image)

    # make the model head
    output = Conv2D(classes, (1, 1), padding="same")(features)
    output = BatchNormalization()(output)
    output = LeakyReLU(alpha=0.1)(output)
    output = GlobalAveragePooling2D()(output)
    # a binary head uses a sigmoid; a multi-class head uses a softmax
    activation = "sigmoid" if classes == 1 else "softmax"
    output = Activation(activation)(output)

    if config['train']['pretrained_weights'] != "":
        model = load_model(config['train']['pretrained_weights'])
    else:
        model = Model(input_image, output)
        opt = Adam()
        loss = ("binary_crossentropy" if classes == 1
                else "categorical_crossentropy")
        model.compile(loss=loss, optimizer=opt, metrics=["accuracy"])
    model.summary()

    model.fit_generator(train_generator,
                        steps_per_epoch=len(imagesTrainPaths) // batchSize,
                        epochs=epochs,
                        validation_data=val_generator,
                        validation_steps=len(imagesValPaths) // batchSize,
                        callbacks=[checkPointSaverBest, checkPointSaver, tb],
                        workers=12,
                        max_queue_size=40)
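The configuration file this script reads is not shown; below is a sketch of the keys it consumes, with illustrative values (assumptions, not taken from the original project):

config = {
    "backup": {"create_backup": True},
    "model": {"backend": "MobileNet", "input_size_w": 224,
              "input_size_h": 224, "gray_mode": False},
    "train": {"nb_epochs": 50, "batch_size": 32,
              "saved_weights_name": "classifier.h5",
              "pretrained_weights": "",
              "tensorboard_log_dir": "logs"},
}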
Code example #4
def restore_to_file_with_backing_file_test(backing_dev, backup_target):
    length0 = 4 * 1024
    length1 = 256
    length2 = 128
    offset0 = 0
    offset1 = length1 + offset0
    offset2 = length2 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot.
    # data in output image == data in backing
    check_backing()
    check_empty_volume(backing_dev)
    snap0 = cmd.snapshot_create()
    backup = create_backup(backup_target, snap0)

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset0, length0)
    dev_checksum = common.checksum_dev(backing_dev)
    assert volume_data != ""
    assert volume_data == backing_data

    cmd.restore_to_file(backup, file(BACKING_FILE_QCOW2),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output0_raw = read_file(output_raw_path, offset0, length0)
    output0_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE))
    assert output0_raw == backing_data
    assert output0_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(backup, file(BACKING_FILE_QCOW2),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output0_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    output0_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output0_qcow2 == backing_data
    assert output0_qcow2 == volume_data
    assert output0_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    rm_backups([backup])

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length1) + backing(offset1, ...)
    snap1_data = common.random_string(length1)
    common.verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create()
    backup = create_backup(backup_target, snap1)

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(
        offset1, length0 - offset1)
    dev_checksum = common.checksum_dev(backing_dev)

    cmd.restore_to_file(backup, file(BACKING_FILE_QCOW2),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw_snap1 = read_file(
        output_raw_path, offset0, length1)
    output1_raw_backing = read_file(
        output_raw_path, offset1, length0 - offset1)
    output1_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE))
    assert output1_raw_snap1 == snap1_data
    assert output1_raw_backing == backing_data
    assert output1_raw_snap1 + output1_raw_backing == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(backup, file(BACKING_FILE_QCOW2),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output1_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output1_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output1_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output1_qcow2_snap1 == snap1_data
    assert output1_qcow2_backing == backing_data
    assert output1_qcow2_snap1 + output1_qcow2_backing == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    cmd.snapshot_revert(snap0)
    rm_snaps([snap1])
    rm_backups([backup])
    check_backing()
    check_empty_volume(backing_dev)

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length2) +
    #          snap1(offset2, length1 - length2) + backing(offset1, ...)
    snap1_data = common.random_string(length1)
    common.verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create()
    snap2_data = common.random_string(length2)
    common.verify_data(backing_dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create()
    backup = create_backup(backup_target, snap2)

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(
        offset1, length0 - offset1)
    dev_checksum = common.checksum_dev(backing_dev)

    cmd.restore_to_file(backup, file(BACKING_FILE_QCOW2),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(
        output_raw_path, offset0, length2)
    output2_raw_snap1 = read_file(
        output_raw_path, offset2, length1 - length2)
    output2_raw_backing = read_file(
        output_raw_path, offset1, length0 - offset1)
    output2_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE))
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset2: length1]
    assert output2_raw_backing == backing_data
    assert \
        volume_data == \
        output2_raw_snap2 + output2_raw_snap1 + output2_raw_backing
    assert output2_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(backup, file(BACKING_FILE_QCOW2),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length2)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset2, length1 - length2)
    output2_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output2_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output2_qcow2_snap2 == snap2_data
    assert output2_qcow2_snap1 == snap1_data[offset2: length1]
    assert output2_qcow2_backing == backing_data
    assert \
        volume_data == \
        output2_qcow2_snap2 + output2_qcow2_snap1 + output2_qcow2_backing
    assert output2_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    cmd.snapshot_revert(snap0)
    rm_snaps([snap1, snap2])
    rm_backups([backup])
    check_backing()
    check_empty_volume(backing_dev)
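For reference, the layout that the third scenario's assertions encode (offsets in bytes; this summary is mine, not part of the original test):

# [0, 128)    -> snap2 data (length2)
# [128, 256)  -> tail of snap1 data (snap1_data[128:256])
# [256, 4096) -> data read from the backing file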
Code example #5
def restore_to_file_without_backing_file_test(dev, backup_target):
    length0 = 256
    length1 = 128
    offset0 = 0
    offset1 = length1 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot for converting to init state.
    snap0 = cmd.snapshot_create()

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length0)
    snap1_data = common.random_string(length0)
    common.verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create()
    backup = create_backup(backup_target, snap1)

    cmd.restore_to_file(backup, "",
                        output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw = read_file(output_raw_path, offset0, length0)
    assert output1_raw == snap1_data
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(backup, "",
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output1_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    assert output1_qcow2 == snap1_data
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    cmd.snapshot_revert(snap0)
    rm_snaps([snap1])
    rm_backups([backup])

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length1) +
    #          snap1(offset1, length0 - length1)
    snap1_data = common.random_string(length0)
    common.verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create()
    snap2_data = common.random_string(length1)
    common.verify_data(dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create()
    backup = create_backup(backup_target, snap2)

    cmd.restore_to_file(backup, "",
                        output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(
        output_raw_path, offset0, length1)
    output2_raw_snap1 = read_file(
        output_raw_path, offset1, length0 - length1)
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset1: length0]
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(backup, "",
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - length1)
    assert output2_qcow2_snap2 == snap2_data
    assert output2_qcow2_snap1 == snap1_data[offset1: length0]
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    cmd.snapshot_revert(snap0)
    rm_snaps([snap1, snap2])
    rm_backups([backup])
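For reference, the layout that the second scenario's assertions encode (offsets in bytes; this summary is mine, not part of the original test):

# [0, 128)   -> snap2 data (length1)
# [128, 256) -> tail of snap1 data (snap1_data[128:256])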
Code example #6
File: train.py  Project: tspannhw/yolo2NCS
def _main_(args):
    config_path = args.conf
    keras.backend.tensorflow_backend.set_session(get_session())

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if config['backup']['create_backup']:
        config = create_backup(config)
    ###############################
    #   Parse the annotations
    ###############################

    if config['parser_annotation_type'] == 'xml':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_xml(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'], config['model']['labels'])

        # parse annotations of the validation set, if any, otherwise split the training set
        if os.path.exists(config['valid']['valid_annot_folder']):
            valid_imgs, valid_labels = parse_annotation_xml(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
            split = False
        else:
            split = True
    elif config['parser_annotation_type'] == 'csv':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_csv(
            config['train']['train_csv_file'], config['model']['labels'],
            config['train']['train_csv_base_path'])

        # parse annotations of the validation set, if any, otherwise split the training set
        if os.path.exists(config['valid']['valid_csv_file']):
            valid_imgs, valid_labels = parse_annotation_csv(
                config['valid']['valid_csv_file'], config['model']['labels'],
                config['valid']['valid_csv_base_path'])
            split = False
        else:
            split = True
    else:
        raise ValueError(
            "'parser_annotation_type' must be 'xml' or 'csv' not {}.".format(
                config['parser_annotation_type']))

    if split:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)

        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))

        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)

        if len(overlap_labels) < len(config['model']['labels']):
            print(
                'Some labels have no annotations! Please revise the list of labels in the config.json file!'
            )
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        # materialize the keys as a list so they serialize and index cleanly
        config['model']['labels'] = list(train_labels.keys())
        with open("labels.json", 'w') as outfile:
            json.dump({"labels": list(train_labels.keys())}, outfile)

    ###############################
    #   Construct the model
    ###############################

    yolo = YOLO(backend=config['model']['backend'],
                input_size=(config['model']['input_size_h'],
                            config['model']['input_size_w']),
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gray_mode=config['model']['gray_mode'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################

    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])
    else:
        print("No pretrained model has been loaded")

    ###############################
    #   Start the training process
    ###############################

    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'],
               early_stop=config['train']['early_stop'],
               workers=config['train']['workers'],
               max_queue_size=config['train']['max_queue_size'],
               tb_logdir=config['train']['tensorboard_log_dir'])
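A hypothetical argument wiring for `_main_`; the original train.py presumably builds `args` with argparse roughly along these lines (the description string is illustrative):

import argparse

argparser = argparse.ArgumentParser(
    description='Train and validate a YOLOv2 model on a dataset')
argparser.add_argument('-c', '--conf', help='path to the configuration file')

if __name__ == '__main__':
    _main_(argparser.parse_args())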
Code example #7
def restore_inc_test(
        controller,
        replica1,
        replica2,  # NOQA
        sb_controller,
        sb_replica1,
        sb_replica2,
        backup_target):  # NOQA
    launcher.start_engine_frontend(FRONTEND_TGT_BLOCKDEV, url=LAUNCHER)
    dev = common.get_dev(replica1, replica2, controller)

    # a single NUL character, used to build zero-filled regions
    zero_string = '\x00'

    # backup0: 256 random data in 1st block
    length0 = 256
    snap0_data = common.random_string(length0)
    verify_data(dev, 0, snap0_data)
    verify_data(dev, BLOCK_SIZE, snap0_data)
    snap0 = cmd.snapshot_create()
    backup0 = create_backup(backup_target, snap0)
    backup0_name = cmd.backup_inspect(backup0)['Name']

    # backup1: 32 random data + 32 zero data + 192 random data in 1st block
    length1 = 32
    offset1 = 32
    snap1_data = zero_string * length1
    verify_data(dev, offset1, snap1_data)
    snap1 = cmd.snapshot_create()
    backup1 = create_backup(backup_target, snap1)
    backup1_name = cmd.backup_inspect(backup1)['Name']

    # backup2: 32 random data + 256 random data in 1st block,
    #          256 random data in 2nd block
    length2 = 256
    offset2 = 32
    snap2_data = common.random_string(length2)
    verify_data(dev, offset2, snap2_data)
    verify_data(dev, BLOCK_SIZE, snap2_data)
    snap2 = cmd.snapshot_create()
    backup2 = create_backup(backup_target, snap2)
    backup2_name = cmd.backup_inspect(backup2)['Name']

    # backup3: 64 zero data + 192 random data in 1st block,
    #          zeros at [256, 288) and in the first 256B of the 2nd block
    length3 = 64
    offset3 = 0
    verify_data(dev, offset3, zero_string * length3)
    verify_data(dev, length2, zero_string * offset2)
    verify_data(dev, BLOCK_SIZE, zero_string * length2)
    snap3 = cmd.snapshot_create()
    backup3 = create_backup(backup_target, snap3)
    backup3_name = cmd.backup_inspect(backup3)['Name']

    # backup4: 256 random data in 1st block
    length4 = 256
    offset4 = 0
    snap4_data = common.random_string(length4)
    verify_data(dev, offset4, snap4_data)
    snap4 = cmd.snapshot_create()
    backup4 = create_backup(backup_target, snap4)
    backup4_name = cmd.backup_inspect(backup4)['Name']

    common.cleanup_replica(replica1)
    common.cleanup_replica(replica2)
    common.cleanup_controller(controller)
    launcher.shutdown_engine_frontend(url=LAUNCHER)

    # start no-frontend volume
    # start standby volume (no frontend)
    start_no_frontend_volume(sb_controller, sb_replica1, sb_replica2)

    restore_for_no_frontend_volume(backup0, sb_controller)
    verify_no_frontend_data(0, snap0_data, sb_controller)

    # mock restore crash/error
    delta_file1 = "volume-delta-" + backup0_name + ".img"
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*blk"]
        blocks = subprocess.check_output(command).split()
        assert len(blocks) != 0
        for blk in blocks:
            command = ["mv", blk, blk + ".tmp"]
            subprocess.check_output(command).strip()
        with pytest.raises(subprocess.CalledProcessError):
            cmd.restore_inc(backup1, backup0_name, CONTROLLER_NO_FRONTEND)
        assert path.exists(STANDBY_REPLICA1_PATH + delta_file1)
        assert path.exists(STANDBY_REPLICA2_PATH + delta_file1)
        for blk in blocks:
            command = ["mv", blk + ".tmp", blk]
            subprocess.check_output(command)

    data1 = \
        snap0_data[0:offset1] + snap1_data + \
        snap0_data[offset1+length1:]
    cmd.restore_inc(backup1, backup0_name, CONTROLLER_NO_FRONTEND)
    verify_no_frontend_data(0, data1, sb_controller)

    assert not path.exists(STANDBY_REPLICA1_PATH + delta_file1)
    assert not path.exists(STANDBY_REPLICA2_PATH + delta_file1)
    volume_info = cmd.info(CONTROLLER_NO_FRONTEND)
    assert volume_info['lastRestored'] == backup1_name

    data2 = \
        data1[0:offset2] + snap2_data + \
        zero_string * (BLOCK_SIZE - length2 - offset2) + snap2_data
    cmd.restore_inc(backup2, backup1_name, CONTROLLER_NO_FRONTEND)
    verify_no_frontend_data(0, data2, sb_controller)

    delta_file2 = "volume-delta-" + backup1_name + ".img"
    assert not path.exists(STANDBY_REPLICA1_PATH + delta_file2)
    assert not path.exists(STANDBY_REPLICA2_PATH + delta_file2)
    volume_info = cmd.info(CONTROLLER_NO_FRONTEND)
    assert volume_info['lastRestored'] == backup2_name

    # mock race condition
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.restore_inc(backup1, backup0_name, CONTROLLER_NO_FRONTEND)
    assert "doesn't match lastRestored" in str(e.value)

    data3 = zero_string * length3 + data2[length3:length2]
    cmd.restore_inc(backup3, backup2_name, CONTROLLER_NO_FRONTEND)
    verify_no_frontend_data(0, data3, sb_controller)

    delta_file3 = "volume-delta-" + backup3_name + ".img"
    assert not path.exists(STANDBY_REPLICA1_PATH + delta_file3)
    assert not path.exists(STANDBY_REPLICA2_PATH + delta_file3)
    volume_info = cmd.info(CONTROLLER_NO_FRONTEND)
    assert volume_info['lastRestored'] == backup3_name

    # mock corner case: invalid last-restored backup
    rm_backups([backup3])
    # actually it is full restoration
    cmd.restore_inc(backup4, backup3_name, CONTROLLER_NO_FRONTEND)
    verify_no_frontend_data(0, snap4_data, sb_controller)
    volume_info = cmd.info(CONTROLLER_NO_FRONTEND)
    assert volume_info['lastRestored'] == backup4_name
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*tempoary"]
        tmp_files = subprocess.check_output(command).split()
        assert len(tmp_files) == 0

    cleanup_no_frontend_volume(sb_controller, sb_replica1, sb_replica2)

    rm_backups([backup0, backup1, backup2, backup4])
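For reference, the backup chain this test builds (offsets and lengths in bytes; this summary is mine, not part of the original test):

# backup0: 256B random data at offset 0 and at BLOCK_SIZE
# backup1: 32B of zeros at offset 32
# backup2: 256B random data at offset 32 and at BLOCK_SIZE
# backup3: zeros at [0, 64), [256, 288), and [BLOCK_SIZE, BLOCK_SIZE + 256)
# backup4: 256B random data at offset 0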
Code example #8
import json
import os
import subprocess

import arrow  # third-party date library; used for the YYYYMMDD timestamp

import utils  # project-local module providing create_backup()


def adjust_filename(input_folder, input_json, project, time=None):
    """Modify images and json into the correct filename convention

    This method automatically adjusts filenames into the desired format. It
    will also create a backup version in
    `dataversioning_backup_adjust_filename`.

    # Arguments
        input_folder [str]: the path to the input folder
        input_json [str]: the path to the input json
        project [str]: the project name
        time [str]: the time in YYYYMMDD format; if None, the current
            time is used

    # Returns
        [str]: project name
        [str]: time
    """
    # drop a single trailing slash so os.path.dirname() works as expected
    input_folder = (input_folder[:-1]
                    if input_folder[-1] == '/' else input_folder)
    _ = utils.create_backup(input_folder, input_json,
                            'dataversioning_backup_adjust_filename')

    print('Loading images and labels...')
    with open(input_json, 'r') as f_in:
        features = json.load(f_in)
        features_new = {}

    if time is None:
        time = arrow.now().format('YYYYMMDD')
    else:
        _ = arrow.get(time, 'YYYYMMDD')

    # zero-pad the index one digit wider than the dataset size requires
    digits = len(str(len(features))) + 1
    print('Modifying images and labels...')
    for idx, each_file in enumerate(features.keys()):
        directory = os.path.dirname(each_file)
        _, ext = os.path.splitext(each_file)

        filename_new = '{project}_{time}_{idx:0{digits}d}{ext}'.format(
            project=project, time=time, idx=idx, digits=digits, ext=ext)
        filename_new = os.path.join(directory, filename_new)

        features_new[filename_new] = features[each_file]
        subprocess.run([
            'mv',
            os.path.join(input_folder, each_file),
            os.path.join(input_folder, filename_new)
        ])

    with open(input_json, 'w') as f_out:
        json.dump(features_new,
                  f_out,
                  indent=4,
                  separators=(',', ': '),
                  sort_keys=True,
                  ensure_ascii=False)

    print('Finished!')

    return project, time
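A minimal usage sketch, assuming an `images/` folder and `labels.json` as above (paths and project name are illustrative):

# hypothetical call: renames every image to <project>_<YYYYMMDD>_<idx><ext>
# and rewrites the json keys to match
project, stamp = adjust_filename('images', 'labels.json', 'invoice')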