Example #1
def main(args):

    iman = []
    s3 = s3store(args.minio_address, args.minio_access_key,
                 args.minio_secret_key)
    shutil.rmtree(args.ann_dir, ignore_errors=True)
    for src in args.annotations:
        destDir = args.ann_dir + '/' + src
        s3.GetDir(args.srcbucket, src, destDir)
        file_list = glob.glob(glob.escape(destDir) + '/*.' + args.image_type)

        for imfile in file_list:
            annFile = '{}{}'.format(
                os.path.splitext(imfile)[0], args.annotation_decoration)
            if (os.path.exists(imfile) and os.path.exists(annFile)):
                iman.append({'im': imfile, 'an': annFile})
    if (len(iman) < 1):
        print('No files found in {}. Exiting'.format(args.annotations))
        return

    if not os.path.exists(args.record_dir):
        os.makedirs(args.record_dir)

    WriteRecords(args, iman)

    print('Write to s3  {}/{}'.format(args.destbucket, args.setname))

    s3.PutDir(args.destbucket, args.record_dir, args.setname)
    shutil.rmtree(args.ann_dir, ignore_errors=True)
    shutil.rmtree(args.record_dir, ignore_errors=True)
    print('{} complete'.format(os.path.basename(__file__)))
Example #2
def main(args):

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(
            args.credentails))
        return

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'],
                 s3def['access key'],
                 s3def['secret key'],
                 tls=s3def['tls'],
                 cert_verify=s3def['cert_verify'],
                 cert_path=s3def['cert_path'])

    if not os.path.exists(args.path):
        os.makedirs(args.path)

    for url in cocourl:
        outpath = '{}/{}'.format(args.path, os.path.basename(url))
        if os.path.isfile(outpath):
            print('{} exists.  Skipping'.format(outpath))
        else:
            sysmsg = 'wget -O {} {} '.format(outpath, url)
            print(sysmsg)
            os.system(sysmsg)

        sysmsg = 'unzip {} -d {}'.format(outpath, args.path)
        print(sysmsg)
        os.system(sysmsg)
        os.remove(outpath)  # Remove zip file once extracted

    saved_name = '{}/{}'.format(s3def['sets']['dataset']['prefix'],
                                args.dataset)
    print('Save dataset to {}/{}'.format(s3def['sets']['dataset']['bucket'],
                                         saved_name))
    if s3.PutDir(s3def['sets']['dataset']['bucket'], args.path, saved_name):
        shutil.rmtree(args.path, ignore_errors=True)

    url = s3.GetUrl(s3def['sets']['dataset']['bucket'], saved_name)
    print("Complete. Results saved to {}".format(url))
Example #3
def main(args):

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(
            args.credentails))
        return

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'],
                 s3def['access key'],
                 s3def['secret key'],
                 tls=s3def['tls'],
                 cert_verify=s3def['cert_verify'],
                 cert_path=s3def['cert_path'])

    if not os.path.exists(args.record_dir):
        os.makedirs(args.record_dir)

    WriteRecords(s3def, s3, args)
Example #4
def main(args):

    print('Start Tensorflow to ONNX conversion')

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(args.credentails))
        return

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'], 
                 s3def['access key'], 
                 s3def['secret key'], 
                 tls=s3def['tls'], 
                 cert_verify=s3def['cert_verify'], 
                 cert_path=s3def['cert_path']
                 )

    trainingset = '{}/{}/'.format(s3def['sets']['trainingset']['prefix'] , args.trainingset)
    print('Load training set {}/{} to {}'.format(s3def['sets']['trainingset']['bucket'],trainingset,args.trainingset_dir ))
    s3.Mirror(s3def['sets']['trainingset']['bucket'], trainingset, args.trainingset_dir)

    config = {
        'descripiton': args.description,
        'batch_size': args.batch_size,
        'input_shape': [args.training_crop[0], args.training_crop[1], args.train_depth],
        'learning_rate': args.learning_rate,
        'weights': args.weights,
        'channel_order': args.channel_order,
        's3_address':s3def['address'],
        's3_sets':s3def['sets'],
        'initialmodel':args.initialmodel,
    }

    if args.initialmodel is None or len(args.initialmodel) == 0:
        config['initialmodel'] = None

    val_dataset = input_fn('val', args.trainingset_dir, config)
    train_dataset = input_fn('train', args.trainingset_dir, config)
    iterator = iter(train_dataset)

    tempinitmodel = tempfile.TemporaryDirectory(prefix='initmodel', dir='.')
    modelpath = tempinitmodel.name+'/'+config['initialmodel']
    os.makedirs(modelpath)
    file_count = 0
    s3model=config['s3_sets']['model']['prefix']+'/'+config['initialmodel']
    file_count = s3.GetDir(config['s3_sets']['model']['bucket'], s3model, modelpath)

    FP = 'FP32'
    num_calibration_steps = 20
    def representative_dataset_gen():
        for _ in range(num_calibration_steps):
            # Draw a calibration batch from the training dataset iterator
            image, _ = iterator.get_next()
            yield [image]

    params = tf.experimental.tensorrt.ConversionParams(
        precision_mode=FP,
        # Set this to a large enough number so it can cache all the engines.
        maximum_cached_engines=128)
    converter = tf.experimental.tensorrt.Converter(input_saved_model_dir=modelpath, conversion_params=params)

    converter.convert()
    converter.build(input_fn=representative_dataset_gen)  # Generate corresponding TRT engines
    converter.save('./zzz')  # Generated engines will be saved

    '''print('Store {} to {}/{}'.format(onnx_filename, s3def['sets']['model']['bucket'],s3model))
    if not s3.PutFile(s3def['sets']['model']['bucket'], onnx_filename, s3model):
        print("s3.PutFile({},{},{} failed".format(s3def['sets']['model']['bucket'], onnx_filename, s3model))

    obj_name = '{}/{}.onnx'.format(s3model, args.modelprefix)
    objurl = s3.GetUrl(s3def['sets']['model']['bucket'], obj_name)'''

    #print("Tensorflow to TRT  complete. Results stored {}".format(objurl))
    print("Tensorflow to TRT  complete. Results stored")
Example #5
def main(args):

    print('Start training')

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(args.credentails))
        return

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'], 
                 s3def['access key'], 
                 s3def['secret key'], 
                 tls=s3def['tls'], 
                 cert_verify=s3def['cert_verify'], 
                 cert_path=s3def['cert_path']
                 )
    
    trainingset = '{}/{}/'.format(s3def['sets']['trainingset']['prefix'] , args.trainingset)
    trainingsetdir = '{}/{}'.format(args.trainingsetdir,args.trainingset)
    print('Load training set {}/{} to {}'.format(s3def['sets']['trainingset']['bucket'],trainingset,trainingsetdir ))
    s3.Mirror(s3def['sets']['trainingset']['bucket'], trainingset, trainingsetdir)

    if args.weights is not None and args.weights.lower() in ('none', ''):
        args.weights = None

    trainingsetDescriptionFile = '{}/description.json'.format(trainingsetdir)
    trainingsetDescription = json.load(open(trainingsetDescriptionFile))

    config = {
        'descripiton': args.description,
        'batch_size': args.batch_size,
        'trainingset': trainingset,
        'trainingset description': trainingsetDescription,
        'input_shape': [args.training_crop[0], args.training_crop[1], args.train_depth],
        'classScale': 0.001, # scale value for each product class
        'augment_rotation' : 15., # Rotation in degrees
        'augment_flip_x': False,
        'augment_flip_y': True,
        'augment_brightness':0.,
        'augment_contrast': 0.,
        'augment_shift_x': 0.1, # in fraction of image
        'augment_shift_y': 0.1, # in fraction of image
        'scale_min': 0.5, # in fraction of image
        'scale_max': 2.0, # in fraction of image
        'ignore_label': trainingsetDescription['classes']['ignore'],
        'classes': trainingsetDescription['classes']['classes'],
        'epochs': args.epochs,
        'area_filter_min': 25,
        'learning_rate': args.learning_rate,
        'weights': args.weights,
        'channel_order': args.channel_order,
        'clean': args.clean,
        's3_address':s3def['address'],
        's3_sets':s3def['sets'],
        'initialmodel':args.initialmodel,
        'training_dir': args.training_dir,
        'min':args.min,
        'strategy': args.strategy,
        'devices':args.devices,
    }

    if args.trainingset is None or len(args.trainingset) == 0:
        config['trainingset'] = None
    if args.initialmodel is None or len(args.initialmodel) == 0:
        config['initialmodel'] = None
    if args.training_dir is None or len(args.training_dir) == 0:
        # Keep a reference to the TemporaryDirectory object so it is not cleaned up prematurely
        tempdir = tempfile.TemporaryDirectory(prefix='train', dir='.')
        config['training_dir'] = tempdir.name

    if args.clean:
        shutil.rmtree(config['training_dir'], ignore_errors=True)

    strategy = None
    if(args.strategy == 'mirrored'):
        strategy = tf.distribute.MirroredStrategy(devices=args.devices)

    else:
        device = "/gpu:0"
        if args.devices is not None and len(args.devices) > 0:
            device = args.devices[0]

        strategy = tf.distribute.OneDeviceStrategy(device=device)

    print('{} distribute with {} GPUs'.format(args.strategy,strategy.num_replicas_in_sync))

    savedmodelpath = '{}/{}'.format(args.savedmodel, args.savedmodelname)
    if not os.path.exists(savedmodelpath):
        os.makedirs(savedmodelpath)
    if not os.path.exists(config['training_dir']):
        os.makedirs(config['training_dir'])

    with strategy.scope(): # Apply training strategy 
        model =  LoadModel(config, s3) 

        # Display model
        model.summary()

        train_dataset = input_fn('train', trainingsetdir, config)
        val_dataset = input_fn('val', trainingsetdir, config)

        #earlystop_callback = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta=1e-4, patience=3, verbose=0, mode='auto')
        save_callback = tf.keras.callbacks.ModelCheckpoint(filepath=config['training_dir'], monitor='loss',verbose=0,save_weights_only=False,save_freq='epoch')
        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=config['training_dir'], histogram_freq=100)
        callbacks = [
            save_callback,
            tensorboard_callback
        ]
        #file_writer = tf.summary.create_file_writer(config['training_dir'])

        # Save plot of the model
        # Failing with "AttributeError: 'dict' object has no attribute 'name'" when returning multiple outputs
        #tf.keras.utils.plot_model(model, to_file='{}unet.png'.format(savedmodelpath), show_shapes=True)

        train_images = config['batch_size'] # Guess training set size if not provided
        val_images = config['batch_size']

        for dataset in trainingsetDescription['sets']:
            if(dataset['name']=="train"):
                train_images = dataset["length"]
            if(dataset['name']=="val"):
                val_images = dataset["length"]
        steps_per_epoch=int(train_images/config['batch_size'])        
        validation_steps=int(val_images/config['batch_size']/config['epochs'])
              
        if(args.min):
            steps_per_epoch= min(args.min_steps, steps_per_epoch)
            validation_steps=min(args.min_steps, validation_steps)
            config['epochs'] = 1


        if config['epochs'] > 0:

            print("Fit model to data")
            model_history = model.fit(train_dataset, 
                                    validation_data=val_dataset,
                                    epochs=config['epochs'],
                                    steps_per_epoch=steps_per_epoch,
                                    validation_steps=validation_steps,
                                    callbacks=callbacks)

            history = model_history.history
            if 'loss' in history:
                loss = model_history.history['loss']
            else:
                loss = []
            if 'val_loss' in history:
                val_loss = model_history.history['val_loss']
            else:
                val_loss = []

            model_description = {'config':config,
                                'results': history
                                }

            graph_history(loss,val_loss,savedmodelpath)


        else:
            model_description = {'config':config,
                            }

    print("Create saved model")
    model.save(savedmodelpath, save_format='tf')
    WriteDictJson(model_description, '{}/description.json'.format(savedmodelpath))

    if args.saveonnx:
        onnx_req = "python -m tf2onnx.convert --saved-model {0} --opset 10 --output {0}/model.onnx".format(savedmodelpath)
        os.system(onnx_req)

    # Make some predictions. In the interest of saving time, the number of epochs was kept small, but you may set this higher to achieve more accurate results.
    WritePredictions(train_dataset, model, config, outpath=savedmodelpath, imgname='train_img')
    WritePredictions(val_dataset, model, config, outpath=savedmodelpath, imgname='val_img')

    # Kubeflow Pipeline results
    results = model_description
    WriteDictJson(results, '{}/results.json'.format(savedmodelpath))

    saved_name = '{}/{}'.format(s3def['sets']['model']['prefix'] , args.savedmodelname)
    print('Save model to {}/{}'.format(s3def['sets']['model']['bucket'],saved_name))
    if s3.PutDir(s3def['sets']['model']['bucket'], savedmodelpath, saved_name):
        shutil.rmtree(savedmodelpath, ignore_errors=True)

    if args.clean or args.training_dir is None or len(args.training_dir) == 0:
        shutil.rmtree(config['training_dir'], ignore_errors=True)

    print("Segmentation training complete. Results saved to https://{}/minio/{}/{}".format(s3def['address'], s3def['sets']['model']['bucket'],saved_name))
Example #6
def main(args):
    print('Start test')

    creds = ReadDictJson(args.credentails)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(args.credentails))
        return False

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'], 
                 s3def['access key'], 
                 s3def['secret key'], 
                 tls=s3def['tls'], 
                 cert_verify=s3def['cert_verify'], 
                 cert_path=s3def['cert_path']
                 )

    trainingset = '{}/{}/'.format(s3def['sets']['trainingset']['prefix'] , args.trainingset)
    print('Load training set {}/{} to {}'.format(s3def['sets']['trainingset']['bucket'],trainingset,args.trainingset_dir ))
    s3.Mirror(s3def['sets']['trainingset']['bucket'], trainingset, args.trainingset_dir)

    trainingsetDescriptionFile = '{}/description.json'.format(args.trainingset_dir)
    trainingsetDescription = json.load(open(trainingsetDescriptionFile))
    
    config = {
        'name': args.name,
        'description': args.description,
        'initialmodel': args.model,
        'trtmodel': args.trtmodel,
        'batch_size': args.batch_size,
        'trainingset description': trainingsetDescription,
        'input_shape': [args.training_crop[0], args.training_crop[1], args.train_depth],
        'classScale': 0.001, # scale value for each product class
        'augment_rotation' : 5., # Rotation in degrees
        'augment_flip_x': False,
        'augment_flip_y': True,
        'augment_brightness':0.,
        'augment_contrast': 0.,
        'augment_shift_x': 0.0, # in fraction of image
        'augment_shift_y': 0.0, # in fraction of image
        'scale_min': 0.75, # in fraction of image
        'scale_max': 1.25, # in fraction of image
        'ignore_label': trainingsetDescription['classes']['ignore'],
        'classes': trainingsetDescription['classes']['classes'],
        'epochs': 1,
        'area_filter_min': 25,
        'weights': None,
        'channel_order': args.channel_order,
        's3_address':s3def['address'],
        's3_sets':s3def['sets'],
        'training_dir': None, # used by LoadModel
        'learning_rate': 1e-3, # used by LoadModel
        'clean' : True,
        'test_archive': trainingset,
        'run_archive': '{}{}/'.format(trainingset, args.model),
        'min':args.min,
    }

    trainingsetDescriptionFile = '{}/description.json'.format(args.trainingset_dir)
    trainingsetDescription = json.load(open(trainingsetDescriptionFile))

    strategy = None
    if(args.strategy == 'mirrored'):
        strategy = tf.distribute.MirroredStrategy(devices=args.devices)

    else:
        device = "/gpu:0"
        if args.devices is not None and len(args.devices) > 0:
            device = args.devices[0]

        strategy = tf.distribute.OneDeviceStrategy(device=device)

    modelobjname = '{}/{}/{}'.format(s3def['sets']['model']['prefix'], config['initialmodel'], config['trtmodel'])
    modelfilename = '{}/{}/{}/{}'.format(args.test_dir, s3def['sets']['model']['prefix'], config['initialmodel'], config['trtmodel'])
    print('Load trt model {}/{} to {}'.format(s3def['sets']['model']['bucket'], modelobjname, modelfilename))
    s3.GetFile(s3def['sets']['model']['bucket'], modelobjname, modelfilename)

    # Prepare datasets for similarity computation
    objTypes = {}
    for objType in trainingsetDescription['classes']['objects']:
        if objType['trainId'] not in objTypes:
            objTypes[objType['trainId']] = copy.deepcopy(objType)
            # set name to category for objTypes and id to trainId
            objTypes[objType['trainId']]['name'] = objType['category']
            objTypes[objType['trainId']]['id'] = objType['trainId']

    results = {'class similarity':{}, 'config':config, 'image':[]}

    for objType in objTypes:
        results['class similarity'][objType] = {'union':0, 'intersection':0} 

    with strategy.scope(): 
        accuracy = tf.keras.metrics.Accuracy()
        #train_dataset = input_fn('train', args.trainingset_dir, config)
        val_dataset = input_fn('val', args.trainingset_dir, config)

        trainingsetdesc = {}
        validationsetdec = {}
        for dataset in config['trainingset description']['sets']:
            if dataset['name'] == 'val':
                validationsetdec = dataset
            if dataset['name'] == 'train':
                trainingsetdesc = dataset

        print("Begin inferences")
        dtSum = 0.0
        accuracySum = 0.0
        total_confusion = None
        iterator = iter(val_dataset)
        numsteps = int(validationsetdec['length']/config['batch_size'])
        step = 0

        if(config['min']):
            numsteps=min(args.min_steps, numsteps)

        try:

            with open(modelfilename, "rb") as f:
                engine_data = f.read()
            runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))

            engine = runtime.deserialize_cuda_engine(engine_data)
            context = engine.create_execution_context()

            target_dtype = np.float16 if args.fp16 else np.float32

            dummy_input_batch = np.zeros((1, 480, 512, 3), dtype=np.float32)

            output = np.empty([args.batch_size, config['input_shape'][0], config['input_shape'][1], config['classes']], dtype = np.float32)
            # Allocate device memory
            d_input = cuda.mem_alloc(1 * dummy_input_batch.nbytes)
            d_output = cuda.mem_alloc(1 * output.nbytes)

            bindings = [int(d_input), int(d_output)]

            stream = cuda.Stream()

            def predict(batch): # result gets copied into output
                # Transfer input data to device
                cuda.memcpy_htod_async(d_input, batch, stream)
                # Execute model
                context.execute_async_v2(bindings, stream.handle, None)
                # Transfer predictions back
                cuda.memcpy_dtoh_async(output, d_output, stream)
                # Synchronize threads
                stream.synchronize()
                
                return output

            if not os.path.exists(args.test_dir):
                os.makedirs(args.test_dir)

            output = predict(dummy_input_batch)  # Run to load dependencies

            tf.get_logger().setLevel('ERROR') # remove tf.cast warning from algorithm time

            for i in tqdm(range(numsteps)):
                step = i
                image, annotation  = iterator.get_next()
                initial = datetime.now()
                image_norm = tf.image.per_image_standardization(tf.cast(image, tf.float32))
                logitstft = predict(image_norm.numpy())
                segmentationtrt = np.argmax(logitstft, axis=-1).astype(np.uint8)

                dt = (datetime.now()-initial).total_seconds()
                dtSum += dt
                imageTime = dt/config['batch_size']
                for j in range(config['batch_size']):
                    img = tf.squeeze(image[j]).numpy().astype(np.uint8)
                    ann = tf.squeeze(annotation[j]).numpy().astype(np.uint8)
                    seg = tf.squeeze(segmentationtrt[j]).numpy().astype(np.uint8)

                    accuracy.update_state(ann,seg)
                    seg_accuracy = accuracy.result().numpy()
                    accuracySum += seg_accuracy
                    imagesimilarity, results['class similarity'], unique = jaccard(ann, seg, objTypes, results['class similarity'])

                    confusion = tf.math.confusion_matrix(ann.flatten(),seg.flatten(), config['classes']).numpy().astype(np.int64)
                    if total_confusion is None:
                        total_confusion = confusion
                    else:
                        total_confusion += confusion

                    if args.saveimg:
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        iman = DrawFeatures(img, ann, config)
                        iman = cv2.putText(iman, 'Annotation',(10,25), font, 1,(255,255,255),1,cv2.LINE_AA)
                        imseg = DrawFeatures(img, seg, config)
                        imseg = cv2.putText(imseg, 'TensorRT',(10,25), font, 1,(255,255,255),1,cv2.LINE_AA)

                        im = cv2.hconcat([iman, imseg])
                        im_bgr = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
                        cv2.imwrite('{}/{}{:03d}{:03d}.png'.format(args.test_dir, 'segtrt', i, j), im_bgr)

                    results['image'].append({'dt':imageTime,'similarity':imagesimilarity, 'accuracy':seg_accuracy.astype(float), 'confusion':confusion.tolist()})
        except Exception as e:
            print("Error: test exception {} step {}".format(e, step))
            numsteps = step

    num_images = numsteps*config['batch_size']

    if numsteps > 0: 
        num_images = numsteps*config['batch_size']
        average_time = dtSum/num_images
        average_accuracy = accuracySum/num_images
    else:
        num_images = 0
        average_time = 0.0
        average_accuracy = 0.0

    sumIntersection = 0
    sumUnion = 0
    sumAccuracy = 0.0
    dataset_similarity = {}
    for key in results['class similarity']:
        intersection = results['class similarity'][key]['intersection']
        sumIntersection += intersection
        union = results['class similarity'][key]['union']
        sumUnion += union
        class_similarity = similarity(intersection, union)

        # convert to int from int64 for json.dumps
        dataset_similarity[key] = {'intersection':int(intersection) ,'union':int(union) , 'similarity':class_similarity}

    results['class similarity'] = dataset_similarity
    total_similarity = similarity(sumIntersection, sumUnion)

    now = datetime.now()
    date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
    test_summary = {'date':date_time}
    test_summary['name']=config['name']
    test_summary['description']=config['description']
    test_summary['model']=config['initialmodel']
    test_summary['accuracy']=average_accuracy
    test_summary['class_similarity']=dataset_similarity
    test_summary['similarity']=total_similarity
    test_summary['confusion']=total_confusion.tolist()
    test_summary['images']=num_images
    test_summary['image time']=average_time
    test_summary['batch size']=config['batch_size']
    test_summary['store address'] =s3def['address']
    test_summary['test bucket'] = s3def['sets']['trainingset']['bucket']
    test_summary['platform'] = platform.platform()
    if args.saveresults:
        test_summary['results'] = results    
    print ("Average time {}".format(average_time))
    print ('Similarity: {}'.format(dataset_similarity))

    # If there is a way to lock this object between read and write, it would prevent the possibility of losing data
    training_data = s3.GetDict(s3def['sets']['trainingset']['bucket'], config['test_archive']+args.tests_json)
    if training_data is None:
        training_data = []
    training_data.append(test_summary)
    s3.PutDict(s3def['sets']['trainingset']['bucket'], config['test_archive']+args.tests_json, training_data)

    test_url = s3.GetUrl(s3def['sets']['trainingset']['bucket'], config['test_archive']+args.tests_json)

    print("Test results {}".format(test_url))
Example #7
def main(args):

    print('Platform: {}'.format(platform.platform()))

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(
            args.credentails))
        return

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'],
                 s3def['access key'],
                 s3def['secret key'],
                 tls=s3def['tls'],
                 cert_verify=s3def['cert_verify'],
                 cert_path=s3def['cert_path'])

    config = {
        'initialmodel': args.savedmodel,
        's3_address': s3def['address'],
        's3_sets': s3def['sets'],
        'savedmodel': args.savedmodel,
        'training_dir': None,
        'clean': False,
        'shape': (args.size_y, args.size_x, args.depth),
    }

    using_tensorflow = True
    using_tensorrt = True

    modelpath = '{}/{}'.format(s3def['sets']['model']['prefix'],
                               config['savedmodel'])
    savedmodelpath = '{}/{}'.format(args.work_path, config['savedmodel'])
    onnxname = '{}/{}.onnx'.format(savedmodelpath, config['savedmodel'])
    trtenginename = '{}/{}.trt'.format(savedmodelpath, config['savedmodel'])

    if using_tensorflow:
        import tensorflow
        import keras2onnx
        import onnx

        print('Tensorflow: {}'.format(tensorflow.__version__))

        model = LoadModel(config, s3)
        onnx_model = keras2onnx.convert_keras(model, model.name)

        if not s3.GetDir(s3def['sets']['model']['bucket'], modelpath,
                         savedmodelpath):
            print('Failed to load model')

        onnx.save_model(onnx_model, onnxname)
        # Unload models to free memory for ONNX->TRT conversion

    if using_tensorrt:

        oscmd = 'trtexec --onnx={} --batch=1 --saveEngine={}  --explicitBatch 2>&1'.format(
            onnxname, trtenginename)
        os.system(oscmd)
    '''
    trtenginename = '{}/{}.trt'.format(savedmodelpath, config['savedmodel'])

    graph_def, inputs, outputs = tf_loader.from_saved_model(savedmodelpath, input_names=args.model_input_names, output_names=args.model_output_names)

    print("inputs: {}".format(inputs))
    print("outputs: {}".format(outputs))

    with tensorflow.Graph().as_default() as tf_graph:
        const_node_values = None
        tensorflow.import_graph_def(graph_def, name='')
    with tf_loader.tf_session(graph=tf_graph):
        onnx_graph = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers={},
                             extra_opset=[],
                             shape_override=None,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

        model_proto = onnx_graph.make_model(config['savedmodel'])
        with open(onnxname, "wb") as f:
            f.write(model_proto.SerializeToString())

    #onnx_graph = optimizer.optimize_graph(onnx_graph)

    TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
    trt_runtime = trt.Runtime(TRT_LOGGER)

    EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) # https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#import_onnx_python
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        #builder.max_workspace_size = (256 << 20)
        with open(onnxname, 'rb') as model:
            parser.parse(model.read())
        #network.get_input(0).shape = [args.batch_size, args.image_size[0], args.image_size[1], args.image_depth]
        engine = builder.build_cuda_engine(network)

        buf = engine.serialize()
        with open(trtenginename, 'wb') as f:
            f.write(buf)

        #builder.max_workspace_size = (256 << 20)
        #parser.parse(model_proto.SerializeToString())
        #network.get_input(0).shape = [args.batch_size, args.image_size[0], args.image_size[1], args.image_depth]
        #engine = builder.build_cuda_engine(network)

        #eng.save_engine(engine, trtenginename) 

    '''

    s3.PutFile(s3def['sets']['model']['bucket'], onnxname, modelpath)
    s3.PutFile(s3def['sets']['model']['bucket'], trtenginename, modelpath)

    print("Conversion complete. Results saved to {}".format(trtenginename))
Example #8
def main(args):

    print('Start Tensorflow to ONNX conversion')

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(
            args.credentails))
        return

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'],
                 s3def['access key'],
                 s3def['secret key'],
                 tls=s3def['tls'],
                 cert_verify=s3def['cert_verify'],
                 cert_path=s3def['cert_path'])

    config = {
        'descripiton': args.description,
        'batch_size': args.batch_size,
        'input_shape': [args.training_crop[0], args.training_crop[1], args.train_depth],
        'learning_rate': args.learning_rate,
        'weights': args.weights,
        'channel_order': args.channel_order,
        's3_address': s3def['address'],
        's3_sets': s3def['sets'],
        'initialmodel': args.initialmodel,
    }

    if args.initialmodel is None or len(args.initialmodel) == 0:
        config['initialmodel'] = None

    tempinitmodel = tempfile.TemporaryDirectory(prefix='initmodel', dir='.')
    modelpath = tempinitmodel.name + '/' + config['initialmodel']
    os.makedirs(modelpath)
    file_count = 0
    s3model = config['s3_sets']['model']['prefix'] + '/' + config['initialmodel']
    file_count = s3.GetDir(config['s3_sets']['model']['bucket'], s3model, modelpath)

    onnx_filename = "{}/{}.onnx".format(modelpath, args.modelprefix)
    onnx_req = "python -m tf2onnx.convert --saved-model {} --opset 10 --output {}".format(
        modelpath, onnx_filename)
    os.system(onnx_req)

    #  Fix ONNX size
    '''
    onnx_model = onnx.load(onnx_filename)
    inputs = onnx_model.graph.input

    for input in inputs:
        dim1 = input.type.tensor_type.shape.dim[0]
        dim1.dim_value = config['batch_size']
    onnx.save_model(onnx_model, onnx_filename)
    '''

    print('Store {} to {}/{}'.format(onnx_filename,
                                     s3def['sets']['model']['bucket'],
                                     s3model))
    if not s3.PutFile(s3def['sets']['model']['bucket'], onnx_filename,
                      s3model):
        print("s3.PutFile({},{},{}) failed".format(
            s3def['sets']['model']['bucket'], onnx_filename, s3model))

    obj_name = '{}/{}.onnx'.format(s3model, args.modelprefix)
    objurl = s3.GetUrl(s3def['sets']['model']['bucket'], obj_name)

    print("Tensorflow to ONNX  complete. Results stored {}".format(objurl))
Example #9
def main(args):

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(
            args.credentails))
        return

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'],
                 s3def['access key'],
                 s3def['secret key'],
                 tls=s3def['tls'],
                 cert_verify=s3def['cert_verify'],
                 cert_path=s3def['cert_path'])

    trainingset = '{}/{}/'.format(s3def['sets']['trainingset']['prefix'],
                                  args.trainingset)
    print('Load training set {}/{} to {}'.format(
        s3def['sets']['trainingset']['bucket'], trainingset,
        args.trainingset_dir))
    s3.Mirror(s3def['sets']['trainingset']['bucket'], trainingset,
              args.trainingset_dir)

    trainingsetDescriptionFile = '{}/description.json'.format(
        args.trainingset_dir)
    trainingsetDescription = json.load(open(trainingsetDescriptionFile))

    config = {
        'batch_size': args.batch_size,
        'trainingset': args.trainingset,
        'trainingset description': trainingsetDescription,
        'input_shape': [args.training_crop[0], args.training_crop[1], args.train_depth],
        'classScale': 0.001,  # scale value for each product class
        'augment_rotation': 5.,  # Rotation in degrees
        'augment_flip_x': False,
        'augment_flip_y': True,
        'augment_brightness': 0.,
        'augment_contrast': 0.,
        'augment_shift_x': 0.0,  # in fraction of image
        'augment_shift_y': 0.0,  # in fraction of image
        'scale_min': 0.75,  # in fraction of image
        'scale_max': 1.25,  # in fraction of image
        'ignore_label': trainingsetDescription['classes']['ignore'],
        'classes': trainingsetDescription['classes']['classes'],
        'image_crops': args.crops,
        'epochs': args.epochs,
        'area_filter_min': 25,
        'channel_order': 'channels_last',
        's3_address': s3def['address'],
        's3_sets': s3def['sets'],
        'min': args.min,
    }

    # ## Train the model
    # Now, all that is left to do is to compile and train the model. The loss used here is
    # `losses.SparseCategoricalCrossentropy(from_logits=True)` because the network is trying to
    # assign each pixel a label, just like multi-class prediction. In the true segmentation mask,
    # each pixel has a label in {0, 1, 2}, and the network outputs three channels. Essentially,
    # each channel is trying to learn to predict a class, and
    # `losses.SparseCategoricalCrossentropy(from_logits=True)` is the recommended loss for such a
    # scenario. Using the output of the network, the label assigned to the pixel is the channel
    # with the highest value. This is what the create_mask function is doing.
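    # For reference only (this script only writes images and does not train): with a Keras
    # `model` object available, compiling with that loss would look roughly like
    #   model.compile(optimizer='adam',
    #                 loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    #                 metrics=['accuracy'])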

    train_dataset = input_fn('train', args.trainingset_dir, config)
    val_dataset = input_fn('val', args.trainingset_dir, config)

    outpath = 'test'
    if not os.path.exists(outpath):
        os.makedirs(outpath)

    train_images = config['batch_size']  # Guess training set if not provided
    val_images = config['batch_size']

    for dataset in trainingsetDescription['sets']:
        if (dataset['name'] == "train"):
            train_images = dataset["length"]
        if (dataset['name'] == "val"):
            val_images = dataset["length"]
    steps_per_epoch = int(train_images / config['batch_size'])
    validation_steps = int(val_images / config['batch_size'])

    if (args.min):
        steps_per_epoch = min(args.min_steps, steps_per_epoch)
        validation_steps = min(args.min_steps, validation_steps)
        config['epochs'] = 1

    try:
        i = 0
        j = 0
        k = 0
        iterator = iter(train_dataset)
        for i in range(config['epochs']):
            for j in range(steps_per_epoch):
                image, mask = iterator.get_next()
                for k in range(image.shape[0]):
                    img = tf.squeeze(image[k]).numpy().astype(np.uint8)
                    ann = tf.squeeze(mask[k]).numpy().astype(np.uint8)
                    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                    iman = DrawFeatures(img, ann, config)
                    inxstr = '{:02d}_{:04d}'.format(
                        i, config['batch_size'] * j + k)
                    cv2.imwrite('{}/train_iman{}.png'.format(outpath, inxstr),
                                iman)
    except Exception as e:
        print("Write train_dataset failed epoch {} step {} image {}: {}".format(
            i, j, k, e))

    try:
        j = 0
        k = 0
        iterator = iter(val_dataset)
        for j in range(validation_steps):
            image, mask = iterator.get_next()
            for k in range(image.shape[0]):
                img = tf.squeeze(image[k]).numpy().astype(np.uint8)
                ann = tf.squeeze(mask[k]).numpy().astype(np.uint8)

                iman = DrawFeatures(img, ann, config)
                inxstr = '{:04d}'.format(config['batch_size'] * j + k)
                cv2.imwrite('{}/val_iman{}.png'.format(outpath, inxstr), iman)

    except Exception as e:
        print("Write val_dataset failed step {} image {}: {}".format(j, k, e))

    #WriteImgAn(train_dataset, config, outpath=outpath)
    #WriteImgAn(test_dataset, config, outpath=outpath)
    print("Write complete. Results saved to {}".format(outpath))
Example #10
def main(args):
    failed = False

    creds = {}
    with open(args.credentails) as json_file:
        creds = json.load(json_file)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(args.credentails))
        failed = True
        return failed

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'], 
                 s3def['access key'], 
                 s3def['secret key'], 
                 tls=s3def['tls'], 
                 cert_verify=s3def['cert_verify'], 
                 cert_path=s3def['cert_path']
                 )

    workdir = '{}/{}'.format(args.workdir, args.savedmodelname)
    inobj = '{}/{}/{}'.format(s3def['sets']['model']['prefix'],args.savedmodelname, args.onnxname)
    objpath = '{}/{}'.format(s3def['sets']['model']['prefix'],args.savedmodelname)

    infile = '{}/{}'.format(workdir, args.onnxname)

    if not s3.GetFile(s3def['sets']['model']['bucket'], inobj, infile):
        print('Failed to load {}/{} to {}'.format(s3def['sets']['model']['bucket'], inobj, infile ))
        failed = True
        return failed

    onnx_model = onnx.load(infile)
    inputs = onnx_model.graph.input
    for input in inputs:
        dim1 = input.type.tensor_type.shape.dim[0]
        dim1.dim_value = args.batch_size

    fixedfile = '{}/fixed-{}'.format(workdir, args.onnxname)
    onnx.save_model(onnx_model, fixedfile)

    targetname = args.targetname
    params = ''
    if args.fp16:
        targetname += '-fp16'
        params = '--fp16'
    outfile = '{}/{}.trt'.format(workdir, targetname)
    logfile = '{}/{}-trt.log'.format(workdir, targetname)

    
    # USE_FP16 = True
    # May need to shut down all kernels and restart before this - otherwise you might get cuDNN initialization errors:
    #if USE_FP16:
    #    os.system("trtexec --onnx=resnet50_onnx_model.onnx --saveEngine=resnet_engine.trt  --explicitBatch --fp16")
    #else:
    #    os.system("trtexec --onnx=resnet50_onnx_model.onnx --saveEngine=resnet_engine.trt  --explicitBatch")


    # engine = build_engine(fixedfile)

    succeeded = build_tensorrt_engine(fixedfile,
                            outfile,
                            precision_mode='fp16',
                            max_workspace_size=GB(1),  # in bytes
                            max_batch_size=1,
                            min_timing_iterations=2,
                            avg_timing_iterations=2,
                            int8_calibrator=None)

    if s3.PutFile(s3def['sets']['model']['bucket'], outfile, objpath):
        shutil.rmtree(args.workdir, ignore_errors=True) 

    # trtcmd = "trtexec --onnx=/store/dmp/cl/store/mllib/model/2021-02-19-20-51-59-cocoseg/model.onnx --saveEngine=/store/dmp/cl/store/mllib/model/2021-02-19-20-51-59-cocoseg/model.trt  --explicitBatch --workspace=4096 --verbose  2>&1 | tee trtexe.log"
    print('onnx-trt complete return {}'.format(failed))
    return failed
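An alternative sketch (an assumption, not from the original script): instead of pinning dim[0] to a fixed batch size before building the engine, the ONNX batch dimension can be left symbolic; TensorRT then needs an optimization profile to cover the dynamic range.

import onnx

model = onnx.load(infile)  # infile as computed in the example above
for graph_input in model.graph.input:
    graph_input.type.tensor_type.shape.dim[0].dim_param = 'batch'
onnx.save_model(model, fixedfile)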
Example #11
def main(args):
    print('Start test')

    creds = ReadDictJson(args.credentails)
    if not creds:
        print('Failed to load credentials file {}. Exiting'.format(
            args.credentails))
        return False

    s3def = creds['s3'][0]
    s3 = s3store(s3def['address'],
                 s3def['access key'],
                 s3def['secret key'],
                 tls=s3def['tls'],
                 cert_verify=s3def['cert_verify'],
                 cert_path=s3def['cert_path'])

    trainingset = '{}/{}/'.format(s3def['sets']['trainingset']['prefix'],
                                  args.trainingset)
    print('Load training set {}/{} to {}'.format(
        s3def['sets']['trainingset']['bucket'], trainingset,
        args.trainingset_dir))
    s3.Mirror(s3def['sets']['trainingset']['bucket'], trainingset,
              args.trainingset_dir)

    trainingsetDescriptionFile = '{}/description.json'.format(
        args.trainingset_dir)
    trainingsetDescription = json.load(open(trainingsetDescriptionFile))

    config = {
        'name': args.name,
        'description': args.description,
        'initialmodel': args.model,
        'onnxmodel': args.onnxmodel,
        'batch_size': args.batch_size,
        'trainingset description': trainingsetDescription,
        'input_shape': [args.training_crop[0], args.training_crop[1], args.train_depth],
        'classScale': 0.001,  # scale value for each product class
        'augment_rotation': 5.,  # Rotation in degrees
        'augment_flip_x': False,
        'augment_flip_y': True,
        'augment_brightness': 0.,
        'augment_contrast': 0.,
        'augment_shift_x': 0.0,  # in fraction of image
        'augment_shift_y': 0.0,  # in fraction of image
        'scale_min': 0.75,  # in fraction of image
        'scale_max': 1.25,  # in fraction of image
        'ignore_label': trainingsetDescription['classes']['ignore'],
        'classes': trainingsetDescription['classes']['classes'],
        'epochs': 1,
        'area_filter_min': 25,
        'weights': None,
        'channel_order': args.channel_order,
        's3_address': s3def['address'],
        's3_sets': s3def['sets'],
        'training_dir': None,  # used by LoadModel
        'learning_rate': 1e-3,  # used by LoadModel
        'clean': True,
        'test_archive': trainingset,
        'run_archive': '{}{}/'.format(trainingset, args.model),
        'min': args.min,
    }

    trainingsetDescriptionFile = '{}/description.json'.format(
        args.trainingset_dir)
    trainingsetDescription = json.load(open(trainingsetDescriptionFile))

    onnxobjname = '{}/{}/{}'.format(s3def['sets']['model']['prefix'],
                                    config['initialmodel'],
                                    config['onnxmodel'])
    onnxfilename = '{}/{}/{}/{}'.format(args.test_dir,
                                        s3def['sets']['model']['prefix'],
                                        config['initialmodel'],
                                        config['onnxmodel'])
    print('Load onnx model {}/{} to {}'.format(
        s3def['sets']['model']['bucket'], onnxobjname, onnxfilename))
    s3.GetFile(s3def['sets']['model']['bucket'], onnxobjname, onnxfilename)

    strategy = None
    if (args.strategy == 'mirrored'):
        strategy = tf.distribute.MirroredStrategy(devices=args.devices)

    else:
        device = "/gpu:0"
        if args.devices is not None and len(args.devices) > 0:
            device = args.devices[0]

        strategy = tf.distribute.OneDeviceStrategy(device=device)

    # Prepare datasets for similarity computation
    objTypes = {}
    for objType in trainingsetDescription['classes']['objects']:
        if objType['trainId'] not in objTypes:
            objTypes[objType['trainId']] = copy.deepcopy(objType)
            # set name to category for objTypes and id to trainId
            objTypes[objType['trainId']]['name'] = objType['category']
            objTypes[objType['trainId']]['id'] = objType['trainId']

    results = {'class similarity': {}, 'config': config, 'image': []}

    for objType in objTypes:
        results['class similarity'][objType] = {'union': 0, 'intersection': 0}

    with strategy.scope():  # Apply training strategy
        accuracy = tf.keras.metrics.Accuracy()
        #train_dataset = input_fn('train', args.trainingset_dir, config)
        val_dataset = input_fn('val', args.trainingset_dir, config)

        trainingsetdesc = {}
        validationsetdec = {}
        for dataset in config['trainingset description']['sets']:
            if dataset['name'] == 'val':
                validationsetdec = dataset
            if dataset['name'] == 'train':
                trainingsetdesc = dataset

        print("Begin inferences")
        dtSum = 0.0
        accuracySum = 0.0
        total_confusion = None
        iterator = iter(val_dataset)
        numsteps = int(validationsetdec['length'] / config['batch_size'])
        step = 0

        if (config['min']):
            numsteps = min(args.min_steps, numsteps)

        try:
            print('ONNX runtime devices {}'.format(rt.get_device()))
            onnxsess = rt.InferenceSession(onnxfilename)

            for i in tqdm(range(numsteps)):
                step = i
                image, annotation = iterator.get_next()
                initial = datetime.now()

                input_name = onnxsess.get_inputs()[0].name
                predonnx = onnxsess.run(
                    None, {input_name: image.numpy().astype(np.float32)})
                segmentationonnx = tf.argmax(predonnx[0], axis=-1)

                dt = (datetime.now() - initial).total_seconds()
                dtSum += dt
                imageTime = dt / config['batch_size']
                for j in range(config['batch_size']):
                    img = tf.squeeze(image[j]).numpy().astype(np.uint8)
                    ann = tf.squeeze(annotation[j]).numpy().astype(np.uint8)
                    seg = tf.squeeze(segmentationonnx[j]).numpy().astype(
                        np.uint8)

                    accuracy.update_state(ann, seg)
                    seg_accuracy = accuracy.result().numpy()
                    accuracySum += seg_accuracy
                    imagesimilarity, results['class similarity'], unique = jaccard(
                        ann, seg, objTypes, results['class similarity'])

                    confusion = tf.math.confusion_matrix(
                        ann.flatten(), seg.flatten(),
                        config['classes']).numpy().astype(np.int64)
                    if total_confusion is None:
                        total_confusion = confusion
                    else:
                        total_confusion += confusion

                    if args.saveimg:
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        iman = DrawFeatures(img, ann, config)
                        iman = cv2.putText(iman, 'Annotation', (10, 25), font,
                                           1, (255, 255, 255), 1, cv2.LINE_AA)
                        imseg = DrawFeatures(img, seg, config)
                        imseg = cv2.putText(imseg, 'TensorRT', (10, 25), font,
                                            1, (255, 255, 255), 1, cv2.LINE_AA)

                        im = cv2.hconcat([iman, imseg])
                        im_bgr = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
                        cv2.imwrite(
                            '{}/{}{:03d}{:03d}.png'.format(
                                args.test_dir, 'segtrt', i, j), im_bgr)

                    results['image'].append({
                        'dt': imageTime,
                        'similarity': imagesimilarity,
                        'accuracy': seg_accuracy.astype(float),
                        'confusion': confusion.tolist()
                    })
        except Exception as e:
            print("Error: test exception {} step {}".format(e, step))
            numsteps = step

    num_images = numsteps * config['batch_size']

    if numsteps > 0:
        num_images = numsteps * config['batch_size']
        average_time = dtSum / num_images
        average_accuracy = accuracySum / num_images
    else:
        num_images = 0
        average_time = 0.0
        average_accuracy = 0.0

    sumIntersection = 0
    sumUnion = 0
    sumAccuracy = 0.0
    dataset_similarity = {}
    for key in results['class similarity']:
        intersection = results['class similarity'][key]['intersection']
        sumIntersection += intersection
        union = results['class similarity'][key]['union']
        sumUnion += union
        class_similarity = similarity(intersection, union)

        # convert to int from int64 for json.dumps
        dataset_similarity[key] = {
            'intersection': int(intersection),
            'union': int(union),
            'similarity': class_similarity
        }

    results['class similarity'] = dataset_similarity
    total_similarity = similarity(sumIntersection, sumUnion)

    now = datetime.now()
    date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
    test_summary = {'date': date_time}
    test_summary['name'] = config['name']
    test_summary['description'] = config['description']
    test_summary['model'] = config['initialmodel']
    test_summary['accuracy'] = average_accuracy
    test_summary['class_similarity'] = dataset_similarity
    test_summary['similarity'] = total_similarity
    test_summary['confusion'] = total_confusion.tolist()
    test_summary['images'] = num_images
    test_summary['image time'] = average_time
    test_summary['batch size'] = config['batch_size']
    test_summary['store address'] = s3def['address']
    test_summary['test bucket'] = s3def['sets']['trainingset']['bucket']
    test_summary['platform'] = platform.platform()
    if args.saveresults:
        test_summary['results'] = results

    print("Average time {}".format(average_time))
    print('Similarity: {}'.format(dataset_similarity))

    # If there is a way to lock this object between read and write, it would prevent the possibility of losing data
    training_data = s3.GetDict(s3def['sets']['trainingset']['bucket'],
                               config['test_archive'] + args.tests_json)
    if training_data is None:
        training_data = []
    training_data.append(test_summary)
    s3.PutDict(s3def['sets']['trainingset']['bucket'],
               config['test_archive'] + args.tests_json, training_data)

    test_url = s3.GetUrl(s3def['sets']['trainingset']['bucket'],
                         config['test_archive'] + args.tests_json)

    print("Test results {}".format(test_url))