Example #1
def epoch_testgen_coverage(index, dataset_path):
    model_name = "cnn"
    image_size = (128, 128)
    threshold = 0.2
    weights_path = './weights_HMB_2.hdf5'  # Change to your model weights

    seed_inputs1 = os.path.join(dataset_path, "hmb3/")
    seed_labels1 = os.path.join(dataset_path, "hmb3/hmb3_steering.csv")
    seed_inputs2 = os.path.join(dataset_path, "Ch2_001/center/")
    seed_labels2 = os.path.join(dataset_path,
                                "Ch2_001/CH2_final_evaluation.csv")
    # Model build
    # ---------------------------------------------------------------------------------
    model_builders = {
        'V3': (build_InceptionV3, preprocess_input_InceptionV3, exact_output),
        'cnn': (build_cnn, normalize_input, exact_output)
    }

    if model_name not in model_builders:
        raise ValueError("unsupported model %s" % model_name)
    model_builder, input_processor, output_processor = model_builders[
        model_name]
    model = model_builder(image_size, weights_path)
    print('model %s built...' % model_name)

    filelist1 = []
    for file in sorted(os.listdir(seed_inputs1)):
        if file.endswith(".jpg"):
            filelist1.append(file)

    filelist2 = []
    for file in sorted(os.listdir(seed_inputs2)):
        if file.endswith(".jpg"):
            filelist2.append(file)

    with open(seed_labels1, 'rb') as csvfile1:
        label1 = list(csv.reader(csvfile1, delimiter=',', quotechar='|'))
    label1 = label1[1:]

    with open(seed_labels2, 'rb') as csvfile2:
        label2 = list(csv.reader(csvfile2, delimiter=',', quotechar='|'))
    label2 = label2[1:]

    nc = NCoverage(model, threshold)
    index = int(index)
    #seed inputs
    with open('result/epoch_coverage_70000_images.csv', 'ab', 0) as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        if index == 0:
            writer.writerow([
                'index', 'image', 'transformation', 'param_name', 'param_value',
                'threshold', 'covered_neurons', 'total_neurons',
                'covered_detail', 'y_hat', 'label'
            ])
        if index / 2 == 0:
            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)
            for i in input_images:
                j = i * 5
                csvrecord = []

                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = read_transformed_image(seed_image, image_size)

                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue

                if j < 2:
                    continue

                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('-')
                csvrecord.append('-')
                csvrecord.append('-')
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)

            print("seed input done")

        #Translation
        if index / 2 >= 1 and index / 2 <= 10:
            #for p in xrange(1,11):
            p = index / 2
            params = [p * 10, p * 10]
            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)

            for i in input_images:
                j = i * 5
                csvrecord = []
                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = image_translation(seed_image, params)
                seed_image = read_transformed_image(seed_image, image_size)
                #seed_image = read_image(os.path.join(seed_inputs1, filelist1[j]),image_size)
                #new_covered1, new_total1, result = model.predict_fn(seed_image)
                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue

                if j < 2:
                    continue

                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('translation')
                csvrecord.append('x:y')
                csvrecord.append(':'.join(str(x) for x in params))
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)
            print("translation done")

        #Scale
        if index / 2 >= 11 and index / 2 <= 20:
            #for p in xrange(1,11):
            p = index / 2 - 10
            params = [p * 0.5 + 1, p * 0.5 + 1]

            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)

            for i in input_images:
                j = i * 5
                csvrecord = []
                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = image_scale(seed_image, params)
                seed_image = read_transformed_image(seed_image, image_size)

                #new_covered1, new_total1, result = model.predict_fn(seed_image)
                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue
                if j < 2:
                    continue

                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('scale')
                csvrecord.append('x:y')
                csvrecord.append(':'.join(str(x) for x in params))
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)

        print("scale done")

        #Shear
        if index / 2 >= 21 and index / 2 <= 30:
            #for p in xrange(1,11):
            p = index / 2 - 20
            #for p in xrange(1,11):
            params = 0.1 * p
            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)

            for i in input_images:
                j = i * 5
                csvrecord = []
                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = image_shear(seed_image, params)
                seed_image = read_transformed_image(seed_image, image_size)

                #new_covered1, new_total1, result = model.predict_fn(seed_image)
                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue
                if j < 2:
                    continue

                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('shear')
                csvrecord.append('factor')
                csvrecord.append(params)
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)
        print("sheer done")

        #Rotation
        if index / 2 >= 31 and index / 2 <= 40:
            p = index / 2 - 30
            params = p * 3
            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)
            for i in input_images:
                j = i * 5
                csvrecord = []
                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = image_rotation(seed_image, params)
                seed_image = read_transformed_image(seed_image, image_size)
                #new_covered1, new_total1, result = model.predict_fn(seed_image)
                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue

                if j < 2:
                    continue

                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('rotation')
                csvrecord.append('angle')
                csvrecord.append(params)
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)

        print("rotation done")

        #Contrast
        if index / 2 >= 41 and index / 2 <= 50:
            p = index / 2 - 40
            params = 1 + p * 0.2
            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)
            for i in input_images:
                j = i * 5
                csvrecord = []
                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = image_contrast(seed_image, params)
                seed_image = read_transformed_image(seed_image, image_size)

                #new_covered1, new_total1, result = model.predict_fn(seed_image)
                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue

                if j < 2:
                    continue

                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('contrast')
                csvrecord.append('gain')
                csvrecord.append(params)
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)

        print("contrast done")

        #Brightness
        if index / 2 >= 51 and index / 2 <= 60:
            p = index / 2 - 50
            params = p * 10
            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)
            for i in input_images:
                j = i * 5
                csvrecord = []
                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = image_brightness2(seed_image, params)
                seed_image = read_transformed_image(seed_image, image_size)

                #new_covered1, new_total1, result = model.predict_fn(seed_image)
                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue
                if j < 2:
                    continue

                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('brightness')
                csvrecord.append('bias')
                csvrecord.append(params)
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)
        print("brightness done")

        #blur
        if index / 2 >= 61 and index / 2 <= 70:
            p = index / 2 - 60
            params = p
            if index % 2 == 0:
                input_images = xrange(1, 501)
            else:
                input_images = xrange(501, 1001)
            for i in input_images:
                j = i * 5
                csvrecord = []
                seed_image = cv2.imread(
                    os.path.join(seed_inputs1, filelist1[j]))
                seed_image = image_blur(seed_image, params)
                seed_image = read_transformed_image(seed_image, image_size)

                #new_covered1, new_total1, result = model.predict_fn(seed_image)
                test_x = input_processor(seed_image.astype(np.float32))
                #print('test data shape:', test_x.shape)
                yhat = model.predict(test_x)
                #print('steering angle: ', yhat)

                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, p = nc.curr_neuron_cov()

                tempk = []
                for k in ndict.keys():
                    if ndict[k]:
                        tempk.append(k)
                tempk = sorted(tempk,
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x)
                                          for x in tempk).replace(',', ':')
                nc.reset_cov_dict()
                #print(covered_neurons)
                #covered_neurons = nc.get_neuron_coverage(test_x)
                #print('input covered {} neurons'.format(covered_neurons))
                #print('total {} neurons'.format(total_neurons))

                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue

                if j < 2:
                    continue
                param_name = ""
                if params == 1:
                    param_name = "averaging:3:3"
                if params == 2:
                    param_name = "averaging:4:4"
                if params == 3:
                    param_name = "averaging:5:5"
                if params == 4:
                    param_name = "GaussianBlur:3:3"
                if params == 5:
                    param_name = "GaussianBlur:5:5"
                if params == 6:
                    param_name = "GaussianBlur:7:7"
                if params == 7:
                    param_name = "medianBlur:3"
                if params == 8:
                    param_name = "medianBlur:5"
                if params == 9:
                    param_name = "averaging:6:6"
                if params == 10:
                    param_name = "bilateralFilter:9:75:75"
                csvrecord.append(j - 2)
                csvrecord.append(str(filelist1[j]))
                csvrecord.append('blur')
                csvrecord.append(param_name)
                csvrecord.append(param_name)
                csvrecord.append(threshold)

                csvrecord.append(new_covered1)
                csvrecord.append(new_total1)
                csvrecord.append(covered_detail)

                csvrecord.append(yhat[0][0])
                csvrecord.append(label1[j][1])
                print(csvrecord)
                writer.writerow(csvrecord)
        print("all done")
Example #2
class ChauffeurModel(object):
    def __init__(self,
                 cnn_json_path,
                 cnn_weights_path,
                 lstm_json_path,
                 lstm_weights_path,
                 only_layer=""):

        self.cnn = self.load_from_json(cnn_json_path, cnn_weights_path)
        self.encoder = self.load_encoder(cnn_json_path, cnn_weights_path)
        self.lstm = self.load_from_json(lstm_json_path, lstm_weights_path)

        # hardcoded from final submission model
        self.scale = 16.
        self.timesteps = 100

        self.threshold_cnn = 0.1
        self.threshold_lstm = 0.4
        self.timestepped_x = np.empty((1, self.timesteps, 8960))
        self.nc_lstm = NCoverage(self.lstm, self.threshold_lstm)
        self.nc_encoder = NCoverage(self.encoder,
                                    self.threshold_cnn,
                                    exclude_layer=['pool', 'fc', 'flatten'],
                                    only_layer=only_layer)
        self.steps = deque()
        #print(self.lstm.summary())
        #self.nc = NCoverage(self.lstm,self.threshold)

    def load_encoder(self, cnn_json_path, cnn_weights_path):
        model = self.load_from_json(cnn_json_path, cnn_weights_path)
        model.load_weights(cnn_weights_path)

        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []

        return model

    def load_from_json(self, json_path, weights_path):
        model = model_from_json(open(json_path, 'r').read())
        model.load_weights(weights_path)
        return model

    def make_cnn_only_predictor(self):
        def predict_fn(img):
            img = cv2.resize(img, (320, 240))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
            img = img[120:240, :, :]
            img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
            img = ((img - (255.0 / 2)) / 255.0)

            return self.cnn.predict_on_batch(img.reshape(
                (1, 120, 320, 3)))[0, 0] / self.scale

        return predict_fn

    #def make_stateful_predictor(self):
    #steps = deque()

    def predict_fn(self, img, dummy=2):
        # preprocess image to be YUV 320x120 and equalize Y histogram
        steps = self.steps
        img = cv2.resize(img, (320, 240))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
        img = img[120:240, :, :]
        img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
        img = ((img - (255.0 / 2)) / 255.0)
        img1 = img
        # apply feature extractor
        img = self.encoder.predict_on_batch(img.reshape((1, 120, 320, 3)))

        # initial fill of timesteps
        if not len(steps):
            for _ in xrange(self.timesteps):
                steps.append(img)

        # put most recent features at end
        steps.popleft()
        steps.append(img)
        #print(len(steps))
        #timestepped_x = np.empty((1, self.timesteps, img.shape[1]))
        if dummy == 0:
            return 0, 0, 0, 0, 0, 0, 0
        for i, img in enumerate(steps):
            self.timestepped_x[0, i] = img
        '''
        self.nc.update_coverage(timestepped_x)
        covered_neurons, total_neurons, p = self.nc.curr_neuron_cov()
        print('input covered {} neurons'.format(covered_neurons))
        print('total {} neurons'.format(total_neurons))
        print('percentage {}'.format(p))
        '''
        cnn_ndict = self.nc_encoder.update_coverage(
            img1.reshape((1, 120, 320, 3)))
        cnn_covered_neurons, cnn_total_neurons, p = self.nc_encoder.curr_neuron_cov(
        )
        if dummy == 1:
            return cnn_ndict, cnn_covered_neurons, cnn_total_neurons, 0, 0, 0, 0
        lstm_ndict = self.nc_lstm.update_coverage(self.timestepped_x)
        lstm_covered_neurons, lstm_total_neurons, p = self.nc_lstm.curr_neuron_cov(
        )
        return cnn_ndict, cnn_covered_neurons, cnn_total_neurons, lstm_ndict,\
        lstm_covered_neurons, lstm_total_neurons,\
        self.lstm.predict_on_batch(self.timestepped_x)[0, 0] / self.scale
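
A sketch, under the assumption that this class and its dependencies (NCoverage, the Keras JSON/weights files) are available as above, of how the model might be queried frame by frame; all file paths are placeholders.

import cv2

# Hypothetical usage; the four model files below are placeholders.
model = ChauffeurModel('cnn.json', 'cnn.weights.hdf5',
                       'lstm.json', 'lstm.weights.hdf5')

frame = cv2.imread('sample_frame.jpg')
# dummy=2 (the default) updates both the CNN encoder and LSTM coverage
(cnn_ndict, cnn_covered, cnn_total,
 lstm_ndict, lstm_covered, lstm_total, steering) = model.predict_fn(frame)
print('encoder coverage %d/%d, steering %f' % (cnn_covered, cnn_total, steering))
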
Example #3
def epoch_guided(dataset_path):
    model_name = "cnn"
    image_size = (128, 128)
    threshold = 0.2
    weights_path = './weights_HMB_2.hdf5'  # Change to your model weights

    seed_inputs1 = os.path.join(dataset_path, "hmb3/")
    seed_labels1 = os.path.join(dataset_path, "hmb3/hmb3_steering.csv")
    seed_inputs2 = os.path.join(dataset_path, "Ch2_001/center/")
    seed_labels2 = os.path.join(dataset_path,
                                "Ch2_001/CH2_final_evaluation.csv")

    new_inputs = "./new/"
    # Model build
    # ---------------------------------------------------------------------------------
    model_builders = {
        'V3': (build_InceptionV3, preprocess_input_InceptionV3, exact_output),
        'cnn': (build_cnn, normalize_input, exact_output)
    }

    if model_name not in model_builders:
        raise ValueError("unsupported model %s" % model_name)
    model_builder, input_processor, output_processor = model_builders[
        model_name]
    model = model_builder(image_size, weights_path)
    print('model %s built...' % model_name)

    filelist1 = []
    for file in sorted(os.listdir(seed_inputs1)):
        if file.endswith(".jpg"):
            filelist1.append(file)

    truth = {}
    with open(seed_labels1, 'rb') as csvfile1:
        label1 = list(csv.reader(csvfile1, delimiter=',', quotechar='|'))
    label1 = label1[1:]
    for i in label1:
        truth[i[0] + ".jpg"] = i[1]

    newlist = []
    for file in sorted(os.listdir(new_inputs)):
        if file.endswith(".jpg"):
            newlist.append(file)

    flag = 0
    #flag:0 start from beginning
    #flag:1 initialize from pickle files
    '''
    Pickle files are used for continuing the search after rerunning the script.
    Delete all pkl files and generated images to start from the beginning.
    '''
    if os.path.isfile("epoch_covdict2.pkl") and \
            os.path.isfile("epoch_stack.pkl") and \
            os.path.isfile("epoch_queue.pkl") and \
            os.path.isfile("generated.pkl"):
        with open('epoch_covdict2.pkl', 'rb') as input:
            covdict = pickle.load(input)
        with open('epoch_stack.pkl', 'rb') as input:
            epoch_stack = pickle.load(input)
        with open('epoch_queue.pkl', 'rb') as input:
            epoch_queue = pickle.load(input)
        with open('generated.pkl', 'rb') as input:
            generated = pickle.load(input)
        flag = 1

    nc = NCoverage(model, threshold)

    if flag == 0:
        filewrite = "wb"
        epoch_queue = deque()
        epoch_stack = []
        generated = 0
    else:
        nc.set_covdict(covdict)
        filewrite = "ab"
        print("initialize from files and continue from previous progress")

    C = 0  # covered neurons
    P = 0  # covered percentage
    T = 0  # total neurons
    transformations = [
        image_translation, image_scale, image_shear, image_rotation,
        image_contrast, image_brightness2, image_blur
    ]
    params = []
    params.append(list(xrange(-50, 50)))
    params.append(list(map(lambda x: x * 0.1, list(xrange(5, 20)))))
    params.append(list(map(lambda x: x * 0.1, list(xrange(-5, 5)))))
    params.append(list(xrange(-30, 30)))
    params.append(list(map(lambda x: x * 0.1, list(xrange(1, 20)))))
    params.append(list(xrange(-21, 21)))
    params.append(list(xrange(1, 11)))

    maxtrynumber = 10
    maximages = 200
    cache = deque()
    image_count = 0
    #load nc, generation, population
    with open('result/epoch_rq3_100_2.csv', filewrite, 0) as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        if flag == 0:
            writer.writerow([
                'id', 'seed image(root)', 'parent image',
                'new generated image', 'number of generated images',
                'total_covered', 'total_neurons', 'coverage_percentage',
                'transformations', 'yhat', 'baseline', 'label'
            ])
            #initialize population and coverage
            print("compute coverage of original population")
            input_images = xrange(1, 101)
            for i in input_images:
                j = i * 50
                epoch_queue.append(os.path.join(seed_inputs1, filelist1[j]))

        while len(epoch_queue) > 0:
            current_seed_image = epoch_queue[0]
            print(str(len(epoch_queue)) + " images are left.")
            if len(epoch_stack) == 0:
                epoch_stack.append(current_seed_image)
            image = cv2.imread(current_seed_image)
            test_x = read_transformed_image(image, image_size)
            test_x = input_processor(test_x.astype(np.float32))
            nc.update_coverage(test_x)
            baseline_yhat = model.predict(test_x)
            #image_count = 0
            while len(epoch_stack) > 0:
                try:

                    image_file = epoch_stack[-1]
                    print("current image in stack " + image_file)
                    image = cv2.imread(image_file)
                    new_generated = False
                    for i in xrange(maxtrynumber):

                        tid = random.sample([0, 1, 2, 3, 4, 5, 6], 2)
                        if len(cache) > 0:
                            tid[0] = cache.popleft()
                        transinfo = ""
                        new_image = image
                        for j in xrange(2):
                            transformation = transformations[tid[j]]
                            #random choose parameter
                            param = random.sample(params[tid[j]], 1)
                            param = param[0]
                            transinfo = transinfo + transformation.__name__ + ':' + str(
                                param) + ';'
                            print("transformation " + transformation.__name__ +
                                  "  parameter " + str(param))
                            new_image = transformation(new_image, param)

                        new_x = read_transformed_image(new_image, image_size)

                        test_x = input_processor(new_x.astype(np.float32))
                        if nc.is_testcase_increase_coverage(test_x):
                            print(
                                "Generated image increases coverage and will be added to population."
                            )
                            cache.append(tid[0])
                            cache.append(tid[1])
                            generated = generated + 1
                            #image_count = image_count + 1
                            name = os.path.basename(
                                current_seed_image) + '_' + str(
                                    generated) + '.jpg'
                            name = os.path.join(new_inputs, name)
                            cv2.imwrite(name, new_image)
                            epoch_stack.append(name)

                            nc.update_coverage(test_x)
                            yhat = model.predict(test_x)
                            covered, total, p = nc.curr_neuron_cov()
                            C = covered
                            T = total
                            P = p
                            csvrecord = []
                            csvrecord.append(100 - len(epoch_queue))
                            csvrecord.append(
                                os.path.basename(current_seed_image))
                            if len(epoch_stack) >= 2:
                                parent = os.path.basename(epoch_stack[-2])
                            else:
                                parent = os.path.basename(current_seed_image)
                            child = os.path.basename(
                                current_seed_image) + '_' + str(
                                    generated) + '.jpg'
                            csvrecord.append(parent)
                            csvrecord.append(child)
                            csvrecord.append(generated)
                            csvrecord.append(C)
                            csvrecord.append(T)
                            csvrecord.append(P)
                            csvrecord.append(transinfo)
                            csvrecord.append(yhat[0][0])
                            csvrecord.append(baseline_yhat[0][0])
                            csvrecord.append(
                                truth[os.path.basename(current_seed_image)])
                            print(csvrecord)
                            writer.writerow(csvrecord)
                            new_generated = True
                            break
                        else:
                            print(
                                "Generated image does not increase coverage.")
                    if not new_generated:
                        epoch_stack.pop()

                    save_object(epoch_stack, 'epoch_stack.pkl')
                    save_object(epoch_queue, 'epoch_queue.pkl')
                    save_object(nc.cov_dict, 'epoch_covdict2.pkl')
                    save_object(generated, 'generated.pkl')

                except ValueError:
                    print("value error")
                    epoch_stack.pop()
                    save_object(epoch_stack, 'epoch_stack.pkl')
                    save_object(epoch_queue, 'epoch_queue.pkl')

            epoch_queue.popleft()
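
As with the first example, a hypothetical entry point (the module name is an assumption, not from the original). The function expects the dataset layout shown above, writes generated images under ./new/, its CSV under result/, and progress pickles to the working directory.

import os

# Hypothetical driver; `epoch_guided_search` as a module name is an assumption.
from epoch_guided_search import epoch_guided

if __name__ == '__main__':
    for d in ('result', 'new'):       # output locations used by epoch_guided
        if not os.path.isdir(d):
            os.makedirs(d)
    epoch_guided('/path/to/dataset')
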
Example #4
class Model(object):
    '''
    Rambo model with integrated neuron coverage
    '''
    def __init__(self, model_path, X_train_mean_path):

        self.model = load_model(model_path)
        self.model.compile(optimizer="adam", loss="mse")
        self.X_mean = np.load(X_train_mean_path)
        self.mean_angle = np.array([-0.004179079])
        print(self.mean_angle)
        self.img0 = None
        self.state = deque(maxlen=2)

        self.threshold = 0.2
        #self.nc = NCoverage(self.model,self.threshold)
        s1 = self.model.get_layer('sequential_1')
        self.nc1 = NCoverage(s1, self.threshold)
        #print(s1.summary())

        s2 = self.model.get_layer('sequential_2')
        #print(s2.summary())
        self.nc2 = NCoverage(s2, self.threshold)

        s3 = self.model.get_layer('sequential_3')
        #print(s3.summary())
        self.nc3 = NCoverage(s3, self.threshold)

        i1 = self.model.get_layer('input_1')

        self.i1_model = Kmodel(input=self.model.inputs, output=i1.output)

    def predict(self, img):
        img_path = 'test.jpg'
        misc.imsave(img_path, img)
        img1 = load_img(img_path, grayscale=True, target_size=(192, 256))
        img1 = img_to_array(img1)

        if self.img0 is None:
            self.img0 = img1
            return self.mean_angle[0]

        elif len(self.state) < 1:
            img = img1 - self.img0
            img = rescale_intensity(img,
                                    in_range=(-255, 255),
                                    out_range=(0, 255))
            img = np.array(img, dtype=np.uint8)  # to replicate initial model
            self.state.append(img)
            self.img0 = img1

            return self.mean_angle[0]

        else:
            img = img1 - self.img0
            img = rescale_intensity(img,
                                    in_range=(-255, 255),
                                    out_range=(0, 255))
            img = np.array(img, dtype=np.uint8)  # to replicate initial model
            self.state.append(img)
            self.img0 = img1

            X = np.concatenate(self.state, axis=-1)
            X = X[:, :, ::-1]
            X = np.expand_dims(X, axis=0)
            X = X.astype('float32')
            X -= self.X_mean
            X /= 255.0
            return self.model.predict(X)[0][0]

    def predict1(self, img, transform, params):
        '''
        Rewrite predict method for computing and updating neuron coverage.
        '''
        img_path = 'test.jpg'
        misc.imsave(img_path, img)
        img1 = load_img(img_path, grayscale=True, target_size=(192, 256))
        img1 = img_to_array(img1)

        if self.img0 is None:
            self.img0 = img1
            return 0, 0, self.mean_angle[0], 0, 0, 0, 0, 0, 0, 0, 0, 0

        elif len(self.state) < 1:
            img = img1 - self.img0
            img = rescale_intensity(img,
                                    in_range=(-255, 255),
                                    out_range=(0, 255))
            img = np.array(img, dtype=np.uint8)  # to replicate initial model
            self.state.append(img)
            self.img0 = img1

            return 0, 0, self.mean_angle[0], 0, 0, 0, 0, 0, 0, 0, 0, 0

        else:
            img = img1 - self.img0
            img = rescale_intensity(img,
                                    in_range=(-255, 255),
                                    out_range=(0, 255))
            img = np.array(img, dtype=np.uint8)  # to replicate initial model
            self.state.append(img)
            self.img0 = img1

            X = np.concatenate(self.state, axis=-1)

            if transform is not None and params is not None:
                X = transform(X, params)

            X = X[:, :, ::-1]
            X = np.expand_dims(X, axis=0)
            X = X.astype('float32')
            X -= self.X_mean
            X /= 255.0

            #print(self.model.summary())
            #for layer in self.model.layers:
            #print (layer.name)

            i1_outputs = self.i1_model.predict(X)

            d1 = self.nc1.update_coverage(i1_outputs)
            covered_neurons1, total_neurons1, p1 = self.nc1.curr_neuron_cov()
            c1 = covered_neurons1
            t1 = total_neurons1

            d2 = self.nc2.update_coverage(i1_outputs)
            covered_neurons2, total_neurons2, p2 = self.nc2.curr_neuron_cov()
            c2 = covered_neurons2
            t2 = total_neurons2

            d3 = self.nc3.update_coverage(i1_outputs)
            covered_neurons3, total_neurons3, p3 = self.nc3.curr_neuron_cov()
            c3 = covered_neurons3
            t3 = total_neurons3
            covered_neurons = covered_neurons1 + covered_neurons2 + covered_neurons3
            total_neurons = total_neurons1 + total_neurons2 + total_neurons3

            return covered_neurons, total_neurons, self.model.predict(
                X)[0][0], c1, t1, d1, c2, t2, d2, c3, t3, d3
            #return 0, 0, self.model.predict(X)[0][0],rs1[0][0],rs2[0][0],rs3[0][0],0,0,0

    def hard_reset(self):
        '''
        Reset the coverage of three cnn sub-models
        '''
        self.mean_angle = np.array([-0.004179079])
        #print self.mean_angle
        self.img0 = None
        self.state = deque(maxlen=2)
        self.threshold = 0.2
        #self.nc.reset_cov_dict()
        self.nc1.reset_cov_dict()
        self.nc2.reset_cov_dict()
        self.nc3.reset_cov_dict()

    def soft_reset(self):

        self.mean_angle = np.array([-0.004179079])
        print(self.mean_angle)
        self.img0 = None
        self.state = deque(maxlen=2)
        self.threshold = 0.2
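
A minimal sketch of feeding consecutive frames to the coverage-aware predict1; the model/mean paths and image files are placeholders. The first two calls only warm up the two-frame state and return zeros plus the mean angle, as in the code above.

import cv2

# Hypothetical usage; file paths are placeholders.
model = Model('rambo_model.hdf5', 'X_train_mean.npy')

for path in ('frame0.jpg', 'frame1.jpg', 'frame2.jpg'):
    frame = cv2.imread(path)
    result = model.predict1(frame, None, None)    # no extra transformation
    covered, total, steering = result[0], result[1], result[2]
    print(covered, total, steering)               # full coverage from frame 3 on
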
Example #5
class ChauffeurModel(object):
    '''
    Chauffeur model with integrated neuron coverage
    '''
    def __init__(self,
                 cnn_json_path,
                 cnn_weights_path,
                 lstm_json_path,
                 lstm_weights_path, only_layer=""):

        self.cnn = self.load_from_json(cnn_json_path, cnn_weights_path)
        self.encoder = self.load_encoder(cnn_json_path, cnn_weights_path)
        self.lstm = self.load_from_json(lstm_json_path, lstm_weights_path)

        # hardcoded from final submission model
        self.scale = 16.
        self.timesteps = 100

        self.threshold_cnn = 0.1
        self.threshold_lstm = 0.4
        self.timestepped_x = np.empty((1, self.timesteps, 8960))
        self.nc_lstm = NCoverage(self.lstm, self.threshold_lstm)
        self.nc_encoder = NCoverage(self.encoder, self.threshold_cnn, exclude_layer=['pool', 'fc', 'flatten'], only_layer=only_layer)
        self.steps = deque()
        #print(self.lstm.summary())
        #self.nc = NCoverage(self.lstm,self.threshold)

    def load_encoder(self, cnn_json_path, cnn_weights_path):
        model = self.load_from_json(cnn_json_path, cnn_weights_path)
        model.load_weights(cnn_weights_path)

        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []

        return model

    def load_from_json(self, json_path, weights_path):
        model = model_from_json(open(json_path, 'r').read())
        model.load_weights(weights_path)
        return model

    def make_cnn_only_predictor(self):
        def predict_fn(img):
            img = cv2.resize(img, (320, 240))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
            img = img[120:240, :, :]
            img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
            img = ((img-(255.0/2))/255.0)

            return self.cnn.predict_on_batch(img.reshape((1, 120, 320, 3)))[0, 0] / self.scale

        return predict_fn

    #def make_stateful_predictor(self):
        #steps = deque()

    def predict_fn(self, img, test=0):
        # test == 0: update the coverage only
        # test == 1: test if the input image will increase the current coverage
        steps = self.steps
        img = cv2.resize(img, (320, 240))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
        img = img[120:240, :, :]
        img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
        img = ((img-(255.0/2))/255.0)
        img1 = img

        if test == 1:
            return self.nc_encoder.is_testcase_increase_coverage(img1.reshape((1, 120, 320, 3)))
        else:
            cnn_ndict = self.nc_encoder.update_coverage(img1.reshape((1, 120, 320, 3)))
            cnn_covered_neurons, cnn_total_neurons, cnn_p = self.nc_encoder.curr_neuron_cov()
            return cnn_covered_neurons, cnn_total_neurons, cnn_p
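
A brief sketch of the coverage-guided check this variant supports: test=1 asks whether a frame would increase encoder coverage without committing it, while test=0 commits the update and returns the running totals. Model files and the input frame are placeholders.

import cv2

# Hypothetical usage; all paths below are placeholders.
model = ChauffeurModel('cnn.json', 'cnn.weights.hdf5',
                       'lstm.json', 'lstm.weights.hdf5')

frame = cv2.imread('candidate_frame.jpg')
if model.predict_fn(frame, test=1):                        # dry run
    covered, total, pct = model.predict_fn(frame, test=0)  # commit the update
    print('encoder coverage: %d/%d (%.3f)' % (covered, total, pct))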