def validate(data_type, model, seq_length=40, saved_model=None,
             class_limit=None, image_shape=None):
    batch_size = 32

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Evaluate!
    results = rm.model.evaluate_generator(
        generator=val_generator,
        val_samples=3200)

    print(results)
    print(rm.model.metrics_names)
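For reference, a call to this function might look like the sketch below; the data type, model name, and checkpoint path are illustrative placeholders rather than values taken from the original project.

# Hypothetical invocation: evaluate a saved sequence model on pre-extracted features.
validate('features', 'lstm', seq_length=40,
         saved_model='data/checkpoints/lstm-features.hdf5',
         class_limit=None, image_shape=None)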
Example #2
    def __init__(self, name=None, description=None, **kwargs):
       
        # Algorithmic description
        self.name = name
        self.description = description
        self.parameters = DataSet(name='Parameter set')  # List of parameters (of type Parameter)
        self.measures = DataSet(name='Measure set')  # List of measures (the observations of the algorithm)
        self.constraints = []

        # Computational description
        self.parameter_file = self.name + '.param'
        self.sessions = {}  # dictionary mapping session id to parameters
Example #3
 def soft_em_e_step(self, instance, count=1):
     count, evidence = float(count), DataSet.evidence(instance)
     value = self.value(evidence=evidence, clear_data=False)
     pre = value / self.theta_sum
     self.marginals(evidence=evidence, clear_data=False, do_bottom_up=False)
     self._soft_em_accumulate(evidence, pre, count=count)
     return pre
Example #4
def get_and_log_mst_weight_from_checker(input_graph, force_recompute=False, inputslogfn=None):
    """Returns the a 2-tuple of (input, weight).  If force_recompute is not
    True, then it will check the input log cache to see if we already know the
    answer first.  Logs the result."""
    ti = __get_ti(input_graph)

    # load in the inputs in the category of input_graph
    if inputslogfn is None:
        logfn = InputSolution.get_path_to(ti.prec, ti.dims, ti.min, ti.max)
    else:
        logfn = inputslogfn
    ds = DataSet.read_from_file(InputSolution, logfn)
    if ti in ds.dataset:
        input_soln = ds.dataset[ti]
        do_log = True

        # see if we already know the answer
        if not force_recompute:
            if input_soln.has_mst_weight():
                return (ti, input_soln.mst_weight)  # cache hit!
    else:
        # if we weren't tracking the input before, don't start now
        do_log = False

    # compute the answer and (if specified) save it
    w = compute_mst_weight(input_graph)
    if do_log:
        if input_soln.update_mst_weight(w):
            ds.save_to_file(logfn)
    return (ti, w)
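A hedged usage sketch for the function above; the input path is a placeholder, and the InputSolution log files are assumed to be set up as in the surrounding project.

# Hypothetical call: returns the cached MST weight if it is already logged for
# this input, otherwise computes it (and logs it when the input is tracked).
ti, weight = get_and_log_mst_weight_from_checker('inputs/sample_input.g')
print('input=%s  mst_weight=%s' % (str(ti), str(weight)))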
Example #5
def gather_weight_data(wtype):
    # get the results
    results = {} # maps |V| to ResultAccumulator
    ds = DataSet.read_from_file(WeightResult, WeightResult.get_path_to(wtype))
    for data in ds.dataset.values():
        result = results.get(data.input().num_verts)
        if result is None:
            result = ResultAccumulator(data.mst_weight)
            results[data.input().num_verts] = result
        else:
            result.add_data(data.mst_weight)

    try:
        # open a file to output to
        fh = open(DATA_PATH + wtype + '.dat', 'w')

        # compute relevant stats and output them
        print >> fh, '#|V|\tLower\tAverage\tUpper\tCount  (Lower/Upper from 99% CI)'
        keys = results.keys()
        keys.sort()
        for num_verts in keys:
            r = results[num_verts]
            r.compute_stats()
            if len(r.values) > 1:
                print >> fh, '%u\t%.3f\t%.3f\t%.3f\t%u' % (num_verts, r.lower99, r.mean, r.upper99, len(r.values))
        fh.close()
        return 0
    except IOError, e:
        print >> sys.stderr, "failed to write file: " + str(e)
        return -1
Example #6
 def log_likelihood(self, dataset):
     ll = 0.0
     for instance, count in dataset:
         evidence = DataSet.evidence(instance)
         pr = self.probability(evidence)
         ll += count * math.log(pr)
     return ll
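The dataset iterated above is assumed to yield (instance, count) pairs, so the result is the count-weighted sum of log-probabilities of each instance's evidence. A minimal hedged sketch of a call, where `model` is whatever object exposes this method and the instance format is a placeholder:

# Hypothetical weighted dataset: each instance is paired with a repeat count;
# the instance format here is a placeholder (whatever DataSet.evidence expects).
weighted_data = [({'A': 1, 'B': 0}, 3),
                 ({'A': 0, 'B': 1}, 1)]
ll = model.log_likelihood(weighted_data)  # count-weighted sum of log P(evidence)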
Example #7
def predict(data_type, seq_length, saved_model, image_shape, video_name, class_limit):
    model = load_model(saved_model)

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length, image_shape=image_shape,
            class_limit=class_limit)
    
    # Extract the sample from the data.
    sample = data.get_frames_by_filename(video_name, data_type)

    # Predict!
    prediction = model.predict(np.expand_dims(sample, axis=0))
    print(prediction)
    data.print_class_from_prediction(np.squeeze(prediction, axis=0))
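An invocation might look like the following sketch; the checkpoint path and video name are placeholders, not values from the original repository.

# Hypothetical invocation: classify a single video from pre-extracted features.
predict(data_type='features', seq_length=40,
        saved_model='data/checkpoints/lstm-features.hdf5',
        image_shape=None, video_name='v_Archery_g01_c01', class_limit=None)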
Example #8
    def normalize(self, ds_source):
        """
        Apply the normalizing operation to a given `DataSet`.

        :Parameters:
            ds_source : `DataSet` 
                Data set to normalize.

        :Returns:
            `DataSet` : Normalized data set.

        :Raises NpyDataTypeError:
            If the given `DataSet` has not been numerized.
        """

        if not ds_source.is_numerized:
            raise NpyDataTypeError('ds_source must be numerized first.')

        ds_dest = DataSet()
        ds_dest.set_name_attribute(ds_source.get_name_attribute())

        data_instances = ds_source.get_data_instances()
        for data_instance_old in data_instances:

            attributes_new = []

            # Normalize each attribute
            for index, value in enumerate(data_instance_old.get_attributes()):
                value_new = (value - self.min[index]) * self.max[index] * (self.upper_bound - self.lower_bound) + self.lower_bound
                attributes_new.append(value_new)

            ds_dest.add_data_instance(data_instance_old.get_index_number(), attributes_new, data_instance_old.get_label_number())

        ds_dest.is_numerized = True
        return ds_dest
Example #9
def main(data_filename, stat_filename, max_iter, sample_rate, learn_rate, max_depth, split_points):
    dataset = DataSet(data_filename)
    print("Model parameters configuration:[data_file=%s,stat_file=%s,max_iter=%d,sample_rate=%f,learn_rate=%f,max_depth=%d,split_points=%d]"
          % (data_filename, stat_filename, max_iter, sample_rate, learn_rate, max_depth, split_points))
    dataset.describe()
    stat_file = open(stat_filename, "w")
    stat_file.write("iteration\taverage loss in train data\tprediction accuracy on test data\taverage loss in test data\n")
    model = Model(max_iter, sample_rate, learn_rate, max_depth, split_points)
    train_data = sample(dataset.get_instances_idset(), int(dataset.size() * 2.0 / 3.0))
    test_data = set(dataset.get_instances_idset()) - set(train_data)
    model.train(dataset, train_data, stat_file, test_data)
    # model.test(dataset, test_data)
    stat_file.close()
Example #10
    def numerize(self, ds_source):
        """
        Apply the numerizing operation to a given `DataSet`.

        :Parameters:
            ds_source : `DataSet`
                Data set to numerize.

        :Returns:
            `DataSet` : Numerized data set.

        :Raises NpyDataTypeError:
            If ds_source has already been numerized.
        """
        if ds_source.is_numerized:
            raise NpyDataTypeError('ds_source has already been numerized.')

        ds_dest = DataSet()
        ds_dest.set_name_attribute(ds_source.get_name_attribute())

        data_instances = ds_source.get_data_instances()
        for data_instance_old in data_instances:

            attributes = []

            # Process the attribute values
            for index, value in enumerate(data_instance_old.get_attributes()):
                try:
                    number = float(value)
                except ValueError:
                    # Every time a non-float attribute value is met,
                    # it is added to the numerizer
                    number = self.attribute_string_to_number(value, index) 
                attributes.append(number)

            # Process the label value
            label_old = data_instance_old.get_label_number()
            try:
                label_new = float(label_old)
            except ValueError:
                # Every time a non-float label value is met,
                # it is added to the numerizer
                label_new = self.label_string_to_number(label_old)

            ds_dest.add_data_instance(data_instance_old.get_index_number(), attributes, label_new)

        ds_dest.is_numerized = True
        return ds_dest 
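In a typical pipeline the two operations shown here and in Example #8 are chained: a raw data set is numerized first and the numeric result is then normalized. A minimal hedged sketch, assuming pre-built `numerizer` and `normalizer` objects that expose these methods (their construction and fitting are omitted and are assumptions, not part of the original code):

# Hypothetical pipeline: string attributes/labels -> numbers -> rescaled values.
ds_raw = DataSet()
# ... populate ds_raw with data instances ...
ds_numeric = numerizer.numerize(ds_raw)       # numerizer: assumed, pre-built object
ds_scaled = normalizer.normalize(ds_numeric)  # normalizer: assumed, with min/max fitted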
Example #11
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          class_limit=None,
          image_shape=None,
          load_to_memory=False,
          batch_size=32,
          nb_epoch=100):
    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('/data/d14122793/UCF101_Video_Classi/data', 'checkpoints', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join(
        '/data/d14122793/UCF101_Video_Classi/data', 'logs', model))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('/data/d14122793/UCF101_Video_Classi/data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)

Example #12
# Check for CUDA
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

# torch.manual_seed(sys.argv[1])

# Set values for DataSet object.
batch_size = 2
seq_length = 2
class_limit = None  # Number of classes to extract. Can be 1-101 or None for all.
video_limit = 2  # Number of videos allowed per class.  None for no limit
data = DataSet(seq_length=seq_length, class_limit=class_limit, video_limit=video_limit)
H, W, C = data.image_shape
video_array = np.zeros((batch_size, seq_length, C, H, W))
i = 0
video_count = 0
for video in data.data:
    if i != 92 and i != 100:
        i += 1
        continue
    else:
        # this_video =
        video_array[video_count] = data.video_to_vid_array(video)  # Get numpy array of sequence
        video_count += 1
        i += 1
        if video_count >= batch_size:
            break
Example #13
def train(seq_length):
    # Set variables.
    nb_epoch = 10000
    batch_size = 32
    regularization_value = 0.004
    learning_rate = 0.001
    nb_feature = 2048
    database = 'Deception'

    data = DataSet(database, seq_length)
    skf = StratifiedKFold(n_splits=5)
    nb_class = len(data.classes)

    # Set model
    num_hiddens = [90, 90, 90]
    seq_images = tf.placeholder(dtype=tf.float32,
                                shape=[None, seq_length, nb_feature])
    input_labels = tf.placeholder(dtype=tf.float32, shape=[None, nb_class])
    drop_out = tf.placeholder(dtype=tf.float32)
    rnn_model = MultiLstm(seq_images, input_labels, drop_out, num_hiddens,
                          regularization_value, learning_rate, 5)

    # training
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        all_samples_prediction, all_samples_true = [], []
        for train, test in skf.split(data.data, data.label):
            generator = data.frame_generator_train(batch_size, train)
            for epoch in range(nb_epoch):
                batch_seq_images, batch_labels = next(generator)
                sess.run(rnn_model.optimize,
                         feed_dict={
                             seq_images: batch_seq_images,
                             input_labels: batch_labels,
                             drop_out: 0.5
                         })
                accuracy = sess.run(rnn_model.accuracy,
                                    feed_dict={
                                        seq_images: batch_seq_images,
                                        input_labels: batch_labels,
                                        drop_out: 1.
                                    })
                print("Epoch {:2d}, training accuracy {:4.2f}".format(
                    epoch, accuracy))

            test_data, test_label = data.get_set_from_data(test)
            all_samples_true.append(test_label)
            for test_epoch in range(1, math.ceil(len(test) / batch_size) + 1):
                test_batch_images = data.frame_generator_test(
                    test_data, batch_size, test_epoch)
                test_predict_labels = sess.run(rnn_model.test_prediction,
                                               feed_dict={
                                                   seq_images:
                                                   test_batch_images,
                                                   drop_out: 1.
                                               })
                all_samples_prediction.append(list(test_predict_labels))
    all_samples_prediction = np.array(list(
        chain.from_iterable(all_samples_prediction)),
                                      dtype=float)
    all_samples_true = np.array(list(chain.from_iterable(all_samples_true)),
                                dtype=float)
    test_accuracy_cv = np.mean(
        np.equal(all_samples_prediction, all_samples_true))
    print("CV test accuracy {:4.2f}".format(test_accuracy_cv))
Example #14
def check(model,
          saved_model=None,
          seq_length=150,
          dataFolder='dataset',
          stack=True,
          imType=None,
          separation=0,
          pxShift=False,
          sequence=False):

    maneuverList = [
        'circle_natural_050', 'circle_natural_075', 'circle_natural_100',
        'circle_natural_125', 'circle_natural_150'
    ]
    fileList = [
        '050_grass.csv', '075_grass.csv', '100_grass.csv', '125_grass.csv',
        '150_grass.csv'
    ]

    for i in range(0, len(maneuverList)):

        maneuver = maneuverList[i]
        file = 'results_final/experiment_variance/' + fileList[i]
        print(maneuver)

        #########################

        write = True
        if os.path.isfile(file):
            while True:
                entry = input(
                    "Do you want to overwrite the existing file? [y/n] ")
                if entry == 'y' or entry == 'Y':
                    write = True
                    break
                elif entry == 'n' or entry == 'N':
                    write = False
                    break
                else:
                    print("Try again.")

        if write == True:
            ofile = open(file, "w")
            writer = csv.writer(ofile, delimiter=',')

        # initialize the dataset
        data = DataSet(seq_length=seq_length,
                       dataFolder=dataFolder,
                       stack=stack,
                       imType=imType,
                       separation=separation,
                       pxShift=pxShift,
                       sequence=sequence)

        # initialize the model
        rm = ResearchModels(model, seq_length, saved_model, imType)

        # count the number of images in this folder
        if imType == 'OFF' or imType == 'ON':
            path = dataFolder + '/' + maneuver + '/' + imType + '/'
        elif imType == 'both':
            path = dataFolder + '/' + maneuver + '/' + 'ON' + '/'  # just for counting frames
        else:
            path = dataFolder + '/' + maneuver + '/'
        pngs = len(glob.glob1(path, "*.png"))

        # starting image
        seqNum = 12
        seqNum += seq_length - 1 + separation * seq_length

        while seqNum <= pngs:

            # get data, predict, and store
            X, y, Nef, var_img = data.test(maneuver, seqNum)
            output = rm.model.predict(X)

            # write output
            if write == True:
                writer.writerow([
                    seqNum,
                    float(output[0][0]),
                    float(output[0][1]), Nef, var_img
                ])

            # progress
            sys.stdout.write('\r' + '{0}\r'.format(int(
                (seqNum / pngs) * 100)), )
            sys.stdout.flush()

            # update sequence number
            seqNum += 1
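For context, a hedged invocation sketch; the model name and checkpoint path below are placeholders, not values from the original experiments.

# Hypothetical run over the hard-coded maneuver list above.
check('convlstm', saved_model='checkpoints/convlstm.hdf5',
      seq_length=150, dataFolder='dataset', imType='both')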
Example #15
class Solver(object):
    def __init__(self,
                 train=True,
                 common_params=None,
                 solver_params=None,
                 net_params=None,
                 dataset_params=None):
        if common_params:
            self.device_id = int(common_params['gpus'])
            self.image_size = int(common_params['image_size'])
            self.height = self.image_size
            self.width = self.image_size
            self.batch_size = int(common_params['batch_size'])
            self.num_gpus = 1
        if solver_params:
            self.learning_rate = float(solver_params['learning_rate'])
            self.moment = float(solver_params['moment'])
            self.max_steps = int(solver_params['max_iterators'])
            self.train_dir = str(solver_params['train_dir'])
            self.lr_decay = float(solver_params['lr_decay'])
            self.decay_steps = int(solver_params['decay_steps'])
        self.train = train
        # self.net = Net(train=train, common_params=common_params, net_params=net_params)
        self.net = ResNet(train=train,
                          common_params=common_params,
                          net_params=net_params)
        self.dataset = DataSet(common_params=common_params,
                               dataset_params=dataset_params)

    def construct_graph(self, scope):
        with tf.device('/gpu:' + str(self.device_id)):
            self.data_l = tf.placeholder(
                tf.float32, (self.batch_size, self.height, self.width, 1))
            self.gt_ab_313 = tf.placeholder(
                tf.float32, (self.batch_size, int(
                    self.height / 4), int(self.width / 4), 313))
            self.prior_boost_nongray = tf.placeholder(
                tf.float32, (self.batch_size, int(
                    self.height / 4), int(self.width / 4), 1))

            self.conv8_313 = self.net.inference(self.data_l)
            new_loss, g_loss = self.net.loss(scope, self.conv8_313,
                                             self.prior_boost_nongray,
                                             self.gt_ab_313)
            tf.summary.scalar('new_loss', new_loss)
            tf.summary.scalar('total_loss', g_loss)
        return new_loss, g_loss

    def train_model(self):
        with tf.device('/gpu:' + str(self.device_id)):
            self.global_step = tf.get_variable(
                'global_step', [],
                initializer=tf.constant_initializer(0),
                trainable=False)
            learning_rate = tf.train.exponential_decay(self.learning_rate,
                                                       self.global_step,
                                                       self.decay_steps,
                                                       self.lr_decay,
                                                       staircase=True)
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                         beta2=0.99)
            with tf.name_scope('gpu') as scope:
                new_loss, self.total_loss = self.construct_graph(scope)
                self.summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
                                                   scope)
            grads = opt.compute_gradients(new_loss)

            self.summaries.append(
                tf.summary.scalar('learning_rate', learning_rate))

            for grad, var in grads:
                if grad is not None:
                    self.summaries.append(
                        tf.summary.histogram(var.op.name + '/gradients', grad))

            apply_gradient_op = opt.apply_gradients(
                grads, global_step=self.global_step)

            for var in tf.trainable_variables():
                self.summaries.append(tf.summary.histogram(var.op.name, var))

            variable_averages = tf.train.ExponentialMovingAverage(
                0.999, self.global_step)
            variables_averages_op = variable_averages.apply(
                tf.trainable_variables())

            train_op = tf.group(apply_gradient_op, variables_averages_op)

            saver = tf.train.Saver(write_version=1)
            saver1 = tf.train.Saver()
            summary_op = tf.summary.merge(self.summaries)
            init = tf.global_variables_initializer()
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            sess.run(init)
            #saver1.restore(sess, './models/model.ckpt')
            #nilboy
            summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph)
            for step in xrange(self.max_steps):
                start_time = time.time()
                t1 = time.time()
                data_l, gt_ab_313, prior_boost_nongray = self.dataset.batch()
                t2 = time.time()
                _, loss_value = sess.run(
                    [train_op, self.total_loss],
                    feed_dict={
                        self.data_l: data_l,
                        self.gt_ab_313: gt_ab_313,
                        self.prior_boost_nongray: prior_boost_nongray
                    })
                duration = time.time() - start_time
                t3 = time.time()
                print('io: ' + str(t2 - t1) + '; compute: ' + str(t3 - t2))
                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                if step % 1 == 0:
                    num_examples_per_step = self.batch_size * self.num_gpus
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = duration / self.num_gpus

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, loss_value,
                                        examples_per_sec, sec_per_batch))

                if step % 10 == 0:
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               self.data_l:
                                               data_l,
                                               self.gt_ab_313:
                                               gt_ab_313,
                                               self.prior_boost_nongray:
                                               prior_boost_nongray
                                           })
                    summary_writer.add_summary(summary_str, step)

                # Save the model checkpoint periodically.
                if step % 1000 == 0:
                    checkpoint_path = os.path.join(self.train_dir,
                                                   'model_resnet.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
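A hedged driver sketch for the Solver class above; the dictionary keys mirror the ones read in `__init__`, but every value (and the call pattern itself) is a placeholder, not taken from the original project.

# Hypothetical configuration and training run.
common_params = {'gpus': '0', 'image_size': '224', 'batch_size': '32'}
solver_params = {'learning_rate': '1e-4', 'moment': '0.9', 'max_iterators': '100000',
                 'train_dir': 'models', 'lr_decay': '0.1', 'decay_steps': '50000'}
solver = Solver(train=True, common_params=common_params, solver_params=solver_params,
                net_params={}, dataset_params={})
solver.train_model()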
Example #16
def train(istrain=True, model_type='quadmodal_1', saved_model_path=None, task='emotion',
         batch_size=2, nb_epoch=200, learning_r=1e-3, show_plots=True, is_fusion=False,
         fusion_type=None, pretrained=False):
    """
    train the model
    :param model: 'visual_model','audio_model','word_model','trimodal_model','quadmodal_X_model'
    :param saved_model_path: saved_model path
    :param task: 'aoursal','valence','emotion'
    :param batch_size: 2
    :param nb_epoch:2100
    :return:s
    """
    timestamp =  time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time()))
    # Helper: Save the model.
    model_name = model_type
    model_name = model_name.replace(':','-')
    model_name = model_name.replace('[','')
    model_name = model_name.replace(']','')
    if ',' in model_name:
        model_name = model_name.replace(',','__')
        max_len = 200
        if len(model_name) >= max_len:
            model_name = model_name[:max_len]
        model_name = 'fusion_' + fusion_type + '__' + model_name
    if not os.path.exists(os.path.join('checkpoints', model_name)):
        os.makedirs(os.path.join('checkpoints', model_name))
    checkpointer = ModelCheckpoint(
        monitor='val_acc',
        #filepath = os.path.join('checkpoints', model, task+'-'+ str(timestamp)+'-'+'best.hdf5' ),
        filepath = os.path.join('checkpoints', model_name, task + '-{val_acc:.3f}-{acc:.3f}.hdf5' ),
        verbose=1,
        save_best_only=True)
    checkpointer_acc = ModelCheckpoint(
        monitor='acc',
        #filepath = os.path.join('checkpoints', model, task+'-'+ str(timestamp)+'-'+'best.hdf5' ),
        filepath = os.path.join('checkpoints', model_name, task + '-{val_acc:.3f}-{acc:.3f}.hdf5' ),
        verbose=1,
        save_best_only=True)
    
    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join('logs', model_name))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=1000)
    
    # Helper: Save results.
    
    csv_logger = CSVLogger(os.path.join('logs', model_name , task +'-'+ \
        str(timestamp) + '.log'))

    # Get the data and process it.
    # seq_length for the sentence
    seq_length = 20
    dataset = DataSet(
        istrain = istrain,
        model = model_type,
        task = task,
        seq_length=seq_length,
        model_name=model_name,
        is_fusion=is_fusion
        )

    # Get the model.
    model = None
    if pretrained:
        model_weights_path = get_best_model(model_name)
        if model_weights_path:
            print('USING MODEL', model_weights_path)
            model = load_model(model_weights_path)
        # model_file = os.path.join('models',model_name + '.hdf5')
        # if os.path.exists(model_file):
        #     model = load_model(model_file)
        # else:
        #     print('No trained model found')
    if model is None:
        rm = ResearchModels(
                istrain = istrain,
                model = model_type, 
                seq_length = seq_length, 
                saved_path=saved_model_path, 
                task_type= task,
                learning_r = learning_r,
                model_name=model_name,
                is_fusion=is_fusion,
                fusion_type=fusion_type
                )
        model = rm.model
    # Get training and validation data.
    x_train, y_train, train_name_list = dataset.get_all_sequences_in_memory('Train')
    x_valid, y_valid, valid_name_list= dataset.get_all_sequences_in_memory('Validation')
    x_test, y_test, test_name_list = dataset.get_all_sequences_in_memory('Test')
    if task == 'emotion':
        y_train = to_categorical(y_train)
        y_valid = to_categorical(y_valid)
        y_test = to_categorical(y_test)

    # Fit!
    # Use standard fit
    print('Size', len(x_train), len(y_train), len(x_valid), len(y_valid), len(x_test), len(y_test))
    history = model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        validation_data=(x_valid,y_valid),
        verbose=1,
        callbacks=[tb, csv_logger,  checkpointer, checkpointer_acc],
        #callbacks=[tb, early_stopper, csv_logger,  checkpointer],
        #callbacks=[tb, lrate, csv_logger,  checkpointer],
        epochs=nb_epoch)
    
    # find the current best model and get its prediction on validation set
    model_weights_path = get_best_model(model_name)
    #model_weights_path = os.path.join('checkpoints', model_name, task + '-' + str(nb_epoch) + '-' + str(timestamp) + '-' + 'best.hdf5' )
    print('model_weights_path', model_weights_path)

    if model_weights_path:
        best_model = load_custom_model(model_weights_path)
    else:
        best_model = model
    

    y_valid_pred = best_model.predict(x_valid)
    y_valid_pred = np.squeeze(y_valid_pred)
    
    y_train_pred = best_model.predict(x_train)
    y_train_pred = np.squeeze(y_train_pred)

    y_test_pred = best_model.predict(x_test)
    y_test_pred = np.squeeze(y_test_pred)

    #calculate the ccc and mse

    if not os.path.exists('results'):
        os.mkdir('results')
    filename = os.path.join('results', model_name+'__'+str(nb_epoch)+'_'+task+'.txt')
    f1_score = f1(y_valid, y_valid_pred)
    f1_score_test = f1(y_test, y_test_pred)
    acc_val = model.evaluate(x_valid, y_valid, verbose=1)[1]
    acc_train = model.evaluate(x_train, y_train, verbose=1)[1]
    acc_test = model.evaluate(x_test, y_test, verbose=1)[1]
    print("F1 score in validation set is {}".format(f1_score))
    print("F1 score in test set is {}".format(f1_score_test))
    print("Val acc is {}".format(acc_val))
    print("Train acc is {}".format(acc_train))
    print("Test acc is {}".format(acc_test))
    plot_acc(history, model_name, timestamp, show_plots, nb_epoch)
    with open(filename, 'w') as f:
        f.write(str([acc_val, acc_train, acc_test, f1_score, f1_score_test]))
    # display the prediction and true label
    log_path = os.path.join('logs', model_name , task +'-'+ \
        str(timestamp) + '.log')
    
    display_true_vs_pred([y_valid, y_train, y_test], [y_valid_pred, y_train_pred, y_test_pred],log_path, task, model_name, [acc_val, acc_train, acc_test], show_plots, timestamp, nb_epoch)
Example #17
extract all 101 classes. For instance, set class_limit = 8 to just
extract features for the first 8 (alphabetical) classes in the dataset.
Then set the same number when training models.
"""
import numpy as np
import os.path
from data import DataSet
from extractor import Extractor
from tqdm import tqdm

# Set defaults.
seq_length = 40
class_limit = 2  # Number of classes to extract. Can be 1-101 or None for all.

# Get the dataset.
data = DataSet(seq_length=seq_length, class_limit=class_limit)

# get the model.
model = Extractor()

# Loop through data.
pbar = tqdm(total=len(data.data))
for video in data.data:

    # Get the path to the sequence for this video.
    path = './data/MYsequences/' + video[2] + '-' + str(seq_length) + \
        '-features.txt'

    # Check if we already have it.
    if os.path.isfile(path):
        pbar.update(1)
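        # (Hedged sketch continuation, not from the original file: skip videos
        # whose features were already extracted.)
        continue

    # Hedged sketch of how such an extraction loop typically continues; the
    # helper names get_frames_for_sample, rescale_list and model.extract(...)
    # are assumptions rather than code from the scraped source.
    frames = data.get_frames_for_sample(video)
    frames = data.rescale_list(frames, seq_length)
    sequence = [model.extract(image) for image in frames]
    np.savetxt(path, sequence)
    pbar.update(1)

pbar.close()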
Example #18
class SolverMultigpu(object):
    def __init__(self,
                 train=True,
                 common_params=None,
                 solver_params=None,
                 net_params=None,
                 dataset_params=None):

        if common_params:
            self.gpus = [
                int(device) for device in str(common_params['gpus']).split(',')
            ]
            self.image_size = int(common_params['image_size'])
            self.height = self.image_size
            self.width = self.image_size
            self.batch_size = int(common_params['batch_size']) / len(self.gpus)
        if solver_params:
            self.learning_rate = float(solver_params['learning_rate'])
            self.moment = float(solver_params['moment'])
            self.max_steps = int(solver_params['max_iterators'])
            self.train_dir = str(solver_params['train_dir'])
            self.lr_decay = float(solver_params['lr_decay'])
            self.decay_steps = int(solver_params['decay_steps'])
        self.tower_name = 'Tower'
        self.num_gpus = len(self.gpus)
        self.train = train
        self.net = Net(train=train,
                       common_params=common_params,
                       net_params=net_params)
        self.dataset = DataSet(common_params=common_params,
                               dataset_params=dataset_params)
        self.placeholders = []

    def construct_cpu_graph(self, scope):
        data_l = tf.placeholder(tf.float32,
                                (self.batch_size, self.height, self.width, 1))
        gt_ab_313 = tf.placeholder(
            tf.float32,
            (self.batch_size, int(self.height / 4), int(self.width / 4), 313))
        prior_boost_nongray = tf.placeholder(
            tf.float32,
            (self.batch_size, int(self.height / 4), int(self.width / 4), 1))

        conv8_313 = self.net.inference(data_l)
        self.net.loss(scope, conv8_313, prior_boost_nongray, gt_ab_313)

    def construct_tower_gpu(self, scope):
        data_l = tf.placeholder(tf.float32,
                                (self.batch_size, self.height, self.width, 1))
        gt_ab_313 = tf.placeholder(
            tf.float32,
            (self.batch_size, int(self.height / 4), int(self.width / 4), 313))
        prior_boost_nongray = tf.placeholder(
            tf.float32,
            (self.batch_size, int(self.height / 4), int(self.width / 4), 1))
        self.placeholders.append(data_l)
        self.placeholders.append(gt_ab_313)
        self.placeholders.append(prior_boost_nongray)

        conv8_313 = self.net.inference(data_l)
        new_loss, g_loss = self.net.loss(scope, conv8_313, prior_boost_nongray,
                                         gt_ab_313)
        tf.summary.scalar('new_loss', new_loss)
        tf.summary.scalar('total_loss', g_loss)
        return new_loss, g_loss

    def average_gradients(self, tower_grads):
        """Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.
    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
        average_grads = []
        for grad_and_vars in zip(*tower_grads):
            # Note that each grad_and_vars looks like the following:
            #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
            grads = []
            for g, _ in grad_and_vars:
                # Add 0 dimension to the gradients to represent the tower.
                expanded_g = tf.expand_dims(g, 0)

                # Append on a 'tower' dimension which we will average over below.
                grads.append(expanded_g)

            # Average over the 'tower' dimension.
            grad = tf.concat(grads, 0)
            grad = tf.reduce_mean(grad, 0)

            # Keep in mind that the Variables are redundant because they are shared
            # across towers. So .. we will just return the first tower's pointer to
            # the Variable.
            v = grad_and_vars[0][1]
            grad_and_var = (grad, v)
            average_grads.append(grad_and_var)
        return average_grads

    def train_model(self):
        with tf.Graph().as_default(), tf.device('/cpu:0'):
            self.global_step = tf.get_variable(
                'global_step', [],
                initializer=tf.constant_initializer(0),
                trainable=False)
            learning_rate = tf.train.exponential_decay(self.learning_rate,
                                                       self.global_step,
                                                       self.decay_steps,
                                                       self.lr_decay,
                                                       staircase=True)
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                         beta2=0.99)

            with tf.name_scope('cpu_model') as scope:
                self.construct_cpu_graph(scope)
            tf.get_variable_scope().reuse_variables()
            tower_grads = []
            for i in self.gpus:
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' %
                                       (self.tower_name, i)) as scope:
                        new_loss, self.total_loss = self.construct_tower_gpu(
                            scope)
                        self.summaries = tf.get_collection(
                            tf.GraphKeys.SUMMARIES, scope)
                        grads = opt.compute_gradients(new_loss)
                        tower_grads.append(grads)
            grads = self.average_gradients(tower_grads)

            self.summaries.append(
                tf.summary.scalar('learning_rate', learning_rate))

            for grad, var in grads:
                if grad is not None:
                    self.summaries.append(
                        tf.summary.histogram(var.op.name + '/gradients', grad))

            apply_gradient_op = opt.apply_gradients(
                grads, global_step=self.global_step)

            for var in tf.trainable_variables():
                self.summaries.append(tf.summary.histogram(var.op.name, var))

            variable_averages = tf.train.ExponentialMovingAverage(
                0.999, self.global_step)
            variables_averages_op = variable_averages.apply(
                tf.trainable_variables())

            train_op = tf.group(apply_gradient_op, variables_averages_op)

            saver = tf.train.Saver(write_version=1)
            saver1 = tf.train.Saver()
            summary_op = tf.summary.merge(self.summaries)
            init = tf.global_variables_initializer()
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            sess.run(init)
            #saver1.restore(sess, self.pretrain_model)
            #nilboy
            summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph)
            for step in xrange(self.max_steps):
                start_time = time.time()
                t1 = time.time()
                feed_dict = {}
                np_feeds = []
                data_l, gt_ab_313, prior_boost_nongray = self.dataset.batch()
                for i in range(self.num_gpus):
                    np_feeds.append(
                        data_l[self.batch_size * i:self.batch_size *
                               (i + 1), :, :, :])
                    np_feeds.append(
                        gt_ab_313[self.batch_size * i:self.batch_size *
                                  (i + 1), :, :, :])
                    np_feeds.append(prior_boost_nongray[self.batch_size *
                                                        i:self.batch_size *
                                                        (i + 1), :, :, :])
                for i in range(len(self.placeholders)):
                    feed_dict[self.placeholders[i]] = np_feeds[i]
                t2 = time.time()
                _, loss_value = sess.run([train_op, self.total_loss],
                                         feed_dict=feed_dict)
                duration = time.time() - start_time
                t3 = time.time()
                print('io: ' + str(t2 - t1) + '; compute: ' + str(t3 - t2))
                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                if step % 1 == 0:
                    num_examples_per_step = self.batch_size * self.num_gpus
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = duration / self.num_gpus

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, loss_value,
                                        examples_per_sec, sec_per_batch))

                if step % 10 == 0:
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)

                # Save the model checkpoint periodically.
                if step % 1000 == 0:
                    checkpoint_path = os.path.join(self.train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
Example #19
# Preprocesses the data for the sequence length.
preprocessed = Preprocessing('data/train_belc_das_2020.csv')
preprocessed.save_dialogues_as_matrices(sequence_length=3)

# Loops over all the settings, computes the accuracy and outputs it into a data frame.
output = np.empty((1, 3)).astype(str)
for model in weighted:
    print("Cross-validation for {} model".format(model))
    for lr in learning_rates:
        print("Cross-validation for learning rate {}".format(lr))
        for hidden_dimension in hidden_dimensions:
            print("Cross-validation for hidden dimension {}".format(hidden_dimension))
            for embedding_dimension in embedding_dimensions:
                print("Cross-validation for embedding dimension {}".format(embedding_dimension))

                data = DataSet()

                # Performs cross-validation.
                cross_validation = CrossValidation(data, k)
                cross_validation.make_k_fold_cross_validation_split(levels)
                scores = cross_validation.validate(lr, batch_size, epochs, input_classes,
                                                   embedding_dimensions=embedding_dimension,
                                                   hidden_nodes=hidden_dimension, weighted=model)

                # Store the mean accuracy and standard deviation over the cross validation per setting in a Numpy array.
                setting_name = '_'.join([model, 'lr', str(lr), 'hidden', str(hidden_dimension), 'emb',
                                        str(embedding_dimension[0])])
                entry = np.array([setting_name, str(np.mean(scores)), str(np.std(scores))]).reshape(-1, 3)
                output = np.concatenate((output, entry), axis=0)
                print(output)
Example #20
    target_path = os.path.join(data_root, 'mnistM/train/[0-9]/*.png')
    valid_path = os.path.join(data_root, 'mnistM/test/[0-9]/*.png')

    # --- Set Path List --- #
    source_path_list = list(
        map(lambda f: f.replace(os.sep, '/'), glob(source_path)))
    target_path_list = list(
        map(lambda f: f.replace(os.sep, '/'), glob(target_path)))
    valid_path_list = list(
        map(lambda f: f.replace(os.sep, '/'), glob(valid_path)))
    print('source_path_list length :{:>6}'.format(len(source_path_list)))
    print('target_path_list length :{:>6}'.format(len(target_path_list)))
    print('valid_path_list length  :{:>6}'.format(len(valid_path_list)))

    # --- Create Data Object --- #
    source_dataset = DataSet(source_path_list, size)
    target_dataset = DataSet(target_path_list, size)
    valid_dataset = DataSet(valid_path_list, size)

    # --- Create iterators for Training and Validation --- #
    # Select MultiprocessIterator or SerialIterator
    source = iterators.SerialIterator(source_dataset,
                                      batch,
                                      repeat=True,
                                      shuffle=True)
    target = iterators.SerialIterator(target_dataset,
                                      batch,
                                      repeat=True,
                                      shuffle=True)
    valid = iterators.SerialIterator(valid_dataset,
                                     batch,
Example #21
def train(hidden_size, z_dims, l2_regularization, learning_rate, kl_imbalance,
          reconstruction_imbalance, generated_mse_imbalance):
    # train_set = np.load("../../Trajectory_generate/dataset_file/HF_train_.npy").reshape(-1, 6, 30)
    # test_set = np.load("../../Trajectory_generate/dataset_file/HF_test_.npy").reshape(-1, 6, 30)
    # test_set = np.load("../../Trajectory_generate/dataset_file/HF_validate_.npy").reshape(-1, 6, 30)

    # train_set = np.load("../../Trajectory_generate/dataset_file/train_x_.npy").reshape(-1, 6, 60)[:, :, 1:]
    # test_set = np.load("../../Trajectory_generate/dataset_file/test_x.npy").reshape(-1, 6, 60)[:, :, 1:]
    # test_set = np.load("../../Trajectory_generate/dataset_file/validate_x_.npy").reshape(-1, 6, 60)[:, :, 1:]

    # train_set = np.load("../../Trajectory_generate/dataset_file/mimic_train_x_.npy").reshape(-1, 6, 37)
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_test_x_.npy").reshape(-1, 6, 37)
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_validate_.npy").reshape(-1, 6, 37)

    train_set = np.load(
        '../../Trajectory_generate/dataset_file/sepsis_mimic_train.npy'
    ).reshape(-1, 13, 40)
    test_set = np.load(
        '../../Trajectory_generate/dataset_file/sepsis_mimic_test.npy'
    ).reshape(-1, 13, 40)
    # test_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_validate.npy').reshape(-1, 13, 40)

    previous_visit = 3
    predicted_visit = 10

    feature_dims = train_set.shape[2] - 1

    train_set = DataSet(train_set)
    train_set.epoch_completed = 0
    batch_size = 64
    epochs = 50
    #
    # hidden_size = 2 ** (int(hidden_size))
    # z_dims = 2 ** (int(z_dims))
    # learning_rate = 10 ** learning_rate
    # l2_regularization = 10 ** l2_regularization
    # kl_imbalance = 10 ** kl_imbalance
    # reconstruction_imbalance = 10 ** reconstruction_imbalance
    # generated_mse_imbalance = 10 ** generated_mse_imbalance

    print('previous_visit---{}---predicted_visit----{}-'.format(
        previous_visit, predicted_visit))

    print(
        'hidden_size{}----z_dims{}------learning_rate{}----l2_regularization{}---'
        'kl_imbalance{}----reconstruction_imbalance '
        ' {}----generated_mse_imbalance{}----'.format(
            hidden_size, z_dims, learning_rate, l2_regularization,
            kl_imbalance, reconstruction_imbalance, generated_mse_imbalance))

    encode_share = Encoder(hidden_size=hidden_size)
    decode_share = Decoder(hidden_size=hidden_size, feature_dims=feature_dims)
    prior_net = Prior(z_dims=z_dims)
    post_net = Post(z_dims=z_dims)

    logged = set()
    max_loss = 0.01
    max_pace = 0.0001
    loss = 0
    count = 0
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)

    while train_set.epoch_completed < epochs:
        input_train = train_set.next_batch(batch_size=batch_size)
        input_x_train = tf.cast(input_train[:, :, 1:], dtype=tf.float32)
        input_t_train = tf.cast(input_train[:, :, 0], tf.float32)
        batch = input_x_train.shape[0]

        with tf.GradientTape() as tape:
            generated_trajectory = np.zeros(shape=[batch, 0, feature_dims])
            construct_trajectory = np.zeros(shape=[batch, 0, feature_dims])
            z_log_var_post_all = np.zeros(shape=[batch, 0, z_dims])
            z_mean_post_all = np.zeros(shape=[batch, 0, z_dims])
            z_log_var_prior_all = np.zeros(shape=[batch, 0, z_dims])
            z_mean_prior_all = np.zeros(shape=[batch, 0, z_dims])

            for predicted_visit_ in range(predicted_visit):
                sequence_last_time = input_x_train[:, predicted_visit_ +
                                                   previous_visit - 1, :]
                sequence_time_current_time = input_x_train[:,
                                                           predicted_visit_ +
                                                           previous_visit, :]

                for previous_visit_ in range(previous_visit +
                                             predicted_visit_):
                    sequence_time = input_x_train[:, previous_visit_, :]
                    if previous_visit_ == 0:
                        encode_c = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))
                        encode_h = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))
                    encode_c, encode_h = encode_share(
                        [sequence_time, encode_c, encode_h])
                context_state = encode_h
                z_prior, z_mean_prior, z_log_var_prior = prior_net(
                    context_state)  # h_i--> z_(i+1)
                encode_c, encode_h = encode_share(
                    [sequence_time_current_time, encode_c,
                     encode_h])  # h_(i+1)
                z_post, z_mean_post, z_log_var_post = post_net(
                    [context_state, encode_h])  # h_i, h_(i+1) --> z_(i+1)
                if predicted_visit_ == 0:
                    decode_c_generate = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))
                    decode_h_generate = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))

                    decode_c_reconstruct = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))
                    decode_h_reconstruct = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))
                input_t = tf.reshape(
                    input_t_train[:, previous_visit + predicted_visit_],
                    [-1, 1])
                construct_next_visit, decode_c_reconstruct, decode_h_reconstruct = decode_share(
                    [
                        z_post, context_state, sequence_last_time,
                        decode_c_reconstruct, decode_h_reconstruct, input_t
                    ])
                construct_next_visit = tf.reshape(construct_next_visit,
                                                  [batch, -1, feature_dims])
                construct_trajectory = tf.concat(
                    (construct_trajectory, construct_next_visit), axis=1)

                generated_next_visit, decode_c_generate, decode_h_generate = decode_share(
                    [
                        z_prior, context_state, sequence_last_time,
                        decode_c_generate, decode_h_generate, input_t
                    ])
                generated_next_visit = tf.reshape(generated_next_visit,
                                                  (batch, -1, feature_dims))
                generated_trajectory = tf.concat(
                    (generated_trajectory, generated_next_visit), axis=1)

                z_mean_prior_all = tf.concat(
                    (z_mean_prior_all,
                     tf.reshape(z_mean_prior, [batch, -1, z_dims])),
                    axis=1)
                z_mean_post_all = tf.concat(
                    (z_mean_post_all,
                     tf.reshape(z_mean_post, [batch, -1, z_dims])),
                    axis=1)
                z_log_var_prior_all = tf.concat(
                    (z_log_var_prior_all,
                     tf.reshape(z_log_var_prior, [batch, -1, z_dims])),
                    axis=1)
                z_log_var_post_all = tf.concat(
                    (z_log_var_post_all,
                     tf.reshape(z_log_var_post, [batch, -1, z_dims])),
                    axis=1)

            mse_reconstruction = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :], construct_trajectory))
            mse_generate = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :], generated_trajectory))

            # Element-wise KL divergence between the posterior N(mu_post, std_post^2)
            # and the prior N(mu_prior, std_prior^2); both std's come from log-variances.
            std_post = tf.math.sqrt(tf.exp(z_log_var_post_all))
            std_prior = tf.math.sqrt(tf.exp(z_log_var_prior_all))
            kl_loss_element = 0.5 * (
                2 * tf.math.log(tf.maximum(std_prior, 1e-9)) -
                2 * tf.math.log(tf.maximum(std_post, 1e-9)) +
                (tf.math.pow(std_post, 2) + tf.math.pow(
                    (z_mean_post_all - z_mean_prior_all), 2)) /
                tf.maximum(tf.math.pow(std_prior, 2), 1e-9) - 1)
            kl_loss_all = tf.reduce_mean(kl_loss_element)

            loss += mse_reconstruction * reconstruction_imbalance + kl_loss_all * kl_imbalance + mse_generate * generated_mse_imbalance

            variables = [var for var in encode_share.trainable_variables]
            for weight in encode_share.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            for weight in decode_share.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

            for weight in post_net.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

            for weight in prior_net.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)
            tape.watch(variables)

            gradient = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(gradient, variables))

            if train_set.epoch_completed % 1 == 0 and train_set.epoch_completed not in logged:
                logged.add(train_set.epoch_completed)
                loss_pre = mse_generate
                mse_reconstruction = tf.reduce_mean(
                    tf.keras.losses.mse(
                        input_x_train[:, previous_visit:previous_visit +
                                      predicted_visit, :],
                        construct_trajectory))
                mse_generate = tf.reduce_mean(
                    tf.keras.losses.mse(
                        input_x_train[:, previous_visit:previous_visit +
                                      predicted_visit, :],
                        generated_trajectory))
                kl_loss_all = tf.reduce_mean(
                    kl_loss(z_mean_post=z_mean_post_all,
                            z_mean_prior=z_mean_prior_all,
                            log_var_post=z_log_var_post_all,
                            log_var_prior=z_log_var_prior_all))
                loss = mse_reconstruction + mse_generate + kl_loss_all
                loss_diff = loss_pre - mse_generate

                if mse_generate > max_loss:
                    count = 0  # max_loss = 0.01

                else:
                    if loss_diff > max_pace:  # max_pace = 0.0001
                        count = 0
                    else:
                        count += 1

                if count > 9:
                    break

                input_test = test_set
                input_x_test = tf.cast(input_test[:, :, 1:], dtype=tf.float32)
                input_t_test = tf.cast(input_test[:, :, 0], tf.float32)
                batch_test = input_x_test.shape[0]
                generated_trajectory_test = np.zeros(
                    shape=[batch_test, 0, feature_dims])
                for predicted_visit_ in range(predicted_visit):

                    for previous_visit_ in range(previous_visit):
                        sequence_time_test = input_x_test[:,
                                                          previous_visit_, :]
                        if previous_visit_ == 0:
                            encode_c_test = tf.Variable(
                                tf.zeros(shape=(batch_test, hidden_size)))
                            encode_h_test = tf.Variable(
                                tf.zeros(shape=(batch_test, hidden_size)))

                        encode_c_test, encode_h_test = encode_share(
                            [sequence_time_test, encode_c_test, encode_h_test])

                    if predicted_visit_ != 0:
                        for i in range(predicted_visit_):
                            sequence_input_t = generated_trajectory_test[:,
                                                                         i, :]
                            encode_c_test, encode_h_test = encode_share([
                                sequence_input_t, encode_c_test, encode_h_test
                            ])

                    context_state_test = encode_h_test
                    z_prior_test, z_mean_prior_test, z_log_var_prior_test = prior_net(
                        context_state_test)

                    if predicted_visit_ == 0:
                        decode_c_generate_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))
                        decode_h_generate_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))
                        sequence_last_time_test = input_x_test[
                            :, predicted_visit_ + previous_visit - 1, :]

                    input_t = tf.reshape(
                        input_t_test[:, previous_visit + predicted_visit_],
                        [-1, 1])
                    sequence_last_time_test, decode_c_generate_test, decode_h_generate_test = decode_share(
                        [
                            z_prior_test, context_state_test,
                            sequence_last_time_test, decode_c_generate_test,
                            decode_h_generate_test, input_t
                        ])
                    generated_next_visit_test = sequence_last_time_test
                    generated_next_visit_test = tf.reshape(
                        generated_next_visit_test,
                        [batch_test, -1, feature_dims])
                    generated_trajectory_test = tf.concat(
                        (generated_trajectory_test, generated_next_visit_test),
                        axis=1)

                mse_generate_test = tf.reduce_mean(
                    tf.keras.losses.mse(
                        input_x_test[:, previous_visit:previous_visit +
                                     predicted_visit, :],
                        generated_trajectory_test))
                mae_generate_test = tf.reduce_mean(
                    tf.keras.losses.mae(
                        input_x_test[:, previous_visit:previous_visit +
                                     predicted_visit, :],
                        generated_trajectory_test))
                # r_value_all = []
                # p_value_all = []
                # r_value_spearman_all = []
                # r_value_kendalltau_all = []
                # for visit in range(predicted_visit):
                #     for feature in range(feature_dims):
                #         x_ = input_x_test[:, previous_visit+visit, feature]
                #         y_ = generated_trajectory_test[:, visit, feature]
                #         r_value_pearson = stats.pearsonr(x_, y_)
                #         r_value_spearman = stats.spearmanr(x_, y_)
                #         r_value_kendalltau = stats.kendalltau(x_, y_)
                #         if not np.isnan(r_value_pearson[0]):
                #             r_value_all.append(np.abs(r_value_pearson[0]))
                #             p_value_all.append(np.abs(r_value_pearson[1]))
                #
                #         if not np.isnan(r_value_spearman[0]):
                #             r_value_spearman_all.append(np.abs(r_value_spearman[0]))
                #
                #         if not np.isnan(r_value_kendalltau[0]):
                #             r_value_kendalltau_all.append(np.abs(r_value_kendalltau[0]))

                r_value_all = []
                for patient in range(batch_test):
                    r_value = 0.0
                    for feature in range(feature_dims):
                        x_ = input_x_test[patient, previous_visit:,
                                          feature].numpy().reshape(
                                              predicted_visit, 1)
                        y_ = generated_trajectory_test[
                            patient, :,
                            feature].numpy().reshape(predicted_visit, 1)
                        r_value += DynamicTimeWarping(x_, y_)
                    r_value_all.append(r_value / 29.0)
                print(
                    "epoch  {}---train_mse_generate {}--train_reconstruct {}--train_kl "
                    "{}--test_mse {}--test_mae  {}----r_value {}---"
                    "count {}-".format(train_set.epoch_completed, mse_generate,
                                       mse_reconstruction, kl_loss_all,
                                       mse_generate_test, mae_generate_test,
                                       np.mean(r_value_all), count))

                # print("epoch  {}---train_mse_generate {}--train_reconstruct {}--train_kl "
                #       "{}--test_mse {}--test_mae  {}----r_value {}--r_value_spearman---{}"
                #       "r_value_kentalltau---{}--count {}-".format(train_set.epoch_completed,
                #                                                   mse_generate,
                #                                                   mse_reconstruction,
                #                                                   kl_loss_all,
                #                                                   mse_generate_test,
                #                                                   mae_generate_test,
                #                                                   np.mean(r_value_all),
                #                                                   np.mean(r_value_spearman_all),
                #                                                   np.mean(r_value_kendalltau_all),
                #                                                   count))
    tf.compat.v1.reset_default_graph()
    return mse_generate_test, mae_generate_test, np.mean(r_value_all)
hidden_nodes = 16
input_classes = ['dialogue_act', 'speaker', 'level', 'utterance_length']
embedding_dimensions = None

# Training hyper parameters.
learning_rate = 0.001
batch_size = 16
epochs = 20

# Preprocesses the training data for the sequence length.
preprocessed_train = Preprocessing('data/DA_labeled_belc_2019.csv')
preprocessed_train.save_dialogues_as_matrices(sequence_length=sequence_length,
                                              store_index=True)
preprocessed_train.save_dialogue_ids()
preprocessed_train.save_class_representation()
train_data = DataSet()

# Preprocesses the test data for the sequence length.
preprocessed_test = Preprocessing('data/DA_labeled_belc_2019.csv')
preprocessed_test.save_dialogues_as_matrices(sequence_length=sequence_length,
                                             store_index=True)
preprocessed_test.save_dialogue_ids()
preprocessed_test.save_class_representation()
data_frame = preprocessed_test.data
test_data = DataSet()

# Makes predictions for the weighted and unweighted model and stores them.
for weighted in models:
    print("Prediction performance for " + weighted + " model")

    if weighted == 'weighted':
Example #23
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from mlxtend.preprocessing import DenseTransformer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from data import DataSet
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline, FeatureUnion
import matplotlib.pyplot as plt
from conf_lib import plot_confusion_matrix
from sklearn.decomposition import TruncatedSVD, PCA
import random

data_set = DataSet()
data, label, class_names = data_set.get_train_data_set()

indexs = random.sample(range(len(data)), 50000)
data = data[indexs]
label = label[indexs]
X_train, X_test, y_train, y_test = train_test_split(data,
                                                    label,
                                                    test_size=0.33,
                                                    random_state=42)

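# Text-classification pipeline: bag-of-words counts -> 10-component truncated SVD
# -> logistic regression.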
est = [('count_vect', CountVectorizer()),
       ('tr', TruncatedSVD(n_components=10, n_iter=100, random_state=42)),
       ('clf_LG', LogisticRegression())]

pipeline_LG = Pipeline(est)
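
# Minimal usage sketch, assuming the raw `data` entries are text documents that
# CountVectorizer can consume: fit the pipeline and score it with the metrics
# imported above.
pipeline_LG.fit(X_train, y_train)
y_pred = pipeline_LG.predict(X_test)
print('accuracy:', accuracy_score(y_test, y_pred))
print('macro F1:', f1_score(y_test, y_pred, average='macro'))
cm = confusion_matrix(y_test, y_pred)
# plot_confusion_matrix(cm, classes=class_names)  # conf_lib helper; its exact signature is assumed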
Example #24
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import itertools
import numpy as np
from data import DataSet
import time

batch_size = 32
hidden_size = 10
use_dropout = True
vocabulary = 6
data_type = 'features'
seq_length = 60
class_limit = 6
image_shape = None
data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )
# generator = data.frame_generator(batch_size, 'train', data_type)
# # for f in generator:
# #     print(f)
# val_generator = data.frame_generator(batch_size, 'test', data_type)
X_tr, y_tr = data.get_all_sequences_in_memory('train', data_type)
X_train = X_tr.reshape(780, 1080)
y_train = np.zeros(780)
j = 0
for i in y_tr:
    # print(np.argmax(i))
    y_train[j] = np.argmax(i)
    j += 1
# print(X_train.shape)
Example #25
def main():
    usage = """usage: %prog [options]
Searches for missing results and uses run_test.py to collect it."""
    parser = OptionParser(usage)
    parser.add_option("-i", "--input_graph",
                      metavar="FILE",
                      help="restrict the missing data check to the specified input graph")
    parser.add_option("-l", "--inputs-list-file",
                      metavar="FILE",
                      help="collect data for all inputs in the specified log file")
    parser.add_option("--list-only",
                      action="store_true", default=False,
                      help="only list missing data (do not collect it)")
    parser.add_option("-n", "--num-runs",
                      type="int", default="1",
                      help="number of desired runs per revision-input combination [default: 1]")
    parser.add_option("-r", "--rev",
                      help="restrict the missing data check to the specified revision, or 'all' [default: current]")

    group = OptionGroup(parser, "Data Collection Options")
    group.add_option("-p", "--performance",
                      action="store_true", default=True,
                      help="collect performance data (this is the default)")
    group.add_option("-c", "--correctness",
                      action="store_true", default=False,
                      help="collect correctness data")
    parser.add_option_group(group)

    group2 = OptionGroup(parser, "Weight (Part II) Data Collection Options")
    group2.add_option("-v", "--num_vertices",
                      metavar="V", type="int", default=0,
                      help="collect weight data for V vertices (requires -d or -e)")
    group2.add_option("-d", "--dims",
                      metavar="D", type="int", default=0,
                      help="collect weight data for randomly positioned vertices in D-dimensional space (requires -v)")
    group2.add_option("-e", "--edge",
                      action="store_true", default=False,
                      help="collect weight data for random uniform edge weights in the range (0, 1] (requires -v)")
    parser.add_option_group(group2)

    (options, args) = parser.parse_args()
    if len(args) > 0:
        parser.error("too many arguments")

    if options.num_runs < 1:
        parser.error("-n must be at least 1")
    input_solns = None

    # prepare for a weight data collection
    num_on = 0
    weight_test = False
    if options.num_vertices > 0:
        weight_test = True
        if options.input_graph or options.inputs_list_file:
            parser.error('-i, -l, and -v are mutually exclusive')

        if options.dims > 0:
            num_on += 1
            wtype = 'loc%u' % options.dims

        if options.edge:
            num_on += 1
            wtype = 'edge'

        if num_on == 0:
            parser.error('-v requires either -d or -e be specified too')

        if options.num_runs > 1:
            options.num_runs = 1
            print 'warning: -v truncates the number of runs to 1 (weight should not change b/w runs)'

        input_path = InputSolution.get_path_to(15, options.dims, 0.0, 1.0)
        print 'reading inputs to run on from ' + input_path
        input_solns = DataSet.read_from_file(InputSolution, input_path)
        revs = [None] # not revision-specific (assuming our alg is correct)
        get_results_for_rev = lambda _ : DataSet.read_from_file(WeightResult, WeightResult.get_path_to(wtype))
        collect_missing_data = collect_missing_weight_data
    elif options.dims > 0 or options.edge:
        parser.error('-v is required whenever -d or -e is used')

    # handle -i, -l: collect data for a particular graph(s)
    if options.input_graph and options.inputs_list_file:
        parser.error('-i and -l are mutually exclusive')
    if options.input_graph is not None:
        try:
            i = extract_input_footer(options.input_graph)
        except ExtractInputFooterError, e:
            parser.error(e)
        input_solns = DataSet({0:InputSolution(i.prec,i.dims,i.min,i.max,i.num_verts,i.num_edges,i.seed)})
Example #26
    )
    print(
        "Example: python classify.py 75 2 lstm-features.095-0.090.hdf5 some_video.mp4"
    )
    exit(1)

capture = cv2.VideoCapture(os.path.join(video_file))
width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float

fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter("result.avi", fourcc, 15,
                               (int(width), int(height)))
# Get the dataset.
data = DataSet(seq_length=seq_length,
               class_limit=class_limit,
               image_shape=(height, width, 3))
# get the model.

extract_model = Extractor(image_shape=(3, height, width))
print("HERE")
saved_LSTM_model = load_model(saved_model)

frames = []
frame_count = 0

while True:
    ret, frame = capture.read()
    # Bail out when the video file ends
    if not ret:
        break
Example #27
#!/usr/bin/env python

from data import DataSet, InputSolution
from check_output import get_and_log_mst_weight_from_checker
from generate_input import main as generate_input
import sys, time

if len(sys.argv) != 2:
    print 'usage: gather_correctness.py LOG_FN'
    sys.exit(-1)

# get the file to read inputs from
logfn = sys.argv[1]
ds = DataSet.read_from_file(InputSolution, logfn)

# compute correctness for each input
inputs = ds.dataset.keys() # Input objects
inputs.sort()
on = 0
for i in inputs:
    on += 1
    # figure out how to generate the graph and where it will be stored
    argstr = '-mt ' + i.make_args_for_generate_input()
    input_graph = generate_input(argstr.split(), get_output_name_only=True)
    print time.ctime(time.time()) + ' input # ' + str(on) + ' => gathering correctness data for ' + argstr

    # generate the graph
    generate_input(argstr.split())

    # compute the weight for the graph
    get_and_log_mst_weight_from_checker(input_graph, force_recompute=False, inputslogfn=logfn)
Example #28
def train(model,
          load_to_memory=True,
          datafile='rect_same_period',
          batch_size=None,
          nb_epoch=1000,
          npoints=60,
          random_orientation=False,
          random_translation=False,
          pad=True,
          resized=False,
          **kargs):
    # Helper: Save the model.
    if not os.path.isdir(os.path.join(data_dir, 'checkpoints', model)):
        os.mkdir(os.path.join(data_dir, 'checkpoints', model))
    now = datetime.now()
    date = now.strftime("%d:%m:%Y-%H:%M")
    checkpointer = ModelCheckpoint(filepath=os.path.join(
        data_dir, 'checkpoints', model, model + '-' +
        '%s-%d-{val_loss:.3f}.hdf5' % (datafile, kargs['thick_idx'])),
                                   verbose=1,
                                   save_best_only=True)
    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join(data_dir, 'logs', model))
    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=20)
    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join(data_dir, 'logs', model+'_'+date+'-'+'training-'+\
        str(timestamp) + '.log'))

    data = DataSet(npoints=npoints, datafile=datafile, **kargs)
    rm = ResearchModels(model, npoints=npoints)

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train')
        X_val, y_val = data.get_all_sequences_in_memory('val')
    else:
        # Get generators.
        k = 1
        if random_orientation:
            k *= 2
        if random_translation:
            k *= 3
        steps_per_epoch = k * len(data.train) // batch_size
        validation_steps = 0.5 * len(data.val) // batch_size
        generator = data.frame_generator(batch_size,
                                         'train',
                                         random_orientation=random_orientation,
                                         random_translation=random_translation,
                                         pad=pad,
                                         resized=resized)
        val_generator = data.frame_generator(batch_size,
                                             'val',
                                             pad=pad,
                                             resized=resized)

    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_val, y_val),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger, checkpointer],
                     epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=validation_steps)
Example #29
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from data import DataSet
import os.path

from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger
from models import ResearchModels
from data import DataSet
import time

data = DataSet(class_limit=5)

checkpointer = ModelCheckpoint(
    filepath=os.path.join('data', 'checkpoints', 'inception.{epoch:03d}-{val_loss:.2f}.hdf5'),
    verbose=1,
    save_best_only=True)

def get_generators():
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        horizontal_flip=True,
        rotation_range=10.,
        width_shift_range=0.2,
        height_shift_range=0.2)
Example #30
"""
The following is the training log for the credit.data.csv file:
iter1 : train loss=0.371342
iter2 : train loss=0.238326
iter3 : train loss=0.163624
iter4 : train loss=0.123063
iter5 : train loss=0.087872
iter6 : train loss=0.065684
iter7 : train loss=0.049936
iter8 : train loss=0.041866
iter9 : train loss=0.035695
iter10 : train loss=0.030581
iter11 : train loss=0.027034
iter12 : train loss=0.024570
iter13 : train loss=0.019227
iter14 : train loss=0.015794
iter15 : train loss=0.013484
iter16 : train loss=0.010941
iter17 : train loss=0.009879
iter18 : train loss=0.008619
iter19 : train loss=0.007306
iter20 : train loss=0.005610
"""
from data import DataSet
from model import GBDT

if __name__ == '__main__':
    data_file = './data/credit.data.csv'
    dataset = DataSet(data_file)  # build the dataset
    gbdt = GBDT(max_iter=20, sample_rate=0.8, learn_rate=0.5, max_depth=7, loss_type='binary-classification')
    gbdt.fit(dataset, dataset.get_instances_idset())
extract all 101 classes. For instance, set class_limit = 8 to just
extract features for the first 8 (alphabetical) classes in the dataset.
Then set the same number when training models.
"""
import numpy as np
import os.path
from data import DataSet
from extractor import Extractor
from tqdm import tqdm

# Set defaults.
seq_length = 0
class_limit = None  # Number of classes to extract. Can be 1-101 or None for all.

# Get the dataset.
data = DataSet(seq_length=seq_length, class_limit=class_limit)

# get the model.
model = Extractor()

# Loop through data.
pbar = tqdm(total=len(data.data))
for video in data.data:

    # Get the path to the sequence for this video.
    path = os.path.join('data', 'sequences', video[2] + '-' + str(seq_length) + \
        '-features')  # numpy will auto-append .npy

    # Check if we already have it.
    if os.path.isfile(path + '.npy'):
        pbar.update(1)
def calc_gradients(
        test_file,
        model_name,
        output_file_dir,
        max_iter,
        learning_rate=0.0001,
        targets=None,
        weight_loss2=1,
        data_spec=None,
        batch_size=1,
        seq_len=40):

    """Compute the gradients for the given network and images."""    
    spec = data_spec

    modifier = tf.Variable(0.01*np.ones((1, seq_len, spec.crop_size,spec.crop_size,spec.channels),dtype=np.float32))
    input_image = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))
    input_label = tf.placeholder(tf.int32, (batch_size))

    # temporal mask, 1 indicates the selected frame
    indicator = [0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0]   

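    # Build the perturbed clip: frames flagged with 1 in `indicator` receive the
    # (value-clipped) adversarial modifier; all other frames pass through unchanged.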
    true_image = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+input_image[0,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0
    true_image = tf.expand_dims(true_image, 0)
    for ll in range(seq_len-1):
        if indicator[ll+1] == 1:
           mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[0,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0
        else:
           mask_temp = input_image[0,ll+1,:,:,:]
        mask_temp = tf.expand_dims(mask_temp,0)
        true_image = tf.concat([true_image, mask_temp],0)
    true_image = tf.expand_dims(true_image, 0)

    for kk in range(batch_size-1):
        true_image_temp = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+input_image[kk+1,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0
        true_image_temp = tf.expand_dims(true_image_temp, 0)
        for ll in range(seq_len-1):
            if indicator[ll+1] == 1:
               mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[kk+1,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0
            else:
               mask_temp = input_image[kk+1,ll+1,:,:,:]
            mask_temp = tf.expand_dims(mask_temp,0)
            true_image_temp = tf.concat([true_image_temp, mask_temp],0)
        true_image_temp = tf.expand_dims(true_image_temp, 0)

        true_image = tf.concat([true_image, true_image_temp],0)

    loss2 = tf.reduce_sum(tf.sqrt(tf.reduce_mean(tf.square(true_image-input_image), axis=[0, 2, 3, 4])))
    norm_frame = tf.reduce_mean(tf.abs(modifier), axis=[2,3,4])

    sess = tf.Session()
    probs, variable_set, pre_label,ince_output, pre_node = models.get_model(sess, true_image, model_name, False) 
    true_label_prob = tf.reduce_sum(probs*tf.one_hot(input_label,101),[1])
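    # Untargeted attack: minimize -log(1 - p_true) to push the prediction away from
    # the ground-truth label; targeted attack: minimize -log(p_target) (the labels are
    # replaced by `targets` in the batch loop below). Only `modifier` is optimized.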
    if targets is None:
        loss1 = -tf.log(1 - true_label_prob + 1e-6)
    else:
        loss1 = -tf.log(true_label_prob + 1e-6)
    loss1 = tf.reduce_mean(loss1)
    loss = loss1 + weight_loss2 * loss2

    optimizer = tf.train.AdamOptimizer(learning_rate)
    print('optimizer.minimize....')
    train = optimizer.minimize(loss, var_list=[modifier])
    # initialize all uninitialized variables
    init_varibale_list = set(tf.all_variables()) - variable_set
    sess.run(tf.initialize_variables(init_varibale_list))

    data = DataSet(test_list=test_file, seq_length=seq_len,image_shape=(spec.crop_size, spec.crop_size, spec.channels))
    all_names = []
    all_images = []
    all_labels = []
    
    def_len = 40
    for video in data.test_data:
        frames = data.get_frames_for_sample(video)
        if len(frames) < def_len:
           continue
        frames = data.rescale_list(frames, def_len)
        frames_data = data.build_image_sequence(frames)
        all_images.append(frames_data)
        label, hot_labels = data.get_class_one_hot(video[1])
        all_labels.append(label)
        all_names.append(frames)
    total = len(all_names)
    all_indices = range(total)
    num_batch = total // batch_size
    print('process data length:', num_batch)

    correct_ori = 0
    correct_noi = 0
    tot_image = 0
    
    for ii in range(num_batch):        
        images = all_images[ii*batch_size : (ii+1)*batch_size]
        names = all_names[ii*batch_size : (ii+1)*batch_size]
        labels = all_labels[ii*batch_size : (ii+1)*batch_size]
        indices = all_indices[ii*batch_size : (ii+1)*batch_size]
        print('------------------prediction for clean video-------------------')
        print('---video-level prediction---')
        for xx in range(len(indices)):
            print(names[xx][0],'label:', labels[xx], 'indice:',indices[xx], 'size:', len(images[xx]), len(images[xx][0]), len(images[xx][0][0]), len(images[xx][0][0][0]))
        sess.run(tf.initialize_variables(init_varibale_list))
        if targets is not None:
            labels = [targets[e] for e in names]
        
        feed_dict = {input_image: [images[0][0:seq_len]], input_label: labels}
        var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)
        
        correct_pre = correct_ori
        for xx in range(len(indices)):
           if labels[xx] == var_pre[xx]:
              correct_ori += 1

        tot_image += 1
        print('Start!')
        min_loss = var_loss
        last_min = -1
        print('---frame-wise prediction---')
        print('node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib', true_prob)
        # record the number of iterations
        tot_iter = 0

        if correct_pre == correct_ori:
           ii += 1
           continue
       
        print('------------------prediction for adversarial video-------------------')

        for cur_iter in range(max_iter):
            tot_iter += 1
            sess.run(train, feed_dict=feed_dict)
            var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)
            print('iter:', cur_iter, 'total loss:', var_loss, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)
            break_condition = False
            if var_loss < min_loss:
                if np.absolute(var_loss-min_loss) < 0.00001:
                   break_condition = True
                   print(last_min)
                min_loss = var_loss
                last_min = cur_iter

            if cur_iter + 1 == max_iter or break_condition:
                print('iter:', cur_iter, 'node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)
                var_diff, var_probs, noise_norm = sess.run((modifier, probs, norm_frame), feed_dict=feed_dict)
                for pp in range(seq_len):
                    # print the map value for each frame
                    print(noise_norm[0][pp])
                for i in range(len(indices)):
                    top1 = var_probs[i].argmax()
                    if labels[i] == top1:
                        correct_noi += 1
                break
        print('saved modifier parameters.', ii)
        
        for ll in range(len(indices)):
            for kk in range(def_len):
                if kk < seq_len:
                   attack_img = np.clip(images[ll][kk]*255.0+var_diff[0][kk]+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])
                   diff = np.clip(np.absolute(var_diff[0][kk])*255.0, data_spec.rescale[0],data_spec.rescale[1])
                else:
                   attack_img = np.clip(images[ll][kk]*255.0+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])
                   diff = np.zeros((spec.crop_size,spec.crop_size,spec.channels))
                im_diff = scipy.misc.toimage(arr=diff, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])
                im = scipy.misc.toimage(arr=attack_img, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])
                new_name = names[ll][kk].split('/')
                 
                adv_dir = output_file_dir+'/adversarial/'
                dif_dir = output_file_dir+'/noise/'
                if not os.path.exists(adv_dir):
                   os.mkdir(adv_dir)
                   os.mkdir(dif_dir)

                tmp_dir = adv_dir+new_name[-2]
                tmp1_dir = dif_dir+new_name[-2]
                if not os.path.exists(tmp_dir):
                   os.mkdir(tmp_dir)
                   os.mkdir(tmp1_dir)
               
                new_name = new_name[-1] + '.png'
                im.save(tmp_dir + '/' +new_name)
                im_diff.save(tmp1_dir + '/' +new_name)
        print('saved adversarial frames.', ii)
        print('correct_ori:', correct_ori, 'correct_noi:', correct_noi)
Example #33
def train(hidden_size, z_dims, l2_regularization, learning_rate, n_disc,
          generated_mse_imbalance, generated_loss_imbalance, kl_imbalance,
          reconstruction_mse_imbalance, likelihood_imbalance):
    # train_set = np.load("../../Trajectory_generate/dataset_file/train_x_.npy").reshape(-1, 6, 60)
    # test_set = np.load("../../Trajectory_generate/dataset_file/test_x.npy").reshape(-1, 6, 60)
    # test_set = np.load("../../Trajectory_generate/dataset_file/validate_x_.npy").reshape(-1, 6, 60)

    # train_set = np.load('../../Trajectory_generate/dataset_file/HF_train_.npy').reshape(-1, 6, 30)[:, :, :]
    # test_set = np.load('../../Trajectory_generate/dataset_file/HF_validate_.npy').reshape(-1, 6, 30)[:, :, :]
    # test_set = np.load('../../Trajectory_generate/dataset_file/HF_test_.npy').reshape(-1, 6, 30)[:, :, :]

    # train_set = np.load("../../Trajectory_generate/dataset_file/mimic_train_x_.npy").reshape(-1, 6, 37)
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_test_x_.npy").reshape(-1, 6, 37)
    # # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_validate_.npy").reshape(-1, 6, 37)

    # sepsis mimic dataset
    train_set = np.load(
        '../../Trajectory_generate/dataset_file/sepsis_mimic_train.npy'
    ).reshape(-1, 13, 40)
    # test_set = np.load('../../Trajectory_generate_dataset_file/sepsis_mimic_test.npy').reshape(-1, 13, 40)[:1072,:, : ]
    test_set = np.load(
        '../../Trajectory_generate/dataset_file/sepsis_mimic_validate.npy'
    ).reshape(-1, 13, 40)

    previous_visit = 3
    predicted_visit = 10

    feature_dims = train_set.shape[2] - 1

    train_set = DataSet(train_set)
    train_set.epoch_completed = 0
    batch_size = 64
    epochs = 50

    hidden_size = 2**(int(hidden_size))
    z_dims = 2**(int(z_dims))
    l2_regularization = 10**l2_regularization
    learning_rate = 10**learning_rate
    n_disc = int(n_disc)
    generated_mse_imbalance = 10**generated_mse_imbalance
    generated_loss_imbalance = 10**generated_loss_imbalance
    kl_imbalance = 10**kl_imbalance
    reconstruction_mse_imbalance = 10**reconstruction_mse_imbalance
    likelihood_imbalance = 10**likelihood_imbalance

    print('feature_dims---{}'.format(feature_dims))

    print('previous_visit---{}---predicted_visit----{}-'.format(
        previous_visit, predicted_visit))

    print(
        'hidden_size---{}---z_dims---{}---l2_regularization---{}---learning_rate---{}--n_disc---{}-'
        'generated_mse_imbalance---{}---generated_loss_imbalance---{}---'
        'kl_imbalance---{}---reconstruction_mse_imbalance---{}---'
        'likelihood_imbalance---{}'.format(
            hidden_size, z_dims, l2_regularization, learning_rate, n_disc,
            generated_mse_imbalance, generated_loss_imbalance, kl_imbalance,
            reconstruction_mse_imbalance, likelihood_imbalance))

    encode_share = Encoder(hidden_size=hidden_size)
    decoder_share = Decoder(hidden_size=hidden_size, feature_dims=feature_dims)
    discriminator = Discriminator(predicted_visit=predicted_visit,
                                  hidden_size=hidden_size,
                                  previous_visit=previous_visit)

    post_net = Post(z_dims=z_dims)
    prior_net = Prior(z_dims=z_dims)

    hawkes_process = HawkesProcess()
    loss = 0
    count = 0
    optimizer_generation = tf.keras.optimizers.RMSprop(
        learning_rate=learning_rate)
    optimizer_discriminator = tf.keras.optimizers.RMSprop(
        learning_rate=learning_rate)
    cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    logged = set()
    max_loss = 0.001
    max_pace = 0.0001

    while train_set.epoch_completed < epochs:
        input_train = train_set.next_batch(batch_size=batch_size)
        input_x_train = tf.cast(input_train[:, :, 1:], tf.float32)
        input_t_train = tf.cast(input_train[:, :, 0], tf.float32)
        batch = input_train.shape[0]

        with tf.GradientTape() as gen_tape, tf.GradientTape(
                persistent=True) as disc_tape:
            generated_trajectory = tf.zeros(shape=[batch, 0, feature_dims])
            probability_likelihood = tf.zeros(shape=[batch, 0, 1])
            reconstructed_trajectory = tf.zeros(shape=[batch, 0, feature_dims])
            z_mean_post_all = tf.zeros(shape=[batch, 0, z_dims])
            z_log_var_post_all = tf.zeros(shape=[batch, 0, z_dims])
            z_mean_prior_all = tf.zeros(shape=[batch, 0, z_dims])
            z_log_var_prior_all = tf.zeros(shape=[batch, 0, z_dims])
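            # For every visit to predict: encode the full history, draw z from the
            # posterior (reconstruction path) and the prior (generation path), scale the
            # decoder hidden state by the Hawkes intensity, and decode the next visit.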
            for predicted_visit_ in range(predicted_visit):
                sequence_last_time = input_x_train[:, previous_visit +
                                                   predicted_visit_ - 1, :]
                sequence_current_time = input_x_train[:, previous_visit +
                                                      predicted_visit_, :]
                for previous_visit_ in range(previous_visit +
                                             predicted_visit_):
                    sequence_time = input_x_train[:, previous_visit_, :]
                    if previous_visit_ == 0:
                        encode_c = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))
                        encode_h = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))

                    encode_c, encode_h = encode_share(
                        [sequence_time, encode_c, encode_h])
                context_state = encode_h  # h_i
                encode_c, encode_h = encode_share(
                    [sequence_current_time, encode_c, encode_h])  # h_(i+1)

                if predicted_visit_ == 0:
                    decode_c_generate = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))
                    decode_h_generate = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))

                    decode_c_reconstruction = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))
                    decode_h_reconstruction = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))

                z_post, z_mean_post, z_log_var_post = post_net(
                    [context_state, encode_h])
                z_prior, z_mean_prior, z_log_var_prior = prior_net(
                    context_state)

                current_time_index_shape = tf.ones(
                    shape=[previous_visit + predicted_visit_])
                condition_value, likelihood = hawkes_process(
                    [input_t_train, current_time_index_shape])
                probability_likelihood = tf.concat(
                    (probability_likelihood,
                     tf.reshape(likelihood, [batch, -1, 1])),
                    axis=1)
                probability_likelihood = tf.keras.activations.softmax(
                    probability_likelihood)
                # generation
                generated_next_visit, decode_c_generate, decode_h_generate = decoder_share(
                    [
                        z_prior, context_state, sequence_last_time,
                        decode_c_generate, decode_h_generate * condition_value
                    ])
                # reconstruction
                reconstructed_next_visit, decode_c_reconstruction, decode_h_reconstruction = decoder_share(
                    [
                        z_post, context_state, sequence_last_time,
                        decode_c_reconstruction,
                        decode_h_reconstruction * condition_value
                    ])

                reconstructed_trajectory = tf.concat(
                    (reconstructed_trajectory,
                     tf.reshape(reconstructed_next_visit,
                                [batch, -1, feature_dims])),
                    axis=1)
                generated_trajectory = tf.concat(
                    (generated_trajectory,
                     tf.reshape(generated_next_visit,
                                [batch, -1, feature_dims])),
                    axis=1)

                z_mean_post_all = tf.concat(
                    (z_mean_post_all,
                     tf.reshape(z_mean_post, [batch, -1, z_dims])),
                    axis=1)
                z_mean_prior_all = tf.concat(
                    (z_mean_prior_all,
                     tf.reshape(z_mean_prior, [batch, -1, z_dims])),
                    axis=1)

                z_log_var_post_all = tf.concat(
                    (z_log_var_post_all,
                     tf.reshape(z_log_var_post, [batch, -1, z_dims])),
                    axis=1)
                z_log_var_prior_all = tf.concat(
                    (z_log_var_prior_all,
                     tf.reshape(z_log_var_prior, [batch, -1, z_dims])),
                    axis=1)

            d_real_pre_, d_fake_pre_ = discriminator(input_x_train,
                                                     generated_trajectory)
            d_real_pre_loss = cross_entropy(tf.ones_like(d_real_pre_),
                                            d_real_pre_)
            d_fake_pre_loss = cross_entropy(tf.zeros_like(d_fake_pre_),
                                            d_fake_pre_)
            d_loss = d_real_pre_loss + d_fake_pre_loss

            gen_loss = cross_entropy(tf.ones_like(d_fake_pre_), d_fake_pre_)
            generated_mse_loss = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :], generated_trajectory))
            reconstructed_mse_loss = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :],
                    reconstructed_trajectory))

            std_post = tf.math.sqrt(tf.exp(z_log_var_post_all))
            std_prior = tf.math.sqrt(tf.exp(z_log_var_prior_all))

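            # Closed-form KL divergence between the diagonal Gaussian posterior and prior,
            # computed element-wise with small numerical floors inside the log terms and
            # the denominator.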
            kl_loss_element = 0.5 * (
                2 * tf.math.log(tf.maximum(std_prior, 1e-9)) -
                2 * tf.math.log(tf.maximum(std_post, 1e-9)) +
                (tf.square(std_post) +
                 tf.square(z_mean_post_all - z_mean_prior_all)) /
                tf.maximum(tf.square(std_prior), 1e-9) - 1)
            kl_loss = tf.reduce_mean(kl_loss_element)

            likelihood_loss = tf.reduce_mean(probability_likelihood)

            loss += generated_mse_loss * generated_mse_imbalance +\
                    reconstructed_mse_loss * reconstruction_mse_imbalance + \
                    kl_loss * kl_imbalance + likelihood_loss * likelihood_imbalance \
                    + gen_loss * generated_loss_imbalance

            for weight in discriminator.trainable_variables:
                d_loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            variables = [var for var in encode_share.trainable_variables]
            for weight in encode_share.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            for weight in decoder_share.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

            for weight in post_net.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

            for weight in prior_net.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

            for weight in hawkes_process.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

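        # Update the discriminator n_disc times per generator step; the persistent
        # tape allows the same d_loss gradients to be recomputed on each pass.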
        for disc in range(n_disc):
            gradient_disc = disc_tape.gradient(
                d_loss, discriminator.trainable_variables)
            optimizer_discriminator.apply_gradients(
                zip(gradient_disc, discriminator.trainable_variables))

        gradient_gen = gen_tape.gradient(loss, variables)
        optimizer_generation.apply_gradients(zip(gradient_gen, variables))

        if train_set.epoch_completed % 1 == 0 and train_set.epoch_completed not in logged:

            logged.add(train_set.epoch_completed)
            loss_pre = generated_mse_loss
            mse_generated = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :], generated_trajectory))

            loss_diff = loss_pre - mse_generated

            if mse_generated > max_loss:
                count = 0
            else:
                if loss_diff > max_pace:
                    count = 0
                else:
                    count += 1
            if count > 9:
                break

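            # Held-out evaluation: roll the generator forward autoregressively from the
            # prior and Hawkes intensity, then report MSE, MAE, and the mean per-patient
            # DTW distance between generated and observed trajectories.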
            input_x_test = tf.cast(test_set[:, :, 1:], tf.float32)
            input_t_test = tf.cast(test_set[:, :, 0], tf.float32)

            batch_test = test_set.shape[0]
            generated_trajectory_test = tf.zeros(
                shape=[batch_test, 0, feature_dims])
            for predicted_visit_ in range(predicted_visit):
                for previous_visit_ in range(previous_visit):
                    sequence_time_test = input_x_test[:, previous_visit_, :]
                    if previous_visit_ == 0:
                        encode_c_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))
                        encode_h_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))

                    encode_c_test, encode_h_test = encode_share(
                        [sequence_time_test, encode_c_test, encode_h_test])

                if predicted_visit_ != 0:
                    for i in range(predicted_visit_):
                        encode_c_test, encode_h_test = encode_share([
                            generated_trajectory_test[:, i, :], encode_c_test,
                            encode_h_test
                        ])

                context_state_test = encode_h_test

                if predicted_visit_ == 0:
                    decode_c_generate_test = tf.Variable(
                        tf.zeros(shape=[batch_test, hidden_size]))
                    decode_h_generate_test = tf.Variable(
                        tf.zeros(shape=[batch_test, hidden_size]))
                    sequence_last_time_test = input_x_test[:, previous_visit +
                                                           predicted_visit_ -
                                                           1, :]

                z_prior_test, z_mean_prior_test, z_log_var_prior_test = prior_net(
                    context_state_test)
                current_time_index_shape_test = tf.ones(
                    [previous_visit + predicted_visit_])
                intensity_value_test, likelihood_test = hawkes_process(
                    [input_t_test, current_time_index_shape_test])

                generated_next_visit_test, decode_c_generate_test, decode_h_generate_test = decoder_share(
                    [
                        z_prior_test, context_state_test,
                        sequence_last_time_test, decode_c_generate_test,
                        decode_h_generate_test * intensity_value_test
                    ])
                generated_trajectory_test = tf.concat(
                    (generated_trajectory_test,
                     tf.reshape(generated_next_visit_test,
                                [batch_test, -1, feature_dims])),
                    axis=1)
                sequence_last_time_test = generated_next_visit_test

            mse_generated_test = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_test[:, previous_visit:previous_visit +
                                 predicted_visit, :],
                    generated_trajectory_test))
            mae_generated_test = tf.reduce_mean(
                tf.keras.losses.mae(
                    input_x_test[:, previous_visit:previous_visit +
                                 predicted_visit, :],
                    generated_trajectory_test))

            r_value_all = []
            for patient in range(batch_test):
                r_value = 0.0
                for feature in range(feature_dims):
                    x_ = input_x_test[patient, previous_visit:previous_visit +
                                      predicted_visit,
                                      feature].numpy().reshape(
                                          predicted_visit, 1)
                    y_ = generated_trajectory_test[patient, :,
                                                   feature].numpy().reshape(
                                                       predicted_visit, 1)
                    r_value += DynamicTimeWarping(x_, y_)
                r_value_all.append(r_value / 29.0)

            print(
                'epoch ---{}---train_mse_generated---{}---likelihood_loss{}---'
                'train_mse_reconstruct---{}---train_kl---{}---'
                'test_mse---{}---test_mae---{}---'
                'r_value_test---{}---count---{}'.format(
                    train_set.epoch_completed, generated_mse_loss,
                    likelihood_loss, reconstructed_mse_loss,
                    kl_loss, mse_generated_test, mae_generated_test,
                    np.mean(r_value_all), count))
    tf.compat.v1.reset_default_graph()
    # return mse_generated_test, mae_generated_test, np.mean(r_value_all)
    return -1 * mse_generated_test
Example #34
def extract_features(seq_length=50,
                     class_limit=3,
                     image_shape=(320, 240, 3),
                     cls='training'):
    # Get the dataset.
    data = DataSet(seq_length=seq_length,
                   class_limit=class_limit,
                   image_shape=image_shape,
                   model='mobileface')
    # print(data.get_data())
    classes = data.classes

    # get the model.
    # model = Extractor(image_shape=image_shape)
    #     vgg_extract = VGGExtractor('resnet50')

    mobface_extract = MobFaceExtractor()

    mood_dict = {'Alert': '0', 'Low': '5', 'Drowsy': '10'}

    # X,y = [],[]
    for video in data.data:
        # print(video)
        if video[2] != '29':
            path = os.path.join('face_images/sequences_mobface_512', 'training' ,  video[1]+ '_' + video[2] + '-' + str(seq_length) + \
                '-features')  # numpy will auto-append .npy
            if video[1] == '29':
                path_frames = os.path.join('face_images/', 'testing', video[1])
            else:
                path_frames = os.path.join('face_images/', 'training',
                                           video[1])

    #         path = os.path.join('/DATA/DMS/new_data', 'sequences_mobface_512', 'training' ,  video[1]+ '_' + video[2] + '-' + str(seq_length) + \
    #             '-features')  # numpy will auto-append .npy
    #         if video[1] == '29':
    #             path_frames = os.path.join('/DATA/DMS/','new_data', 'testing', video[1])
    #         else:
    #             path_frames = os.path.join('/DATA/DMS/','new_data', 'training', video[1])

    # Get the frames for this video.
            filename = mood_dict[str(video[1])] + '_' + str(video[2])
            frames = glob.glob(os.path.join(path_frames, filename + '*jpg'))
            frames = natsort.natsorted(frames, reverse=False)
            # print(len(frames))

            # # Now downsample to just the ones we need.

            print(video[2] + ":" + str(len(frames)))

            # Now loop through and extract features to build the sequence.
            print('Appending sequence of the video:', video)
            sequence = []

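            # Walk the frames, extract a MobileFace embedding per image, and save every
            # seq_length consecutive embeddings as one .npy sequence file.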
            cnt = 0
            for image in frames[1000:10000]:

                if os.path.isfile(path + '_' + str(cnt) + '.npy'):
                    continue

                features = mobface_extract.extract(image)

                cnt += 1
                # print('Appending sequence of image:',image,' of the video:',video)
                sequence.append(features)

                if cnt % seq_length == 0 and cnt > 0 and cnt < 15000:
                    np.save(path + '_' + str(cnt) + '.npy', sequence)
                    # X.append(sequence)
                    # y.append(get_class_one_hot(classes,video[1]))
                    sequence = []
                if cnt > 11000:
                    break
            # print(np.array(X).shape)
            # print(np.array(y).shape)
            print('Sequences saved successfully', path)
def train(hidden_size, learning_rate, l2_regularization):

    # train_set = np.load('../../Trajectory_generate/dataset_file/HF_train_.npy').reshape(-1, 6, 30)[:, :, 1:]
    # test_set = np.load('../../Trajectory_generate/dataset_file/HF_validate_.npy').reshape(-1, 6, 30)[:, :, 1:]
    # test_set = np.load('../../Trajectory_generate/dataset_file/HF_test_.npy').reshape(-1, 6, 30)[:, :, 1:]

    # train_set = np.load("../../Trajectory_generate/dataset_file/mimic_train_x_.npy").reshape(-1, 6, 37)[:, :, 1:]
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_test_x_.npy").reshape(-1, 6, 37)[:, :, 1:]
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_validate_.npy").reshape(-1, 6, 37)[:, :, 1:]

    # sepsis mimic dataset
    train_set = np.load(
        '../../Trajectory_generate/dataset_file/sepsis_mimic_train.npy'
    ).reshape(-1, 13, 40)[:, :, 1:]
    test_set = np.load(
        '../../Trajectory_generate/dataset_file/sepsis_mimic_test.npy'
    ).reshape(-1, 13, 40)[:, :, 1:]
    # test_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_validate.npy').reshape(-1, 13, 40)[:, :, 1:]

    previous_visit = 3
    predicted_visit = 10
    feature_dims = train_set.shape[2]

    train_set = DataSet(train_set)

    batch_size = 64
    epochs = 50
    # Hyperparameters
    # hidden_size = 2 ** (int(hidden_size))
    # learning_rate = 10 ** learning_rate
    # l2_regularization = 10 ** l2_regularization

    print('feature_size----{}'.format(feature_dims))
    print('previous_visit---{}---predicted_visit----{}-'.format(
        previous_visit, predicted_visit))

    print(
        'hidden_size{}-----learning_rate{}----l2_regularization{}----'.format(
            hidden_size, learning_rate, l2_regularization))

    encode_share = Encoder(hidden_size=hidden_size)
    decoder_share = Decoder(hidden_size=hidden_size, feature_dims=feature_dims)

    logged = set()

    max_loss = 0.01
    max_pace = 0.0001

    count = 0
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
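    # Teacher-forced seq2seq training: re-encode the observed visits plus the
    # ground-truth future visits seen so far, then decode the next visit from the
    # last true visit concatenated with the encoder context state.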
    while train_set.epoch_completed < epochs:
        input_x_train = train_set.next_batch(batch_size)
        batch = input_x_train.shape[0]
        with tf.GradientTape() as tape:
            predicted_trajectory = np.zeros(shape=(batch, 0, feature_dims))
            for predicted_visit_ in range(predicted_visit):
                sequence_time_last_time = input_x_train[:, previous_visit +
                                                        predicted_visit_ -
                                                        1, :]  # y_j
                for previous_visit_ in range(previous_visit +
                                             predicted_visit_):
                    sequence_time = input_x_train[:, previous_visit_, :]
                    if previous_visit_ == 0:
                        encode_c = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))
                        encode_h = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))
                    encode_c, encode_h = encode_share(
                        [sequence_time, encode_c, encode_h])
                context_state = encode_h  # h_j from 1 to j

                input_decode = tf.concat(
                    (sequence_time_last_time, context_state),
                    axis=1)  # y_j and h_j
                if predicted_visit_ == 0:
                    decode_c = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))
                    decode_h = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))

                predicted_next_sequence, decode_c, decode_h = decoder_share(
                    [input_decode, decode_c, decode_h])
                predicted_next_sequence = tf.reshape(predicted_next_sequence,
                                                     [batch, -1, feature_dims])
                predicted_trajectory = tf.concat(
                    (predicted_trajectory, predicted_next_sequence), axis=1)

            mse_loss = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :], predicted_trajectory))

            variables = [var for var in encode_share.trainable_variables]
            for weight in encode_share.trainable_variables:
                mse_loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            for weight in decoder_share.trainable_variables:
                mse_loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

            gradient = tape.gradient(mse_loss, variables)
            optimizer.apply_gradients(zip(gradient, variables))

            if train_set.epoch_completed % 1 == 0 and train_set.epoch_completed not in logged:
                logged.add(train_set.epoch_completed)
                loss_pre = mse_loss
                mse_loss = tf.reduce_mean(
                    tf.keras.losses.mse(
                        input_x_train[:, previous_visit:previous_visit +
                                      predicted_visit, :],
                        predicted_trajectory))
                loss_diff = loss_pre - mse_loss
                if mse_loss > max_loss:
                    count = 0

                else:
                    if loss_diff > max_pace:
                        count = 0
                    else:
                        count += 1
                if count > 9:
                    break

                input_x_test = test_set
                batch_test = input_x_test.shape[0]
                predicted_trajectory_test = np.zeros(
                    shape=[batch_test, 0, feature_dims])
                for predicted_visit_ in range(predicted_visit):
                    if predicted_visit_ == 0:
                        sequence_time_last_time_test = input_x_test[:,
                                                                    predicted_visit_
                                                                    +
                                                                    previous_visit
                                                                    - 1, :]
                    for previous_visit_ in range(previous_visit):
                        sequence_time_test = input_x_test[:,
                                                          previous_visit_, :]
                        if previous_visit_ == 0:
                            encode_c_test = tf.Variable(
                                tf.zeros(shape=[batch_test, hidden_size]))
                            encode_h_test = tf.Variable(
                                tf.zeros(shape=[batch_test, hidden_size]))
                        encode_c_test, encode_h_test = encode_share(
                            [sequence_time_test, encode_c_test, encode_h_test])

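                    # At test time the model runs autoregressively: previously generated
                    # visits are fed back through the encoder instead of ground truth.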
                    if predicted_visit_ != 0:
                        for i in range(predicted_visit_):
                            sequence_input_t = predicted_trajectory_test[:,
                                                                         i, :]
                            encode_c_test, encode_h_test = encode_share([
                                sequence_input_t, encode_c_test, encode_h_test
                            ])

                    context_state = encode_h_test

                    if predicted_visit_ == 0:
                        decode_c_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))
                        decode_h_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))
                    input_decode_test = tf.concat(
                        (sequence_time_last_time_test, context_state), axis=1)
                    predicted_next_sequence_test, decode_c_test, decode_h_test = decoder_share(
                        [input_decode_test, decode_c_test, decode_h_test])
                    sequence_time_last_time_test = predicted_next_sequence_test  # feed the generated sequence into next state
                    predicted_next_sequence_test = tf.reshape(
                        predicted_next_sequence_test,
                        [batch_test, -1, feature_dims])
                    predicted_trajectory_test = tf.concat(
                        (predicted_trajectory_test,
                         predicted_next_sequence_test),
                        axis=1)
                mse_loss_predicted = tf.reduce_mean(
                    tf.keras.losses.mse(
                        input_x_test[:, previous_visit:previous_visit +
                                     predicted_visit, :],
                        predicted_trajectory_test))
                mae_predicted = tf.reduce_mean(
                    tf.keras.losses.mae(
                        input_x_test[:, previous_visit:previous_visit +
                                     predicted_visit, :],
                        predicted_trajectory_test))

                r_value_all = []
                for patient in range(batch_test):
                    r_value = 0.0
                    for feature in range(feature_dims):
                        x_ = input_x_test[patient, previous_visit:,
                                          feature].reshape(predicted_visit, 1)
                        y_ = predicted_trajectory_test[
                            patient, :,
                            feature].numpy().reshape(predicted_visit, 1)
                        r_value += DynamicTimeWarping(x_, y_)
                    r_value_all.append(r_value / 29.0)

                print(
                    '----epoch{}------mse_loss{}----predicted_mse{}----mae_predicted---{}-'
                    '-predicted_r_value---{}---count  {}'.format(
                        train_set.epoch_completed,
                        mse_loss, mse_loss_predicted, mae_predicted,
                        np.mean(r_value_all), count))

                # r_value_all = []
                # p_value_all = []
                # r_value_spearman = []
                # r_value_kendalltau = []
                # for visit in range(predicted_visit):
                #     for feature in range(feature_dims):
                #         x_ = input_x_test[:, previous_visit+visit, feature]
                #         y_ = predicted_trajectory_test[:, visit, feature]
                #         r_value_ = stats.pearsonr(x_, y_)
                #         r_value_spearman_ = stats.spearmanr(x_, y_)
                #         r_value_kendalltau_ = stats.kendalltau(x_, y_)
                #         if not np.isnan(r_value_[0]):
                #             r_value_all.append(np.abs(r_value_[0]))
                #             p_value_all.append(np.abs(r_value_[1]))
                #         if not np.isnan(r_value_spearman_[0]):
                #             r_value_spearman.append(np.abs(r_value_spearman_[0]))
                #         if not np.isnan(r_value_kendalltau_[0]):
                #             r_value_kendalltau.append(np.abs(r_value_kendalltau_[0]))

                # if (train_set.epoch_completed + 1) % 1 == 0:
                #     print('----epoch{}------mse_loss{}----predicted_mse{}----mae_predicted---{}-'
                #           '---predicted_r_value---{}-predicted_spearman---{}-'
                #           '--predicted_kendalltau---{}--count  {}'.format(train_set.epoch_completed,
                #                                                           mse_loss, mse_loss_predicted,
                #                                                           mae_predicted,
                #                                                           np.mean(r_value_all),
                #                                                           np.mean(r_value_spearman),
                #                                                           np.mean(r_value_kendalltau),
                #                                                           count))
                # if (np.mean(r_value_all) > 0.87) and (np.mean(r_value_all) < 0.88) and (train_set.epoch_completed == 49):
                #     np.savetxt('AED_generated_trajectory.csv', predicted_trajectory_test.numpy().reshape(-1, feature_dims), delimiter=',')

        tf.compat.v1.reset_default_graph()
    return mse_loss_predicted, mae_predicted, np.mean(r_value_all)
Пример #36
0
class Algorithm:
    """
    
    An abstract class defining the specifics of a wrapper around an algorithm.
    An object of this class represents an executable wrapper of the target
    algorithm: it invokes the target algorithm to solve a problem and collects
    the elementary measures.

    An object of this class works as the interface between the target
    algorithm and OPAL. It contains at least three pieces of information:
    
    1. What the parameters are
    2. How to invoke the algorithm to solve a problem
    3. What measures we obtain after running the algorithm

    
    :parameters:
        :name:  Name of the algorithm (string)
        :purpose: Synopsis of purpose (string)

    Each algorithm has two aspects:

     1. Algorithmic aspect: the name, purpose, parameters, measures and the
        constraints on the parameters. The measures represent the output of
        the algorithm.

     2. Computational aspect: the description of how to run the algorithm and
        what the output is.

    Example:

      >>> dfo = Algorithm(name='DFO', purpose='Derivative-free optimization')
      >>> delmin = Parameter(default=1.0e-3, name='DELMIN')
      >>> dfo.add_param(delmin)
      >>> maxit = Parameter(type='integer', default=100, name='MAXIT')
      >>> dfo.add_param(maxit)
      >>> cpuTime = Measure(type='real', name='TIME')
      >>> dfo.add_measure(cpuTime)
      >>> print [param.name for param in dfo.parameters]
      ['DELMIN', 'MAXIT']
      >>> real_params = [param for param in dfo.parameters if param.is_real]
      >>> print [param.name for param in real_params]
      ['DELMIN']
    """

    def __init__(self, name=None, description=None, **kwargs):
       
        # Algorithmic description
        self.name = name
        self.description = description
        self.parameters = DataSet(name='Parameter set')  # List of parameters 
                                                         # (of type Parameter)
        self.measures = DataSet(name='Measure set')  # List of measures 
                                                     # (the observation of the 
                                                     # algorithm)
        self.constraints = []

        # Computational description
        self.parameter_file = self.name + '.param'
        self.sessions = {} # dictionary map between session id and parameter
                           # values

    def add_param(self, param):
        "Add a parameter to an algorithm"
        if isinstance(param, Parameter):
            self.parameters.append(param)
        else:
            raise TypeError, 'param must be a Parameter'
        return
    
    def add_measure(self, measure):
        "Add a measure to an algorithm"
        if isinstance(measure, Measure):
            self.measures.append(measure)
        else:
            raise TypeError, 'measure must be a Measure object'
        return

   
    def update_parameters(self, parameters):
        """
        
        This method returns a unique identity for the test based on the
        parameter values.

        The identity is obtained by hashing the string of parameter values.
        This is an invertible function, meaning the parameter values can be
        recovered from the id.


        This virtual method determines how values for the parameters of the
        algorithm are written to an intermediate file that is later read by
        the algorithm driver.

        The format of the intermediate file depends on this method. By
        default, the parameter set is written with pickle.
       
        """
        values = dict((param.name,param.value) for param in parameters)
        # Fill the values to parameter set
        self.parameters.set_values(values)
        # Write the values to a temporary parameter file 
        # for communicating with an executable wrapper 
        return 
    

    def create_tag(self, problem):
        return 

    def set_executable_command(self, command):
        self.executable = command
        return

    def write_parameter(self, fileName):
        f = open(fileName, 'w')
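        # One parameter per line, written as 'name:kind:value'.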
        for param in self.parameters:
            f.write(param.name + ':' +  param.kind + ':' + \
                    str(param.value) + '\n')
        f.close()
        return

    def read_measure(self, fileName):
        """

        This virtual method determines how to extract measure values from the
        output of the algorithm.

        :parameters:
            :problem:
            :measures: List of measures we want to extract

        :returns: A mapping measure name --> measure value

        By default, the algorithm returns the measure values to the standard
        output. In the `run()` method, the output is redirected to file.
        """
        
        f = open(fileName)
        lines = f.readlines()
        f.close()
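        # Each useful line of the measure file is expected to look like
        # '<MEASURE_NAME> <value>'; malformed or unknown entries are skipped.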
        converters = {'categorical':str, 'integer':int, 'real':float}
        measure_values = {}
        for line in lines:
            line = line.strip('\n')
            if len(line) < 1:
                continue
            fields = line.split(' ')
            if len(fields) < 2:
                continue
            measureName = fields[0].strip(' ')
            if measureName not in self.measures:
                continue
            measure_values[measureName] = fields[1].strip(' ')
        for i in range(len(self.measures)):
            convert = converters[self.measures[i].get_type()]
            try:
                measure_values[self.measures[i].name] = \
                    convert(measure_values[self.measures[i].name])
            except ValueError:
                return None
        return measure_values

    def solve(self, problem, parameters=None, parameterTag=None ):
        """
        .. warning::

            Why do we need `paramValues` here???
            What kind of object is `problem`???

        This virtual method determines how to run the algorithm.

        :parameters:
            :paramValues: List of parameter values
            :problem: Problem (???)

        :returns: The command for executing the algorithm.

        By default, the algorithm is called by the command

            `./algorithm paramfile problem outputfile`
        """
        
        if parameters is not None:
            self.update_parameters(parameters)

        if parameterTag is not None:
            sessionTag = problem.name + '_' + parameterTag
        else:
            sessionTag = self.create_tag(problem)

        algoName = self.name.replace(' ','_')
        parameterFile = algoName + '_' +\
                        str(sessionTag) +\
                        '.param'
                                                        
        outputFile = algoName + '_' +\
                     str(sessionTag) +\
                     '.measure'

        if not os.path.exists(parameterFile):
            self.write_parameter(parameterFile)
        cmd = self.executable + ' ' +\
              parameterFile + ' ' +\
              problem.name + ' ' +\
              outputFile        
       
            
        return cmd, parameterFile, outputFile, sessionTag

    
    def add_parameter_constraint(self, paramConstraint):
        """
        Specify the domain of a parameter.
        """
        if isinstance(paramConstraint, ParameterConstraint):
            self.constraints.append(paramConstraint)
        elif isinstance(paramConstraint, str):
            self.constraints.append(ParameterConstraint(paramConstraint))
        else:
            msg = 'paramConstraint must be a String or ParameterConstraint'
            raise TypeError, msg
        return

    def are_parameters_valid(self):
        """
        Return True if all parameters are in their domain and satisfy the
        constraints. Return False otherwise.
        """
        #print '[algorithm.py]',[param.value for param in parameters]
        for constraint in self.constraints:
            if constraint(self.parameters) is ParameterConstraint.violated:
                return ParameterConstraint.violated
        for param in self.parameters:
            if not param.is_valid():
                return False
        return True
Пример #37
0
def validate(data_type,
             model,
             seq_length=10,
             saved_model=None,
             concat=False,
             class_limit=None,
             image_shape=None):
    batch_size = 16

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # val_generator = data.frame_generator(batch_size, 'test', data_type, concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Evaluate!
    # results = rm.model.evaluate_generator(
    #     generator=val_generator,
    #     val_samples=17)

    flnames = []
    data_csv = pd.read_csv(
        '/home/takubuntu/PycharmProjects/DL/Wake_detect/IR_classification/data/data_file.csv'
    )

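    # In data_file.csv, column 0 holds the split (only 'test' rows are used here),
    # column 1 the class label and column 2 the file name.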
    flnames = list(data_csv[data_csv.iloc[:, 0] == 'test'].iloc[:, 2])
    fcategories = list(data_csv[data_csv.iloc[:, 0] == 'test'].iloc[:, 1])

    filenumber = 0
    nb_samples = 0
    nb_incorrect = 0
    for filename in flnames:
        fnumber = flnames[filenumber]
        fcategory = fcategories[filenumber]
        filenumber += 1
        allpath_f = '/home/takubuntu/PycharmProjects/DL/Wake_detect/IR_classification/data/test/' + fcategory + '/' + fnumber + '.csv'

        if not os.path.isfile(allpath_f):
            continue

        f = np.loadtxt(allpath_f, delimiter=',').reshape((-1, 16, 16, 1))

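        # Downsample the clip to exactly seq_length frames by keeping every
        # `skip`-th frame; clips no longer than seq_length are skipped.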
        if len(f) > seq_length:
            skip = len(f) // seq_length
            f = f[::skip]
            f = f[:seq_length]
        else:
            continue

        f = f.reshape((1, -1, 16, 16, 1))

        resultpred = rm.model.predict(x=f, batch_size=5, verbose=0)

        resultclass = np.argmax(resultpred)

        # print(np.squeeze(resultpred).shape)
        # print(resultclass.shape)

        if data.classes[int(resultclass)] != fcategory:
            print(filename)
            print('Answer: {}'.format(fcategory))
            print('Predict: {}'.format(data.classes[int(resultclass)]))
            # print('classes: {}'.format(data.classes))

            rankindex = np.argsort(np.squeeze(resultpred))[::-1]
            rankclass = []
            for i in rankindex:
                rankclass.append(data.classes[i])

            print('Predict_possibility: {}\n'.format(rankclass))
            nb_incorrect += 1
        nb_samples += 1
    print('The number of incorrect is {0} in {1} samples'.format(
        nb_incorrect, nb_samples))

    print(data.classes)
Пример #38
0
        str_ans_corr = fmt % ans_corr
        str_ans_out = fmt % ans_out
        if str_ans_corr == str_ans_out:
            outcome = CORRECT
        else:
            print >> sys.stderr, "correctness FAILED: %s (correct is %s, output had %s)" % (ppinput(input_graph), str_ans_corr, str_ans_out)
            outcome = INCORRECT

    # log the result of the correctness check
    if rev is not None and run is not None:
        if ti is None:
            ti = __get_ti(input_graph)

        data = CorrResult(ti.dims, ti.min, ti.max, ti.num_verts, ti.num_edges, ti.seed, rev, run, outcome)
        try:
            DataSet.add_data_to_log_file(data)
            print 'logged correctness result to ' + data.get_path()
        except DataError, e:
            fmt = "Unable to log result to file %s (correct is %s, output had %s): %s"
            print >> sys.stderr, fmt % (ppinput(input_graph), str_ans_corr, str_ans_out, e)

    return outcome

def main(argv=sys.argv[1:]):
    usage = """usage: %prog [options] INPUT_GRAPH OUTPUT_TO_CHECK
Checks the validity of an MST.  Exits with code 0 on success.  Otherwise, it
prints an error message and exits with a non-zero code.  Does not log the result."""
    parser = OptionParser(usage)
    parser.add_option("-f", "--force-recompute",
                      action="store_true", default=False,
                      help="recomputes the MST weight with the checker even if we have a cached value")
Пример #39
0
extract all 101 classes. For instance, set class_limit = 8 to just
extract features for the first 8 (alphabetical) classes in the dataset.
Then set the same number when training models.
"""
import numpy as np
import os.path
from data import DataSet
from extractor import Extractor
from tqdm import tqdm

# Set defaults.
seq_length = 40
class_limit = None  # Number of classes to extract. Can be 1-101 or None for all.

# Get the dataset.
data = DataSet(seq_length=seq_length, class_limit=class_limit)

# get the model.
model = Extractor()

# Loop through data.
pbar = tqdm(total=len(data.data))
for video in data.data:

    # Get the path to the sequence for this video.
    path = os.path.join('data', 'sequences', video[2] + '-' + str(seq_length) + \
        '-features')  # numpy will auto-append .npy

    # Check if we already have it.
    if os.path.isfile(path + '.npy'):
        pbar.update(1)
        continue
def train(data_type, seq_length, model, saved_model=None,
          class_limit=None, image_shape=None,
          load_to_memory=False, batch_size=32, nb_epoch=100):
    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger],
            epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
Пример #41
0
        get_results_for_rev = lambda _ : DataSet.read_from_file(WeightResult, WeightResult.get_path_to(wtype))
        collect_missing_data = collect_missing_weight_data
    elif options.dims > 0 or options.edge:
        parser.error('-v is required whenever -d or -e is used')

    # handle -i, -l: collect data for a particular graph(s)
    if options.input_graph and options.inputs_list_file:
        parser.error('-i and -l are mutually exclusive')
    if options.input_graph is not None:
        try:
            i = extract_input_footer(options.input_graph)
        except ExtractInputFooterError, e:
            parser.error(e)
        input_solns = DataSet({0:InputSolution(i.prec,i.dims,i.min,i.max,i.num_verts,i.num_edges,i.seed)})
    elif options.inputs_list_file is not None:
        input_solns = DataSet.read_from_file(InputSolution, options.inputs_list_file)

    # prepare for a correctness data collection
    if options.correctness:
        num_on += 1
        get_results_for_rev = lambda rev : DataSet.read_from_file(CorrResult, CorrResult.get_path_to(rev))
        options.inputs_list_file_arg = '' if options.inputs_list_file is None else ' -l ' + options.inputs_list_file
        collect_missing_data = lambda w,x,y,z: collect_missing_correctness_data(w,x,y,z,options.inputs_list_file_arg)

    # make sure no more than 1 type of data collection was specified
    if num_on > 1:
        parser.error('at most one of -c, -d, and -e may be specified')
    elif num_on == 0:
        # prepare for a performance data collection (default if nothing else is specified)
        get_results_for_rev = lambda rev : DataSet.read_from_file(PerfResult, PerfResult.get_path_to(rev))
        collect_missing_data = collect_missing_performance_data
from subprocess import call
import matplotlib.pyplot as plt

if len(sys.argv) == 1:
    print("No args... exiting")
    exit()

fname_ext = os.path.basename(sys.argv[1])
fname = fname_ext.split('.')[0]

call([
    "ffmpeg", "-i", sys.argv[1],
    os.path.join('data/test_vid', fname + '-%04d.jpg')
])

data = DataSet(seq_length=40, class_limit=8)
frames = sorted(glob.glob(os.path.join('data/test_vid', fname + '*jpg')))
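# Downsample the extracted frames to the 40-frame sequence length used at training time.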
frames = data.rescale_list(frames, 40)

sequence = []
model = Extractor()  #This uses inception cnn model

for image in frames:
    features = model.extract(image)
    sequence.append(features)
np.save('data/test_vid/', sequence)

saved_model = 'data/checkpoints/lstm-features.008-0.105.hdf5'  #lstm custom model which is generated by training
model = load_model(saved_model)
prediction = model.predict(np.expand_dims(sequence, axis=0))
# to predict the class of input data
Пример #43
0
def gather_perf_data(alg, rev, index, latest):
    """Gathers performance data for a single revision of an algorithm"""
    print 'gathering perf data for %s (rev=%s index=%u latest=%s)' % (alg, rev, index, str(latest))

    # get the results
    results = {} # maps (|V|, |E|) to ResultAccumulator
    ds = DataSet.read_from_file(PerfResult, PerfResult.get_path_to(rev))
    for data in ds.dataset.values():
        key = (data.input().num_verts, data.input().num_edges)
        result = results.get(key)
        if result is None:
            result = ResultAccumulator(data.time_sec)
            result.defaultCI = DEFAULT_CI
            results[key] = result
        else:
            result.add_data(data.time_sec)

    # put the results in order
    keys_density = results.keys()
    keys_density.sort(density_compare)
    keys_pom = results.keys()
    keys_pom.sort(pom_compare)
    keys = {}
    keys['density'] = keys_density
    keys['pom'] = keys_pom

    # compute stats for all the results
    for key in results.keys():
        results[key].compute_stats()

    # generate dat files for each x-axis cross important vertex counts
    for xaxis in keys:
        if xaxis == 'pom':
            computex = lambda v, e : get_percent_of_max(v, e)
        elif xaxis == 'density':
            computex = lambda v, e : get_density(v, e)
        else:
            print >> sys.stderr, "unexpected x-axis value: " + str(xaxis)
            sys.exit(-1)
        header_txt = '#|V|\t|E|\t' + xaxis + '\tLower\tAverage\tUpper\t#Runs  (Lower/Upper from ' + str(DEFAULT_CI) + '% CI)'

        for vip in IMPORTANT_VERTS:
            # open a file to output to
            dat = get_output_dat_name(xaxis, alg, rev, index, vip)
            print 'creating ' + dat
            if latest:
                latest_fn = make_latest(xaxis, alg, rev, index, vip)
            try:
                fh = open(dat, 'w')

                # compute relevant stats and output them
                print >> fh, header_txt
                count = 0
                for (v, e) in keys[xaxis]:
                    if vip=='all' or vip==v:
                        count += 1
                        r = results[(v, e)]
                        x = computex(v, e)
                        print >> fh, '%u\t%u\t%.6f\t%.3f\t%.3f\t%.3f\t%u' % (v, e, x, r.lower99, r.mean, r.upper99, len(r.values))
                fh.close()

                # don't create empty files
                if count == 0:
                    quiet_remove(dat)
                    if latest:
                        quiet_remove(latest_fn)

            except IOError, e:
                print >> sys.stderr, "failed to write file: " + str(e)
                return -1
Пример #44
0
class Scrublet():
    def __init__(self,
                 counts_matrix,
                 stat_filename,
                 total_counts=None,
                 sim_doublet_ratio=2.0,
                 expected_doublet_rate=0.1,
                 stdev_doublet_rate=0.02,
                 random_state=0,
                 max_iter=20,
                 sample_rate=0.5,
                 learn_rate=0.7,
                 max_depth=1,
                 split_points=1000,
                 p2u_pro=0.1,
                 train_rate=0.7):

        if not scipy.sparse.issparse(counts_matrix):
            counts_matrix = scipy.sparse.csc_matrix(counts_matrix)
        elif not scipy.sparse.isspmatrix_csc(counts_matrix):
            counts_matrix = counts_matrix.tocsc()

        self.counts_matrix_d = DataSet(counts_matrix)
        self.counts_matrix_d.describe()

        # initialize counts matrices
        self._E_obs = counts_matrix
        self._E_sim = None
        self._E_obs_norm = None
        self._E_sim_norm = None
        self.max_iter = max_iter
        self.sample_rate = sample_rate
        self.learn_rate = learn_rate
        self.max_depth = max_depth
        self.split_points = split_points
        self.p2u_pro = p2u_pro
        self.train_rate = train_rate
        self.stat_filename = stat_filename

        if total_counts is None:
            self._total_counts_obs = self._E_obs.sum(1).A.squeeze()
        else:
            self._total_counts_obs = total_counts

        self._gene_filter = np.arange(self._E_obs.shape[1])
        self._embeddings = {}

        self.sim_doublet_ratio = sim_doublet_ratio
        self.expected_doublet_rate = expected_doublet_rate
        self.stdev_doublet_rate = stdev_doublet_rate
        self.random_state = random_state

    ######## Core Scrublet functions ########

    def scrub_doublets(self,
                       synthetic_doublet_umi_subsampling=1.0,
                       min_counts=3,
                       min_cells=3,
                       min_gene_variability_pctl=85,
                       log_transform=False,
                       mean_center=True,
                       normalize_variance=True,
                       n_prin_comps=30,
                       verbose=True):
        t0 = time.time()

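        # Clear any simulated / normalized matrices and the gene filter from a
        # previous run so the pipeline starts from a clean state.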
        self._E_sim = None
        self._E_obs_norm = None
        self._E_sim_norm = None
        self._gene_filter = np.arange(self._E_obs.shape[1])

        print_optional('Preprocessing...', verbose)
        pipeline_normalize(self)
        pipeline_get_gene_filter(
            self,
            min_counts=min_counts,
            min_cells=min_cells,
            min_gene_variability_pctl=min_gene_variability_pctl)
        pipeline_apply_gene_filter(self)

        print_optional('Simulating doublets...', verbose)
        self.simulate_doublets(
            sim_doublet_ratio=self.sim_doublet_ratio,
            synthetic_doublet_umi_subsampling=synthetic_doublet_umi_subsampling
        )
        pipeline_normalize(self, postnorm_total=1e6)
        if log_transform:
            pipeline_log_transform(self)
        if mean_center and normalize_variance:
            pipeline_zscore(self)
        elif mean_center:
            pipeline_mean_center(self)
        elif normalize_variance:
            pipeline_normalize_variance(self)

        if mean_center:
            print_optional('Embedding transcriptomes using PCA...', verbose)
            pipeline_pca(self,
                         n_prin_comps=n_prin_comps,
                         random_state=self.random_state)
        else:
            print_optional('Embedding transcriptomes using Truncated SVD...',
                           verbose)
            pipeline_truncated_svd(self,
                                   n_prin_comps=n_prin_comps,
                                   random_state=self.random_state)

        t1 = time.time()
        print_optional('Elapsed time: {:.1f} seconds'.format(t1 - t0), verbose)

    def simulate_doublets(self,
                          sim_doublet_ratio=None,
                          synthetic_doublet_umi_subsampling=1.0):
        ''' Simulate doublets by adding the counts of random observed transcriptome pairs.

        Arguments
        ---------
        sim_doublet_ratio : float, optional (default: None)
            Number of doublets to simulate relative to the number of observed 
            transcriptomes. If `None`, self.sim_doublet_ratio is used.

        synthetic_doublet_umi_subsampling : float, optional (default: 1.0)
            Rate for sampling UMIs when creating synthetic doublets. If 1.0, 
            each doublet is created by simply adding the UMIs from two randomly 
            sampled observed transcriptomes. For values less than 1, the 
            UMI counts are added and then randomly sampled at the specified
            rate.

        Sets
        ----
        doublet_parents_
        '''

        if sim_doublet_ratio is None:
            sim_doublet_ratio = self.sim_doublet_ratio
        else:
            self.sim_doublet_ratio = sim_doublet_ratio

        self.n_obs = self._E_obs.shape[0]
        self.n_sim = int(self.n_obs * sim_doublet_ratio)

        np.random.seed(self.random_state)
        pair_ix = np.random.randint(0, self.n_obs, size=(self.n_sim, 2))
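        # Each simulated doublet is the sum of two randomly paired observed
        # transcriptomes; their UMIs are optionally subsampled below
        # (synthetic_doublet_umi_subsampling < 1).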

        E1 = self._E_obs[pair_ix[:, 0], :]
        E2 = self._E_obs[pair_ix[:, 1], :]
        tots1 = self._total_counts_obs[pair_ix[:, 0]]
        tots2 = self._total_counts_obs[pair_ix[:, 1]]
        if synthetic_doublet_umi_subsampling < 1:
            self._E_sim, self._total_counts_sim = subsample_counts(
                E1 + E2,
                synthetic_doublet_umi_subsampling,
                tots1 + tots2,
                random_seed=self.random_state)
        else:
            self._E_sim = E1 + E2
            self._total_counts_sim = tots1 + tots2
        self.doublet_parents_ = pair_ix
        return

    def classifier(self, exp_doub_rate=0.1, stdev_doub_rate=0.03):

        stat_file = open(self.stat_filename, "w+", encoding='gbk')

        stat_file.write(
            "iteration\taverage_loss_in_train_data\tprediction_accuracy_on_test_data\taverage_loss_in_test "
            "data\n")
        doub_labels = np.concatenate(
            (np.zeros(self.n_obs, dtype=int), np.ones(self.n_sim, dtype=int)))

        model = Model(self.max_iter, self.sample_rate, self.learn_rate,
                      self.max_depth, self.split_points)
        train_data = self.counts_matrix_d.train_data_id(
            self.p2u_pro, self.train_rate)
        test_data = self.counts_matrix_d.test_data_id(self.p2u_pro,
                                                      self.train_rate)
        model.train(self.counts_matrix_d, train_data, stat_file, test_data)
        test_data_predict, x, y = model.test(self.counts_matrix_d, test_data)
        y_true = []
        for id in test_data:
            y_true.append(self.counts_matrix_d.get_instance(id)['label'])
        y_pred = test_data_predict
        y_pred = [int(id) for id in y_pred]
        print(y_pred)
        auc_score = roc_auc_score(y_true, y_pred)
        print('auc_score=', auc_score)
        stat_file.close()
Пример #45
0
def train(hidden_size, learning_rate, l2_regularization, n_disc,
          generated_mse_imbalance, generated_loss_imbalance,
          likelihood_imbalance):
    # train_set = np.load("../../Trajectory_generate/dataset_file/train_x_.npy").reshape(-1, 6, 60)
    # test_set = np.load("../../Trajectory_generate/dataset_file/test_x.npy").reshape(-1, 6, 60)
    # test_set = np.load("../../Trajectory_generate/dataset_file/validate_x_.npy").reshape(-1, 6, 60)

    train_set = np.load(
        '../../Trajectory_generate/dataset_file/HF_train_.npy').reshape(
            -1, 6, 30)
    # test_set = np.load('../../Trajectory_generate/dataset_file/HF_validate_.npy').reshape(-1, 6, 30)
    test_set = np.load(
        '../../Trajectory_generate/dataset_file/HF_test_.npy').reshape(
            -1, 6, 30)

    # train_set = np.load("../../Trajectory_generate/dataset_file/mimic_train_x_.npy").reshape(-1, 6, 37)
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_test_x_.npy").reshape(-1, 6, 37)
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_validate_.npy").reshape(-1, 6, 37)

    # sepsis mimic dataset
    # train_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_train.npy').reshape(-1, 13, 40)
    # test_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_test.npy').reshape(-1, 13, 40)
    # test_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_validate.npy').reshape(-1, 13, 40)

    previous_visit = 3
    predicted_visit = 3

    feature_dims = train_set.shape[2] - 1

    train_set = DataSet(train_set)
    train_set.epoch_completed = 0
    batch_size = 64
    epochs = 50

    # hidden_size = 2 ** (int(hidden_size))
    # learning_rate = 10 ** learning_rate
    # l2_regularization = 10 ** l2_regularization
    # n_disc = int(n_disc)
    # generated_mse_imbalance = 10 ** generated_mse_imbalance
    # generated_loss_imbalance = 10 ** generated_loss_imbalance
    # likelihood_imbalance = 10 ** likelihood_imbalance

    print('previous_visit---{}---predicted_visit----{}-'.format(
        previous_visit, predicted_visit))

    print(
        'hidden_size---{}---learning_rate---{}---l2_regularization---{}---n_disc---{}'
        'generated_mse_imbalance---{}---generated_loss_imbalance---{}---'
        'likelihood_imbalance---{}'.format(hidden_size, learning_rate,
                                           l2_regularization, n_disc,
                                           generated_mse_imbalance,
                                           generated_loss_imbalance,
                                           likelihood_imbalance))
    encode_share = Encoder(hidden_size=hidden_size)
    decoder_share = Decoder(hidden_size=hidden_size, feature_dims=feature_dims)
    hawkes_process = HawkesProcess()
    discriminator = Discriminator(previous_visit=previous_visit,
                                  predicted_visit=predicted_visit,
                                  hidden_size=hidden_size)

    logged = set()
    max_loss = 0.001
    max_pace = 0.0001
    count = 0
    loss = 0
    optimizer_generation = tf.keras.optimizers.RMSprop(
        learning_rate=learning_rate)
    optimizer_discriminator = tf.keras.optimizers.RMSprop(
        learning_rate=learning_rate)
    cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    while train_set.epoch_completed < epochs:
        input_train = train_set.next_batch(batch_size=batch_size)
        input_x_train = tf.cast(input_train[:, :, 1:], tf.float32)
        input_t_train = tf.cast(input_train[:, :, 0], tf.float32)
        batch = input_train.shape[0]

        with tf.GradientTape() as gen_tape, tf.GradientTape(
                persistent=True) as disc_tape:
            generated_trajectory = tf.zeros(shape=[batch, 0, feature_dims])
            probability_likelihood = tf.zeros(shape=[batch, 0, 1])
            for predicted_visit_ in range(predicted_visit):
                sequence_last_time = input_x_train[:, previous_visit +
                                                   predicted_visit_ - 1, :]
                for previous_visit_ in range(previous_visit +
                                             predicted_visit_):
                    sequence_time = input_x_train[:, previous_visit_, :]
                    if previous_visit_ == 0:
                        encode_c = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))
                        encode_h = tf.Variable(
                            tf.zeros(shape=[batch, hidden_size]))

                    encode_c, encode_h = encode_share(
                        [sequence_time, encode_c, encode_h])
                context_state = encode_h

                if predicted_visit_ == 0:
                    decode_c = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))
                    decode_h = tf.Variable(
                        tf.zeros(shape=[batch, hidden_size]))

                current_time_index_shape = tf.ones(
                    shape=[previous_visit + predicted_visit_])
                intensity_value, likelihood = hawkes_process(
                    [input_t_train, current_time_index_shape])
                probability_likelihood = tf.concat(
                    (probability_likelihood,
                     tf.reshape(likelihood, [batch, -1, 1])),
                    axis=1)

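                # The decoder is conditioned on the last visit, the encoder context and a
                # hidden state scaled by the Hawkes intensity (decode_h * intensity_value).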
                generated_next_visit, decode_c, decode_h = decoder_share([
                    sequence_last_time, context_state, decode_c,
                    decode_h * intensity_value
                ])
                generated_trajectory = tf.concat(
                    (generated_trajectory,
                     tf.reshape(generated_next_visit,
                                [batch, -1, feature_dims])),
                    axis=1)

            d_real_pre_, d_fake_pre_ = discriminator(input_x_train,
                                                     generated_trajectory)
            d_real_pre_loss = cross_entropy(tf.ones_like(d_real_pre_),
                                            d_real_pre_)
            d_fake_pre_loss = cross_entropy(tf.zeros_like(d_fake_pre_),
                                            d_fake_pre_)
            d_loss = d_real_pre_loss + d_fake_pre_loss
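            # Standard GAN objectives: the discriminator separates real from generated
            # trajectories, while the generator loss below combines adversarial loss,
            # reconstruction MSE and the weighted Hawkes likelihood term.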

            gen_loss = cross_entropy(tf.ones_like(d_fake_pre_), d_fake_pre_)
            generated_mse_loss = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :], generated_trajectory))

            likelihood_loss = tf.reduce_mean(probability_likelihood)

            loss += generated_mse_loss * generated_mse_imbalance + likelihood_loss * likelihood_imbalance + \
                    gen_loss * generated_loss_imbalance

            for weight in discriminator.trainable_variables:
                d_loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            variables = [var for var in encode_share.trainable_variables]
            for weight in encode_share.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            for weight in decoder_share.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

            for weight in hawkes_process.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)
                variables.append(weight)

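        # The discriminator is updated n_disc times per generator step; the persistent
        # tape allows d_loss gradients to be recomputed on each pass.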
        for disc in range(n_disc):
            gradient_disc = disc_tape.gradient(
                d_loss, discriminator.trainable_variables)
            optimizer_discriminator.apply_gradients(
                zip(gradient_disc, discriminator.trainable_variables))

        gradient_gen = gen_tape.gradient(loss, variables)
        optimizer_generation.apply_gradients(zip(gradient_gen, variables))

        if train_set.epoch_completed % 1 == 0 and train_set.epoch_completed not in logged:
            logged.add(train_set.epoch_completed)
            loss_pre = generated_mse_loss

            mse_generated = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_train[:, previous_visit:previous_visit +
                                  predicted_visit, :], generated_trajectory))

            loss_diff = loss_pre - mse_generated

            if mse_generated > max_loss:
                count = 0
            else:
                if loss_diff > max_pace:
                    count = 0
                else:
                    count += 1
            if count > 9:
                break

            input_x_test = tf.cast(test_set[:, :, 1:], tf.float32)
            input_t_test = tf.cast(test_set[:, :, 0], tf.float32)

            batch_test = test_set.shape[0]
            generated_trajectory_test = tf.zeros(
                shape=[batch_test, 0, feature_dims])
            for predicted_visit_ in range(predicted_visit):
                for previous_visit_ in range(previous_visit):
                    sequence_time_test = input_x_test[:, previous_visit_, :]
                    if previous_visit_ == 0:
                        encode_c_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))
                        encode_h_test = tf.Variable(
                            tf.zeros(shape=[batch_test, hidden_size]))

                    encode_c_test, encode_h_test = encode_share(
                        [sequence_time_test, encode_c_test, encode_h_test])

                if predicted_visit_ != 0:
                    for i in range(predicted_visit_):
                        encode_c_test, encode_h_test = encode_share([
                            generated_trajectory_test[:, i, :], encode_c_test,
                            encode_h_test
                        ])

                context_state_test = encode_h_test

                if predicted_visit_ == 0:
                    decode_c_test = tf.Variable(
                        tf.zeros(shape=[batch_test, hidden_size]))
                    decode_h_test = tf.Variable(
                        tf.zeros(shape=[batch_test, hidden_size]))
                    sequence_last_time_test = input_x_test[:, previous_visit +
                                                           predicted_visit_ -
                                                           1, :]

                current_time_index_shape = tf.ones(
                    [previous_visit + predicted_visit_])
                intensity_value, likelihood = hawkes_process(
                    [input_t_test, current_time_index_shape])
                generated_next_visit, decode_c_test, decode_h_test = decoder_share(
                    [
                        sequence_last_time_test, context_state_test,
                        decode_c_test, decode_h_test * intensity_value
                    ])
                generated_trajectory_test = tf.concat(
                    (generated_trajectory_test,
                     tf.reshape(generated_next_visit,
                                [batch_test, -1, feature_dims])),
                    axis=1)
                sequence_last_time_test = generated_next_visit

            mse_generated_test = tf.reduce_mean(
                tf.keras.losses.mse(
                    input_x_test[:, previous_visit:previous_visit +
                                 predicted_visit, :],
                    generated_trajectory_test))
            mae_generated_test = tf.reduce_mean(
                tf.keras.losses.mae(
                    input_x_test[:, previous_visit:previous_visit +
                                 predicted_visit, :],
                    generated_trajectory_test))

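            # DTW distance between true and generated trajectories, averaged per feature:
            # 29.0 equals feature_dims for the HF data (30 columns minus the time column).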
            r_value_all = []
            for patient in range(batch_test):
                r_value = 0.0
                for feature in range(feature_dims):
                    x_ = input_x_test[patient, previous_visit:,
                                      feature].numpy().reshape(
                                          predicted_visit, 1)
                    y_ = generated_trajectory_test[patient, :,
                                                   feature].numpy().reshape(
                                                       predicted_visit, 1)
                    r_value += DynamicTimeWarping(x_, y_)
                r_value_all.append(r_value / 29.0)

            print(
                '------epoch{}------mse_loss{}----mae_loss{}------predicted_r_value---{}--'
                '-count  {}'.format(train_set.epoch_completed,
                                    mse_generated_test, mae_generated_test,
                                    np.mean(r_value_all), count))

            # r_value_all = []
            # p_value_all = []
            # r_value_spearman = []
            # r_value_kendalltau = []
            # for visit in range(predicted_visit):
            #     for feature in range(feature_dims):
            #         x_ = input_x_test[:, previous_visit+visit, feature]
            #         y_ = generated_trajectory_test[:, visit, feature]
            #         r_value_ = stats.pearsonr(x_, y_)
            #         r_value_spearman_ = stats.spearmanr(x_, y_)
            #         r_value_kendalltau_ = stats.kendalltau(x_, y_)
            #         if not np.isnan(r_value_[0]):
            #             r_value_all.append(np.abs(r_value_[0]))
            #             p_value_all.append(np.abs(r_value_[1]))
            #         if not np.isnan(r_value_spearman_[0]):
            #             r_value_spearman.append(np.abs(r_value_spearman_[0]))
            #         if not np.isnan(r_value_kendalltau_[0]):
            #             r_value_kendalltau.append(np.abs(r_value_kendalltau_[0]))
            # print('------epoch{}------mse_loss{}----mae_loss{}------predicted_r_value---{}--'
            #       'r_value_spearman---{}---r_value_kendalltau---{}--count  {}'.format(train_set.epoch_completed,
            #                                                                           mse_generated_test,

            #                                                                           mae_generated_test,
            #                                                                           np.mean(r_value_all),
            #                                                                           np.mean(r_value_spearman),
            #                                                                           np.mean(r_value_kendalltau),
            #                                                                           count))

    tf.compat.v1.reset_default_graph()
    return mse_generated_test, mae_generated_test, np.mean(r_value_all)
def train(hidden_size, l2_regularization, learning_rate, generated_imbalance, likelihood_imbalance):
    train_set = np.load("../../Trajectory_generate/dataset_file/HF_train_.npy").reshape(-1, 6, 30)
    test_set = np.load("../../Trajectory_generate/dataset_file/HF_test_.npy").reshape(-1, 6, 30)
    # test_set = np.load("../../Trajectory_generate/dataset_file/HF_validate_.npy").reshape(-1, 6, 30)

    # train_set = np.load("../../Trajectory_generate/dataset_file/mimic_train_x_.npy").reshape(-1, 6, 37)
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_test_x_.npy").reshape(-1, 6, 37)
    # test_set = np.load("../../Trajectory_generate/dataset_file/mimic_validate_.npy").reshape(-1, 6, 37)

    # sepsis mimic dataset
    # train_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_train.npy').reshape(-1, 13, 40)
    # test_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_test.npy').reshape(-1, 13, 40)
    # test_set = np.load('../../Trajectory_generate/dataset_file/sepsis_mimic_validate.npy').reshape(-1, 13, 40)

    previous_visit = 3
    predicted_visit = 3

    feature_dims = train_set.shape[2] - 1

    train_set = DataSet(train_set)
    train_set.epoch_completed = 0
    batch_size = 64
    epochs = 50

    # hidden_size = 2 ** (int(hidden_size))
    # learning_rate = 10 ** learning_rate
    # l2_regularization = 10 ** l2_regularization
    # generated_imbalance = 10 ** generated_imbalance
    # likelihood_imbalance = 10 ** likelihood_imbalance

    print('previous_visit---{}---predicted_visit----{}-'.format(previous_visit, predicted_visit))

    print('hidden_size----{}---'
          'l2_regularization---{}---'
          'learning_rate---{}---'
          'generated_imbalance---{}---'
          'likelihood_imbalance---{}'.
          format(hidden_size, l2_regularization, learning_rate,
                 generated_imbalance, likelihood_imbalance))

    decoder_share = Decoder(hidden_size=hidden_size, feature_dims=feature_dims)
    encode_share = Encoder(hidden_size=hidden_size)
    hawkes_process = HawkesProcess()

    logged = set()
    max_loss = 0.01
    max_pace = 0.001
    loss = 0

    count = 0
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)

    while train_set.epoch_completed < epochs:
        input_train = train_set.next_batch(batch_size=batch_size)
        batch = input_train.shape[0]
        input_x_train = tf.cast(input_train[:, :, 1:], tf.float32)
        input_t_train = tf.cast(input_train[:, :, 0], tf.float32)

        with tf.GradientTape() as tape:
            predicted_trajectory = tf.zeros(shape=[batch, 0, feature_dims])
            likelihood_all = tf.zeros(shape=[batch, 0, 1])
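            # Accumulate the Hawkes likelihood of each predicted step; its mean is added
            # to the loss weighted by likelihood_imbalance alongside the reconstruction MSE.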
            for predicted_visit_ in range(predicted_visit):
                sequence_time_last_time = input_x_train[:, previous_visit+predicted_visit_-1, :]
                for previous_visit_ in range(previous_visit+predicted_visit_):
                    sequence_time = input_x_train[:, previous_visit_, :]
                    if previous_visit_ == 0:
                        encode_c = tf.Variable(tf.zeros(shape=[batch, hidden_size]))
                        encode_h = tf.Variable(tf.zeros(shape=[batch, hidden_size]))

                    encode_c, encode_h = encode_share([sequence_time, encode_c, encode_h])
                context_state = encode_h

                if predicted_visit_ == 0:
                    decode_c = tf.Variable(tf.zeros(shape=[batch, hidden_size]))
                    decode_h = tf.Variable(tf.zeros(shape=[batch, hidden_size]))
                current_time_index_shape = tf.ones(shape=[predicted_visit_+previous_visit])
                condition_intensity, likelihood = hawkes_process([input_t_train, current_time_index_shape])
                likelihood_all = tf.concat((likelihood_all, tf.reshape(likelihood, [batch, -1, 1])), axis=1)
                generated_next_visit, decode_c, decode_h = decoder_share([sequence_time_last_time, context_state, decode_c, decode_h*condition_intensity])
                predicted_trajectory = tf.concat((predicted_trajectory, tf.reshape(generated_next_visit, [batch, -1, feature_dims])), axis=1)

            mse_generated_loss = tf.reduce_mean(tf.keras.losses.mse(input_x_train[:, previous_visit:previous_visit+predicted_visit, :], predicted_trajectory))
            mae_generated_loss = tf.reduce_mean(tf.keras.losses.mae(input_x_train[:, previous_visit:previous_visit+predicted_visit, :], predicted_trajectory))
            likelihood_loss = tf.reduce_mean(likelihood_all)

            loss += mse_generated_loss * generated_imbalance + likelihood_loss * likelihood_imbalance

            variables = [var for var in encode_share.trainable_variables]
            for weight in encode_share.trainable_variables:
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            for weight in decoder_share.trainable_variables:
                variables.append(weight)
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            for weight in hawkes_process.trainable_variables:
                variables.append(weight)
                loss += tf.keras.regularizers.l2(l2_regularization)(weight)

            gradient = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(gradient, variables))

            if train_set.epoch_completed % 1 == 0 and train_set.epoch_completed not in logged:
                logged.add(train_set.epoch_completed)

                loss_pre = mse_generated_loss
                mse_generated_loss = tf.reduce_mean(
                    tf.keras.losses.mse(input_x_train[:, previous_visit:previous_visit + predicted_visit, :],
                                        predicted_trajectory))

                loss_diff = loss_pre - mse_generated_loss

                if max_loss < mse_generated_loss:
                    count = 0
                else:
                    if max_pace < loss_diff:
                        count = 0

                    else:
                        count += 1
                if count > 9:
                    break

                input_x_test = tf.cast(test_set[:, :, 1:], tf.float32)
                input_t_test = tf.cast(test_set[:, :, 0], tf.float32)

                batch_test = input_x_test.shape[0]
                predicted_trajectory_test = tf.zeros(shape=[batch_test, 0, feature_dims])
                for predicted_visit_ in range(predicted_visit):
                    for previous_visit_ in range(previous_visit):
                        sequence_time_test = input_x_test[:, previous_visit_, :]
                        if previous_visit_ == 0:
                            encode_c_test = tf.Variable(tf.zeros(shape=[batch_test, hidden_size]))
                            encode_h_test = tf.Variable(tf.zeros(shape=[batch_test, hidden_size]))
                        encode_c_test, encode_h_test = encode_share([sequence_time_test, encode_c_test, encode_h_test])

                    if predicted_visit_ != 0:
                        for i in range(predicted_visit_):
                            encode_c_test, encode_h_test = encode_share([predicted_trajectory_test[:, i, :], encode_c_test, encode_h_test])
                    context_state_test = encode_h_test

                    if predicted_visit_ == 0:
                        decode_c_test = tf.Variable(tf.zeros(shape=[batch_test, hidden_size]))
                        decode_h_test = tf.Variable(tf.zeros(shape=[batch_test, hidden_size]))
                        sequence_time_last_time_test = input_x_test[:, predicted_visit_+previous_visit-1, :]

                    current_time_index_shape_test = tf.ones(shape=[previous_visit+predicted_visit_])
                    condition_intensity_test, likelihood_test = hawkes_process([input_t_test, current_time_index_shape_test])

                    sequence_next_visit_test, decode_c_test, decode_h_test = decoder_share([sequence_time_last_time_test, context_state_test, decode_c_test, decode_h_test*condition_intensity_test])
                    predicted_trajectory_test = tf.concat((predicted_trajectory_test, tf.reshape(sequence_next_visit_test, [batch_test, -1, feature_dims])), axis=1)
                    sequence_time_last_time_test = sequence_next_visit_test

                mse_generated_loss_test = tf.reduce_mean(tf.keras.losses.mse(input_x_test[:, previous_visit:previous_visit+predicted_visit, :], predicted_trajectory_test))
                mae_generated_loss_test = tf.reduce_mean(tf.keras.losses.mae(input_x_test[:, previous_visit:previous_visit+predicted_visit, :], predicted_trajectory_test))

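                # Per-patient similarity between the true and generated trajectories:
                # sum the DTW distance over features and average (the hard-coded 29.0
                # presumably corresponds to the number of features in this dataset).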
                r_value_all = []
                for patient in range(batch_test):
                    r_value = 0.0
                    for feature in range(feature_dims):
                        x_ = input_x_test[patient, previous_visit:, feature].numpy().reshape(predicted_visit, 1)
                        y_ = predicted_trajectory_test[patient, :, feature].numpy().reshape(predicted_visit, 1)
                        r_value += DynamicTimeWarping(x_, y_)
                    r_value_all.append(r_value / 29.0)
                print("epoch  {}---train_mse_generate {}- - "
                      "mae_generated_loss--{}--test_mse {}--test_mae  "
                      "{}--r_value {}-count {}".format(train_set.epoch_completed,
                                                       mse_generated_loss,
                                                       mae_generated_loss,
                                                       mse_generated_loss_test,
                                                       mae_generated_loss_test,
                                                       np.mean(r_value_all),
                                                       count))


                # r_value_all = []
                # p_value_all = []
                # r_value_spearman_all = []
                # r_value_kendall_all = []
                # for visit in range(predicted_visit):
                #     for feature in range(feature_dims):
                #         x_ = input_x_test[:, previous_visit+visit, feature]
                #         y_ = predicted_trajectory_test[:, visit, feature]
                #         r_value_ = stats.pearsonr(x_, y_)
                #         r_value_spearman = stats.spearmanr(x_, y_)
                #         r_value_kendall = stats.kendalltau(x_, y_)
                #         if not np.isnan(r_value_[0]):
                #             r_value_all.append(np.abs(r_value_[0]))
                #             p_value_all.append(np.abs(r_value_[1]))
                #         if not np.isnan(r_value_spearman[0]):
                #             r_value_spearman_all.append(np.abs(r_value_spearman[0]))
                #         if not np.isnan(r_value_kendall[0]):
                #             r_value_kendall_all.append(np.abs(r_value_kendall[0]))

                # print("epoch  {}---train_mse_generate {}- - "
                #       "mae_generated_loss--{}--test_mse {}--test_mae  "
                #       "{}----r_value {}--r_spearman---{}-"
                #       "r_kendall---{}    -count {}".format(train_set.epoch_completed,
                #                                            mse_generated_loss,
                #                                            mae_generated_loss,
                #                                            mse_generated_loss_test,
                #                                            mae_generated_loss_test,
                #                                            np.mean(r_value_all),
                #                                            np.mean(r_value_spearman_all),
                #                                            np.mean(r_value_kendall_all),
                #                                            count))
    tf.compat.v1.reset_default_graph()
    return mse_generated_loss_test, mae_generated_loss_test, np.mean(r_value_all)
Пример #47
0
"""
Based on:
https://keras.io/preprocessing/image/
and
https://keras.io/applications/
"""
from keras.applications.inception_v3 import InceptionV3
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from data import DataSet
import os.path

data = DataSet()

# Helper: Save the model.
checkpointer = ModelCheckpoint(filepath=os.path.join(
    'data', 'checkpoints', 'inception.{epoch:03d}-{val_loss:.2f}.hdf5'),
                               verbose=1,
                               save_best_only=True)

# Helper: Stop when we stop learning.
early_stopper = EarlyStopping(patience=10)

# Helper: TensorBoard
tensorboard = TensorBoard(log_dir=os.path.join('data', 'logs'))


def get_generators():
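    # A minimal sketch of the generator setup, assuming frames are stored under
    # data/train and data/test and that data.classes lists the class names (as in
    # the DataSet used above); sizes and augmentations are illustrative only.
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        horizontal_flip=True,
        rotation_range=10.,
        width_shift_range=0.2,
        height_shift_range=0.2)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        os.path.join('data', 'train'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
        os.path.join('data', 'test'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')

    return train_generator, validation_generator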
Пример #48
0
    mst_weight = -1
    if options.dont_generate:
        print_if_not_quiet('graph not saved (as requested)')
    else:
        print_input_footer(num_verts, num_edges, about, out)
        print_if_not_quiet('graph saved to ' + ppinput(options.output_file))
        if out != sys.stdout:
            out.close()

        # generate output with correctness checker, if desired
        if options.correctness:
            if options.dont_track:
                print >> sys.stderr, "warning: skipping correctness output (only done when -t is not specified)"
                return 0
            try:
                mst_weight = compute_mst_weight(options.output_file)
            except CheckerError, e:
                print >> sys.stderr, e

    # record this new input in our input log
    if not options.dont_track:
        data = InputSolution(options.precision, dimensionality, min_val, max_val, num_verts, num_edges, __RND_SEED, mst_weight)
        path = data.get_path() if options.inputs_list_file is None else options.inputs_list_file
        DataSet.add_data_to_log_file(data, path)
        print_if_not_quiet('logged to ' + path)

    return 0

if __name__ == "__main__":
    sys.exit(main())