Code example #1
0
File: nature_replicate.py  Project: wzkwzk123/new_drl
def init_network(input_shape, action_size, model):
    """Build the Q-network, target network, and target-update ops.

    Args:
        input_shape: shape of the visual input fed to the network.
        action_size: number of discrete actions (network output units).
        model: 'nature' for the standard DQN pair, or 'gated' for the
            gated regularized variant (which holds its target variables
            internally, so no separate target network is returned).

    Returns:
        (qnet, tnet, update_ops); tnet is None for the 'gated' model.

    Raises:
        ValueError: if `model` is not a recognized option.
    """
    if model == 'nature':
        qnet = network(input_shape, action_size, 'qnet')
        tnet = network(input_shape, action_size, 'tnet')
        update_ops = update_target_graph('qnet', 'tnet')
    elif model == 'gated':
        # The gated variant lives in a sibling prototype directory.
        sys.path.append('../prototype8/gated')
        sys.path.append('../prototype8/')
        from gated_regularized_qnetwork import gated_regularized_qnetwork_visual_input
        from utils import update_target_graph_vars
        qnet = gated_regularized_qnetwork_visual_input(input_shape,
                                                       action_size)
        tnet = None
        update_ops = update_target_graph_vars(qnet.qnet_vars, qnet.tnet_vars)
    else:
        # Fail fast with a clear message instead of hitting an
        # UnboundLocalError at the return statement below.
        raise ValueError("unknown model: %s" % model)
    return qnet, tnet, update_ops
Code example #2
0
File: model.py  Project: Xiaohui9607/mmvp
    def __init__(self, opt):
        """Set up data loaders, the prediction network, loss, and optimizer.

        Modality layer widths (haptic/vibro/audio) are zeroed when the
        corresponding use_* flag is disabled in `opt`.
        """
        self.opt = opt
        self.device = opt.device
        print("use haptic: ", opt.use_haptic, "    use behavior: ",
              opt.use_behavior, "    use audio: ", opt.use_audio,
              "    use vibro: ", opt.use_vibro)
        loaders = build_dataloader_CY101(opt)
        self.dataloader = dict(zip(('train', 'valid'), loaders))
        self.gen_images = []

        # Zero the layer width for every modality that is switched off.
        for flag, layer in (('use_haptic', 'haptic_layer'),
                            ('use_vibro', 'vibro_layer'),
                            ('use_audio', 'audio_layer')):
            if not getattr(self.opt, flag):
                setattr(self.opt, layer, 0)

        # Arguments shared by the baseline and the full network.
        common = (self.opt, self.opt.channels, self.opt.height,
                  self.opt.width, -1, self.opt.schedsamp_k,
                  self.opt.num_masks, self.opt.model == 'STP',
                  self.opt.model == 'CDNA', self.opt.model == 'DNA',
                  self.opt.context_frames)
        if self.opt.baseline:
            self.net = baseline(*common)
        else:
            self.net = network(*common, self.opt.dna_kern_size,
                               self.opt.haptic_layer, self.opt.behavior_layer,
                               self.opt.audio_layer, self.opt.vibro_layer)

        self.net.to(self.device)
        self.mse_loss = nn.MSELoss()

        # Optionally resume from a checkpoint before building the optimizer.
        if self.opt.pretrained_model:
            self.load_weight()
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          self.opt.learning_rate,
                                          weight_decay=1e-4)
Code example #3
0
File: nmode_utils.py  Project: reedessick/nmodelte
def load_log(filename, enforce_float=False):
    """Load a pickled system log and rebuild it as a networks.system.

    The file is expected to contain, in order: Porb, eccentricity, Mprim,
    Mcomp, Rprim, a list of (mode_tuple, mode_type) pairs, and the
    coupling list K.

    Args:
        filename: path of the pickled log file.
        enforce_float: when True, cast every coupling coefficient k to float.

    Returns:
        A networks.system built from the unpickled data.
    """
    import networks
    import gmodes, pmodes
    from mode_selection import compute_wo

    network = networks.network()

    # Pickle streams are binary: "rb" is required under Python 3 (the
    # original text-mode "r" corrupts the stream). The with-block also
    # guarantees the file is closed even if unpickling raises.
    with open(filename, "rb") as f:
        Porb = pickle.load(f)
        eccentricity = pickle.load(f)
        Mprim = pickle.load(f)
        Mcomp = pickle.load(f)
        Rprim = pickle.load(f)

        wo = compute_wo(Mprim, Rprim)

        for mode_tuple, mode_type in pickle.load(f):
            if mode_type == "generic":
                mode = networks.mode().from_tuple(mode_tuple)
            elif mode_type == "gmode":
                mode = gmodes.gmode().from_tuple(mode_tuple, wo=wo)
            elif mode_type == "pmode":
                mode = pmodes.pmode().from_tuple(mode_tuple)
            else:
                sys.exit("unknown mode_type : %s" % mode_type)
            network.modes.append(mode)

        network.K = pickle.load(f)

    if enforce_float:
        network.K = [[(i, j, float(k)) for i, j, k in coup] for coup in network.K]
    network._update()

    system = networks.system(Mprim, Mcomp, Rprim, Porb, eccentricity, net=network)

    return system
Code example #4
0
    def __init__(self, opt):
        """Build data loaders, the classification network, loss, and optimizer."""
        self.opt = opt
        self.device = opt.device

        loaders = build_dataloader(opt)
        self.dataloader = dict(zip(('train', 'valid'), loaders))

        self.net = network(self.opt, self.opt.channels, self.opt.height,
                           self.opt.width)
        self.net.to(self.device)
        self.criterion = nn.CrossEntropyLoss()

        # Optionally resume from a checkpoint before building the optimizer.
        if self.opt.pretrained_model:
            self.load_weight()
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          self.opt.learning_rate,
                                          weight_decay=1e-4)
Code example #5
0
    def __init__(self, opt):
        """Build data loaders, the video-prediction network, loss, and optimizer."""
        self.opt = opt
        self.device = opt.device

        loaders = build_dataloader(opt)
        self.dataloader = dict(zip(('train', 'valid'), loaders))

        # Flags selecting the transformation module inside the network.
        is_stp = self.opt.model == 'STP'
        is_cdna = self.opt.model == 'CDNA'
        is_dna = self.opt.model == 'DNA'
        self.net = network(self.opt.channels, self.opt.height, self.opt.width,
                           -1, self.opt.schedsamp_k, self.opt.use_state,
                           self.opt.num_masks, is_stp, is_cdna, is_dna,
                           self.opt.context_frames)
        self.net.to(self.device)
        self.mse_loss = nn.MSELoss()
        # Weight of the state-prediction loss term.
        self.w_state = 1e-4
        if self.opt.pretrained_model:
            self.load_weight()
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          self.opt.learning_rate)
def train():
    """Train the classifier on CV_1_801.mat for 11000 steps.

    Every 20 steps logs training loss/accuracy and evaluates test accuracy
    over 16 batches of 50 samples; the loss/accuracy histories are written
    to loss_list.txt / acc_list.txt periodically and once more at the end.
    """
    lr = tf.placeholder("float")
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    labels = tf.placeholder("float", [None, 4])
    is_training = tf.placeholder("bool")
    prediction = network(inputs, is_training)
    correct_prediction = tf.equal(tf.argmax(prediction, 1),
                                  tf.argmax(labels, 1))
    # (renamed from the original local misspelling "accurancy")
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Cross-entropy (EPSILON guards log(0)) plus L2 weight decay.
    loss = -tf.reduce_sum(labels * tf.log(prediction + EPSILON)) + tf.add_n(
        [tf.nn.l2_loss(var)
         for var in tf.trainable_variables()]) * WEIGHT_DECAY
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)

    # Pixel values are rescaled from [0, 255] to [-1, 1].
    data = sio.loadmat("./CV_1_801.mat")
    traindata = np.reshape(data["train"], [2400, 64, 64, 1]) / 127.5 - 1.0
    trainlabel = data["train_label"]
    testdata = np.reshape(data["test"], [800, 64, 64, 1]) / 127.5 - 1.0
    testlabel = data["test_label"]
    max_test_acc = 0
    loss_list = []
    acc_list = []

    # Context manager guarantees the session is released (the original
    # leaked it).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(11000):
            batch_data, label_data = random_read_batch(traindata, trainlabel,
                                                       BATCH_SIZE)
            sess.run(train_op,
                     feed_dict={
                         inputs: batch_data,
                         labels: label_data,
                         is_training: True,
                         lr: LEARNING_RATE
                     })
            if i % 20 == 0:
                [LOSS, TRAIN_ACCURACY] = sess.run(
                    [loss, accuracy],
                    feed_dict={
                        inputs: batch_data,
                        labels: label_data,
                        is_training: False,
                        lr: LEARNING_RATE
                    })
                loss_list.append(LOSS)
                TEST_ACCURACY = 0
                # Evaluate the 800 test samples in 16 batches of 50.
                for j in range(16):
                    TEST_ACCURACY += sess.run(accuracy,
                                              feed_dict={
                                                  inputs:
                                                  testdata[j * 50:j * 50 + 50],
                                                  labels:
                                                  testlabel[j * 50:j * 50 + 50],
                                                  is_training: False,
                                                  lr: LEARNING_RATE
                                              })
                TEST_ACCURACY /= 16
                acc_list.append(TEST_ACCURACY)
                if TEST_ACCURACY > max_test_acc:
                    max_test_acc = TEST_ACCURACY
                print(
                    "Step: %d, loss: %4g, training accuracy: %4g, testing accuracy: %4g, max testing accuracy: %4g"
                    % (i, LOSS, TRAIN_ACCURACY, TEST_ACCURACY, max_test_acc))
            if i % 1000 == 0:
                np.savetxt("loss_list.txt", loss_list)
                np.savetxt("acc_list.txt", acc_list)
        # Final dump: the in-loop save at i % 1000 misses the last 999 steps.
        np.savetxt("loss_list.txt", loss_list)
        np.savetxt("acc_list.txt", acc_list)
Code example #7
0
def compute_score(img_path,
                  img_360=False,
                  mask_only=True,
                  object_map_only=True):
    """Build an attention map for an image from detector micro-services.

    Runs an object detector, a text detector (CTPN), and a face detector —
    each wrapped as a small HTTP service started on a dedicated port —
    scores every detection by area, position, and class, accumulates the
    scores into an attention map, and writes the result to
    output/object_map.jpg.

    Args:
        img_path: path of the image to score.
        img_360: use 360-degree position scoring when True.
        mask_only: write the map itself when True; otherwise write the map
            multiplied with the original image.
        object_map_only: skip blending with the basic map when True.

    Returns:
        Path of the written output image, or None when no object network
        configuration matched.
    """
    height, width = get_dimension(img_path)
    object_map = np.zeros((height, width))

    # --- Object detection: SSD/retina_net_coco/Mask_RCNN ---
    network_to_use = 'Mask_RCNN'
    payload = {'key1': img_path}
    if network_to_use == 'SSD':
        current_network = network(8001, 'SSD_network.py')
        current_network.start_network()
        r = requests.get('http://localhost:' + str(current_network.port) +
                         '/compute',
                         params=payload)
        print('Network used: SSD')
    elif network_to_use == 'retina_net_coco':
        current_network = network(8009, 'retina_net_coco_network.py')
        current_network.start_network()
        r = requests.get('http://localhost:' + str(current_network.port) +
                         '/compute',
                         params=payload)
        # Fixed copy-paste bug: this branch previously printed 'SSD'.
        print('Network used: retina_net_coco')
    elif network_to_use == 'Mask_RCNN':
        current_network = network(8003, 'Mask_RCNN_network.py')
        current_network.start_network()
        r = requests.get('http://localhost:' + str(current_network.port) +
                         '/score',
                         params=payload)
        print('Network used: Mask_RCNN')
    else:
        print('ERROR: No network found')
        return

    # Detector results are exchanged through text files written by the
    # service; get_txt reads them back.
    label_obj = get_txt('label')
    coord_obj = get_txt('coord')

    if coord_obj and label_obj:
        coordinates_obj = vector2matrix(coord_obj, 4)
        print('coordinates obj', coordinates_obj)

        score_area_obj = get_score_area(coordinates_obj, img_path, 'object')
        print('score_area_obj', score_area_obj)

        if img_360:
            score_position_obj = get_score_position_360(
                coordinates_obj, img_path)
            print('score_position_obj_360', score_position_obj)
        else:
            score_position_obj = get_score_position(coordinates_obj, img_path)
            print('score_position_obj_reg', score_position_obj)

        score_classe_obj = get_score_classe(label_obj)
        print('classes_obj', label_obj)
        print('score_classes_obj', score_classe_obj)

        # Final score is the elementwise product of the three criteria.
        score_final = np.multiply(score_position_obj, score_area_obj)
        score_final = np.multiply(score_final, score_classe_obj)
        print('score_final_obj', score_final)

        object_map = attention_map(coordinates_obj, score_final, object_map)
        print('--- MAP (obj) --- : ', np.max(object_map))

    else:
        print('no object detected')

    current_network.stop_network()

    # --- Text detection: CTPN ---
    current_network.port, current_network.name = 8002, 'CTPN_network.py'
    current_network.start_network()

    payload = {'key1': img_path}
    r = requests.get('http://localhost:' + str(current_network.port) +
                     '/compute',
                     params=payload)
    coord_txt = get_txt('boxes')

    if coord_txt:
        score_classe = 1
        # CTPN boxes come back as 9-value rows; convert to xmin/ymin/xmax/ymax.
        coordinates_txt = vector2matrix(coord_txt, 9)
        coordinates_txt = ctpn_convert(coordinates_txt)
        print('xmin, ymin, xmax, ymax', coordinates_txt)

        score_area_txt = get_score_area(coordinates_txt, img_path, 'text')
        print('score_area_txt', score_area_txt)
        if img_360:
            score_position_txt = get_score_position_360(
                coordinates_txt, img_path)
            print('score_position_txt', score_position_txt)
        else:
            score_position_txt = get_score_position(coordinates_txt, img_path)
            print('score_position_txt', score_position_txt)

        # All text boxes share the same class score.
        score_classe_txt = 1

        score_final = np.multiply(score_area_txt, score_position_txt)
        score_final = np.multiply(score_final, score_classe_txt)
        print('score_final_txt', score_final)
        object_map = attention_map(coordinates_txt, score_final, object_map)
        print('--- MAP (txt) --- : ', np.max(object_map))

    else:
        print('no text detected')

    current_network.stop_network()

    # --- Face detection: retina_net (faces) ---
    network_to_use = 'retina_net'
    if network_to_use == 'retina_net':
        current_network.port, current_network.name = 8008, 'retina_net_network.py'
        current_network.start_network()
    elif network_to_use == 'face_CV_network':
        current_network.port, current_network.name = 8006, 'face_CV_network.py'
        current_network.start_network()

    payload = {'key1': img_path}
    r = requests.get('http://localhost:' + str(current_network.port) +
                     '/compute',
                     params=payload)

    label_faces = get_txt('label')
    coord_faces = get_txt('coord')
    if label_faces and coord_faces:
        coordinates_faces = vector2matrix(coord_faces, 4)
        print('coordinates_faces', coordinates_faces)

        score_area_faces = get_score_area(coordinates_faces, img_path, 'face')
        print('score_area_faces', score_area_faces)

        if img_360:
            score_position_faces = get_score_position_360(
                coordinates_faces, img_path)
            print('score_position_faces', score_position_faces)
        else:
            score_position_faces = get_score_position(coordinates_faces,
                                                      img_path)
            print('score_position_faces', score_position_faces)

        score_classe_faces = get_score_classe(label_faces)
        print('classes_faces', label_faces)
        print('score_classes_faces', score_classe_faces)

        score_final = np.multiply(score_area_faces, score_position_faces)
        score_final = np.multiply(score_final, score_classe_faces)
        object_map = attention_map(coordinates_faces, score_final, object_map)
        print('--- MAP (faces) --- : ', np.max(object_map))

    current_network.stop_network()

    output_path = os.path.join('output', 'object_map.jpg')
    # Guard the normalisation: when no detector matched, the map is all
    # zeros and dividing by np.max == 0 would fill it with NaNs.
    peak = np.max(object_map)
    if peak > 0:
        object_map = object_map / peak  #normalised between 0 and 1

    basic_map = get_basic_map(height, width)
    print('basic_map_shape : ', basic_map.shape)

    if object_map_only:
        final_map = object_map
    else:
        final_map = get_final_map(height, width, basic_map, object_map)

    if mask_only:
        final_map = 255 * final_map
        cv2.imwrite(output_path, final_map)
    else:
        # Broadcast the single-channel map over BGR and modulate the image.
        temp = np.repeat(final_map[:, :, np.newaxis], 3, axis=2)
        initial_img = cv2.imread(img_path)
        output_img = np.multiply(temp, initial_img)
        cv2.imwrite(output_path, output_img)

    return (output_path)
def compute_classifying(img_path):
    """Collect (class, position) pairs for all detections in an image.

    Runs the object detector (SSD by default), the CTPN text detector, and
    a face detector over `img_path`, records each detection's class label
    and grid position, assembles duplicates, and returns the result as a
    text file.

    Args:
        img_path: path of the image to classify.

    Returns:
        The assembled detection list converted via csv_to_text, or None
        when no object network configuration matched.
    """
    # Fixed: get_dimension returns (height, width) — see compute_score —
    # so the original `width, height = ...` unpack was swapped.
    height, width = get_dimension(img_path)

    pos = []
    classe = []

    # --- Object detection: SSD/retina_net_coco/Mask_RCNN ---
    network_to_use = 'SSD'
    payload = {'key1': img_path}
    if network_to_use == 'SSD':
        current_network = network(8001, 'SSD_network.py')
        current_network.start_network()
        r = requests.get('http://localhost:' + str(current_network.port) +
                         '/compute',
                         params=payload)
        print('Network used: SSD')
    elif network_to_use == 'retina_net_coco':
        current_network = network(8009, 'retina_net_coco_network.py')
        current_network.start_network()
        r = requests.get('http://localhost:' + str(current_network.port) +
                         '/compute',
                         params=payload)
        # Fixed copy-paste bug: this branch previously printed 'SSD'.
        print('Network used: retina_net_coco')
    elif network_to_use == 'Mask_RCNN':
        current_network = network(8003, 'Mask_RCNN_network.py')
        current_network.start_network()
        r = requests.get('http://localhost:' + str(current_network.port) +
                         '/score',
                         params=payload)
        print('Network used: Mask_RCNN')
    else:
        print('ERROR: No network found')
        return

    label_obj = get_txt('label')
    coord_obj = get_txt('coord')

    if coord_obj and label_obj:
        coordinates_obj = vector2matrix(coord_obj, 4)
        print('coordinates obj', coordinates_obj)

        pos.extend(get_position(coordinates_obj, height, width))
        classe.extend(label_obj)

    else:
        print('no object detected')

    current_network.stop_network()

    # --- Text detection: CTPN ---
    current_network.port, current_network.name = 8002, 'CTPN_network.py'
    current_network.start_network()

    payload = {'key1': img_path}
    r = requests.get('http://localhost:' + str(current_network.port) +
                     '/compute',
                     params=payload)
    coord_txt = get_txt('boxes')

    if coord_txt:
        # CTPN boxes come back as 9-value rows; convert to xmin/ymin/xmax/ymax.
        coordinates_txt = vector2matrix(coord_txt, 9)
        coordinates_txt = ctpn_convert(coordinates_txt)

        # One 'text' label per detected box.
        for i in range(coordinates_txt.shape[0]):
            classe.append('text')

        pos.extend(get_position(coordinates_txt, height, width))

    else:
        print('no text detected')

    current_network.stop_network()

    # --- Face detection: retina_net (faces) ---
    network_to_use = 'face_CV_network'
    if network_to_use == 'retina_net':
        current_network.port, current_network.name = 8008, 'retina_net_network.py'
        current_network.start_network()
    elif network_to_use == 'face_CV_network':
        current_network.port, current_network.name = 8006, 'face_CV_network.py'
        current_network.start_network()

    payload = {'key1': img_path}
    r = requests.get('http://localhost:' + str(current_network.port) +
                     '/compute',
                     params=payload)

    coord_faces = get_txt('coord')

    if coord_faces:
        coordinates_faces = vector2matrix(coord_faces, 4)
        print('coordinates_faces', coordinates_faces)

        # One 'face' label per detected box.
        for i in range(coordinates_faces.shape[0]):
            classe.append('face')

        pos.extend(get_position(coordinates_faces, height, width))

    current_network.stop_network()

    # Assembly and count same boxes

    final_list = assembly(classe, pos)

    # Convert to text file
    final_list = csv_to_text(final_list)

    return (final_list)
Code example #9
0
from __future__ import print_function

from flask import Flask, redirect, request, url_for, send_file
from redis import Redis
import re
import os
#from import_unimport_function import import_unimport, compute_img
from werkzeug.utils import secure_filename
from networks import network
# NOTE(review): presumably a placeholder handle whose port/script are
# reassigned per request — confirm against the route handlers.
import_state = network(0, 'None')

# Port assignments for the detector micro-services:
#8001 : SSD
#8002 : CTPN
#8003 : Mask_RCNN
#8004 : face_detector
#8005 : face_landmark_detection
#8006 : face_CV_network
#8007 : score
#8007 : score_360 (same port)
#8008 : retina_net (faces)
#8009 : retina_net (coco)
#8010 : research

# Directory where uploaded images are stored.
UPLOAD_FOLDER = 'upload'
# Accepted image extensions (both lower- and upper-case variants).
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'JPG', 'JPEG', 'PNG'])

# Short timeouts so a missing Redis backend fails fast instead of hanging.
redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER