Code Example #1
def train_net(solver_prototxt, roidb, output_dir, pretrained_model, max_iter, gpus):
    """
    Training the network with multiple gpu
    :param solver_prototxt: the network prototxt
    :param roidb: the training roidb
    :param output_dir: the output directory to be used for saving the models
    :param pretrained_model: the pre-trained model for fine-tuning
    :param max_iter: maximum number of iterations for solver
    :param gpus: the GPU ids to be used for solving
    :return:
    """

    # NCCL uses a shared uid to identify the session across processes
    uid = caffe.NCCL.new_uid()
    caffe.init_log(0, True)
    caffe.log('Using devices %s' % str(gpus))
    # Create a process per GPU
    procs = []
    for rank in range(len(gpus)):
        p = Process(target=worker,
                    args=(rank, uid, gpus, solver_prototxt, roidb, pretrained_model, max_iter, output_dir))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join() 
    print('done solving!')
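
The `worker` target above is not shown in this snippet. A minimal sketch of what such a per-GPU worker could look like, assuming the usual pycaffe NCCL pattern (modeled on BVLC caffe's python/train.py and Code Examples #8 and #12 below; the solver wiring and the use of roidb/output_dir are assumptions):

def worker(rank, uid, gpus, solver_prototxt, roidb, pretrained_model,
           max_iter, output_dir):
    # bind this process to its GPU and register it with the solver group
    caffe.set_device(gpus[rank])
    caffe.set_mode_gpu()
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model:
        solver.net.copy_from(pretrained_model)
    # roidb and output_dir would feed a custom data layer and the
    # snapshot path in the real project (assumption)

    # NCCL broadcasts initial weights, then all-reduces gradients each step
    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)
    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(max_iter)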
Code Example #2
File: train.py Project: BVLC/caffe
def train(
        solver,  # solver proto definition
        snapshot,  # solver snapshot to restore
        gpus,  # list of device ids
        timing=False,  # show timing info for compute and communications
):
    # NCCL uses a uid to identify a session
    uid = caffe.NCCL.new_uid()

    caffe.init_log()
    caffe.log('Using devices %s' % str(gpus))

    procs = []
    for rank in range(len(gpus)):
        p = Process(target=solve,
                    args=(solver, snapshot, gpus, timing, uid, rank))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
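
Each GPU gets its own solver process; the shared NCCL uid is what lets the per-process caffe.NCCL instances inside solve() find each other and all-reduce gradients. A typical invocation (arguments illustrative): train('models/solver.prototxt', None, [0, 1], timing=True).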
Code Example #3
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    gpu_id = args.gpu_id
    gpu_list = gpu_id.split(',')
    gpus = [int(i) for i in gpu_list]

    print('Using config:')
    pprint.pprint(cfg)
    assert cfg.TEST.HAS_RPN

    image_ids = load_image_ids(args.data_split)
    random.seed(10)
    random.shuffle(image_ids)
    # Split image ids between gpus
    image_ids = [image_ids[i::len(gpus)] for i in range(len(gpus))]

    caffe.init_log()
    caffe.log('Using devices %s' % str(gpus))
    procs = []

    for i, gpu_id in enumerate(gpus):
        outfile = '%s.%d' % (args.outfile, gpu_id)
        p = Process(target=generate_tsv,
                    args=(gpu_id, args.prototxt, args.caffemodel, image_ids[i], outfile))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
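
The slice image_ids[i::len(gpus)] above deals the shuffled ids out round-robin, so each GPU receives a nearly equal share. A small illustration of the striding:

# e.g. 7 ids split across 3 GPUs
ids = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
shards = [ids[i::3] for i in range(3)]
# shards == [['a', 'd', 'g'], ['b', 'e'], ['c', 'f']]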
Code Example #4
            net = caffe.Net('upresnet10_3.prototxt', caffe.TEST)
            for key in net.params:
                l = len(net.params[key])
                net.params[key][0].data[...] = params[key + '/W']
                if l >= 2:
                    net.params[key][1].data[...] = params[key + '/b']

            input_data = np.empty(net.blobs['input'].data.shape,
                                  dtype=np.float32)
            input_data[...] = np.random.random_sample(
                net.blobs['input'].data.shape)

            net.blobs['input'].data[...] = input_data
            ret = net.forward()

            # second forward pass with fresh random input; this result is
            # compared against the chainer model's output below
            input_data = np.empty(net.blobs['input'].data.shape,
                                  dtype=np.float32)
            input_data[...] = np.random.random_sample(
                net.blobs['input'].data.shape)

            net.blobs['input'].data[...] = input_data
            ret = net.forward()

            batch_y = model(input_data)
            print(batch_y.array - ret['/conv_post'])


if __name__ == '__main__':
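    # minloglevel 3 keeps only FATAL output (glog severities: 0=INFO,
    # 1=WARNING, 2=ERROR, 3=FATAL)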
    caffe.init_log(3)
    main()
Code Example #5
def main(params):

    sys.path.insert(0, os.path.join(params.bottomup_path, 'lib'))
    from fast_rcnn.config import cfg, cfg_from_file
    from fast_rcnn.test import im_detect, _get_blobs
    from fast_rcnn.nms_wrapper import nms

    ###########################
    # CNN : Faster-RCNN setting
    data_path = os.path.join(params.bottomup_path, 'data/genome/1600-400-20')

    # Load classes
    classes = ['__background__']
    with open(os.path.join(data_path, 'objects_vocab.txt')) as f:
        for obj in f.readlines():  # avoid shadowing the builtin 'object'
            classes.append(obj.split(',')[0].lower().strip())

    # Load attributes
    attributes = ['__no_attribute__']
    with open(os.path.join(data_path, 'attributes_vocab.txt')) as f:
        for att in f.readlines():
            attributes.append(att.split(',')[0].lower().strip())

    GPU_ID = params.gpu_id  # if we have multiple GPUs, pick one
    caffe.init_log()
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    net = None
    cfg_from_file(
        os.path.join(params.bottomup_path,
                     'experiments/cfgs/faster_rcnn_end2end_resnet.yml'))

    weights = os.path.join(
        params.bottomup_path,
        'data/faster_rcnn_models/resnet101_faster_rcnn_final.caffemodel')
    prototxt = os.path.join(
        params.bottomup_path,
        'models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt')

    net = caffe.Net(prototxt, caffe.TEST, weights=weights)

    conf_thresh = 0.4
    min_boxes = params.num_objects
    max_boxes = params.num_objects
    ###########################

    ###########################
    # RNN : Caption generation setting
    # load json file
    label_info = json.load(open(params.input_labels))
    word_to_idx = label_info['word_to_idx']

    # load h5 file
    caps_info = h5py.File(params.input_caps, 'r', driver='core')
    seq_length = caps_info['labels'].shape[1]

    # GPU options
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True

    # build a graph to sample captions
    graph_gen_cap = tf.Graph()
    sess_gen_cap = tf.Session(graph=graph_gen_cap, config=config)
    with graph_gen_cap.as_default():
        model = CaptionGenerator(word_to_idx,
                                 num_features=params.num_objects,
                                 dim_feature=params.dim_features,
                                 dim_embed=params.dim_word_emb,
                                 dim_hidden=params.rnn_hid_size,
                                 dim_attention=params.att_hid_size,
                                 n_time_step=seq_length - 1)
        alphas, sampled_captions = model.build_sampler(max_len=params.max_len)
        saver1 = tf.train.Saver()
        saver1.restore(sess_gen_cap, params.test_model)
    tf.reset_default_graph()
    ############################

    ###########################
    # Face : Replacer
    name_replacer = NameReplacer(model.idx_to_word, params.score_thr)
    ############################

    ###########################
    # Run Image Captioning with face detection

    while True:
        full_fname = raw_input("Enter the image path and name:")
        if full_fname == 'Exit':
            break
        if not os.path.exists(full_fname):
            print("Not Exist File : {}".format(full_fname))
            continue

        ###########################
        # Object Detection
        im = cv2.imread(full_fname)
        scores, boxes, attr_scores, rel_scores = im_detect(net, im)

        # Keep the original boxes, don't worry about the regression bbox outputs
        rois = net.blobs['rois'].data.copy()
        # unscale back to raw image space
        blobs, im_scales = _get_blobs(im, None)

        cls_boxes = rois[:, 1:5] / im_scales[0]
        cls_prob = net.blobs['cls_prob'].data
        attr_prob = net.blobs['attr_prob'].data
        pool5 = net.blobs['pool5_flat'].data

        # Keep only the best detections
        max_conf = np.zeros((rois.shape[0]))
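        # for each foreground class, run NMS on the shared boxes and keep
        # each box's best class score that survives suppression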
        for cls_ind in range(1, cls_prob.shape[1]):
            cls_scores = scores[:, cls_ind]
            dets = np.hstack(
                (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
            keep = np.array(nms(dets, cfg.TEST.NMS))
            max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep],
                                      cls_scores[keep], max_conf[keep])

        keep_boxes = np.where(max_conf >= conf_thresh)[0]
        if len(keep_boxes) < min_boxes:
            keep_boxes = np.argsort(max_conf)[::-1][:min_boxes]
        elif len(keep_boxes) > max_boxes:
            keep_boxes = np.argsort(max_conf)[::-1][:max_boxes]

        feats = pool5[keep_boxes]
        ############################

        ###########################
        # Caption generation using CNN features
        feed_dict = {model.features: [feats]}
        alps, sam_cap = sess_gen_cap.run([alphas, sampled_captions], feed_dict)
        decoded = decode_captions(sam_cap, model.idx_to_word)
        ############################

        ###########################
        # Name replacer
        name_list, conf_list, roi_list = vtt_face_recognize(
            full_fname, params.url, params.post_data)
        replace_decoded, words = name_replacer.name_replace_caps(
            sam_cap, alps, cls_boxes, name_list, conf_list, roi_list)
        print("Original caption : %s" % decoded[0])
        print("Replaced caption : %s" % replace_decoded[0])
        ############################

        ###########################
        # Showing
        img = skimage.io.imread(full_fname)
        img = skimage.img_as_float(img)
        boxes = cls_boxes[keep_boxes]
        boxes = boxes.astype(int)

        # draw attention map
        fig = plt.figure(figsize=(16, 8))
        ax = fig.add_subplot(3, 6, 1)
        ax.imshow(img)
        plt.axis('off')

        # Plot images with attention weights
        words = words[0]
        for t in range(len(words)):
            if t > 16:
                break
            if words[t] == '<BLANK>':
                continue
            alphamap = np.zeros((img.shape[0], img.shape[1]))
            for b in range(boxes.shape[0]):
                alphamap[boxes[b, 1]:boxes[b, 3],
                         boxes[b, 0]:boxes[b, 2]] += alps[0, t, b]
            max_idx = np.argmax(alps[0, t, :])
            att_img = np.dstack((img, alphamap))
            ax = fig.add_subplot(3, 6, t + 2)
            plt.text(0,
                     1,
                     '%s' % (words[t]),
                     color='black',
                     backgroundcolor='white',
                     fontsize=8)
            ax.imshow(att_img)
            ax.add_patch(
                patches.Rectangle((boxes[max_idx, 0], boxes[max_idx, 1]),
                                  boxes[max_idx, 2] - boxes[max_idx, 0],
                                  boxes[max_idx, 3] - boxes[max_idx, 1],
                                  linewidth=1,
                                  edgecolor='r',
                                  facecolor='none'))
            plt.axis('off')

        fig.tight_layout()
        plt.show()
Code Example #6
def caffe_to_sas(network_file,
                 model_name,
                 network_param=None,
                 phase=caffe.TEST,
                 verbose=False):
    '''
    Generate a SAS deep learning model from Caffe definition

    Parameters
    ----------
    network_file : string
       Fully qualified file name of network definition file (*.prototxt).
    model_name : string
       Name for deep learning model.
    network_param : string, optional
       Fully qualified file name of network parameter file (*.caffemodel).
    phase : int, optional
       One of {caffe.TRAIN, caffe.TEST, None}.
    verbose : bool, optional
       Set to True to view all Caffe information messages.

    Returns
    -------
    string
       The generated SAS deep learning model definition code.

    '''

    # the generated model definition code is accumulated as a string

    try:
        output_code = ''
        # initialize Caffe logging facility
        caffe.init_log(0, verbose)

        # instantiate a model and read network parameters
        if (network_param is None):
            model = caffe.Net(network_file, phase)
        else:
            model = caffe.Net(network_file, phase, weights=network_param)
        net = caffe_pb2.NetParameter()
        # the '.tmp' prototxt copy is assumed to be written by an upgrade
        # step elsewhere in this project
        text_format.Merge(open(network_file + '.tmp').read(), net)

        # remove temporary file created
        if os.path.isfile(network_file + '.tmp'):
            os.remove(network_file + '.tmp')

        # identify common Caffe/SAS computation layers
        layer_list = []
        for layer in net.layer:
            include_layer = False
            if (len(layer.include) == 0):
                include_layer = True
            else:
                for layer_phase in layer.include:
                    if (caffe.TEST == layer_phase.phase):
                        include_layer = True

            # exclude layers not implemented (or implemented in a different fashion)
            if (layer.type.lower() not in common_layers):
                include_layer = False

            if include_layer:
                layer_list.append(make_composite_layer(layer))

        # associate activations with computation layers
        for layer in net.layer:
            layer_type = layer.type.lower()
            if (layer_type in ['relu', 'prelu', 'elu', 'sigmoid', 'tanh']):
                layer_index = None
                for ii in range(len(layer_list)):
                    if (layer.top[0] == layer_list[ii].layer_parm.top[0]):
                        layer_index = ii

                if layer_index is not None:
                    layer_list[layer_index].related_layers.append(layer)
                else:
                    raise CaffeParseError(
                        'Activation layer ' + layer.name +
                        ' is not associated with any computation layer.')

        # associate dropout with computation layers
        for layer in net.layer:
            layer_type = layer.type.lower()
            if (layer_type == 'dropout'):
                layer_index = None
                for ii in range(len(layer_list)):
                    if (layer.top[0] == layer_list[ii].layer_parm.top[0]):
                        layer_index = ii

                if layer_index is not None:
                    layer_list[layer_index].related_layers.append(layer)
                else:
                    raise CaffeParseError(
                        'Dropout layer ' + layer.name +
                        ' is not associated with any computation layer.')

        # associate softmax with a fully-connected layer
        for layer in net.layer:
            layer_type = layer.type.lower()
            if (layer_type in ['softmax', 'softmaxwithloss']):
                layer_index = None
                for ii in range(len(layer_list)):
                    for jj in range(len(layer.bottom)):
                        if (layer.bottom[jj] ==
                                layer_list[ii].layer_parm.top[0]):
                            layer_index = ii

                if layer_index is not None:
                    layer_list[layer_index].related_layers.append(layer)
                else:
                    raise CaffeParseError(
                        'Softmax layer ' + layer.name +
                        ' is not associated with any fully-connected layer.')

        # determine source layer(s) for computation layers
        for ii in range(len(layer_list)):
            for kk in range(len(layer_list[ii].layer_parm.bottom)):
                name = None
                for jj in range(ii):
                    if (layer_list[ii].layer_parm.bottom[kk] ==
                            layer_list[jj].layer_parm.top[0]):
                        name = layer_list[jj].layer_parm.name

                if name:
                    layer_list[ii].source_layer.append(name)

        # associate scale layer with batchnorm layer
        for layer in net.layer:
            if (layer.type.lower() == 'scale'):
                bn_found = False
                for ii in range(len(layer_list)):
                    if ((layer_list[ii].layer_parm.type.lower() == 'batchnorm')
                            and
                        (layer_list[ii].layer_parm.top[0] == layer.top[0])):
                        layer_list[ii].related_layers.append(layer)
                        bn_found = True
                        break

                if not bn_found:
                    raise CaffeParseError(
                        'Scale layer ' + layer.name +
                        ' is not associated with a batch normalization layer')

        # loop over included layers
        for clayer in layer_list:
            layer_type = clayer.layer_parm.type.lower()
            if (layer_type == 'pooling'):  # average/max pooling
                sas_code = caffe_pooling_layer(clayer, model_name)
            elif (layer_type == 'convolution'):  # 2D convolution
                sas_code = caffe_convolution_layer(clayer, model_name)
            elif (layer_type == 'batchnorm'):  # batch normalization
                sas_code = caffe_batch_normalization_layer(clayer, model_name)
            elif (layer_type in ['data', 'memorydata']):  # input layer
                sas_code = caffe_input_layer(clayer, model_name)
            elif (layer_type == 'eltwise'):  # residual
                sas_code = caffe_residual_layer(clayer, model_name)
            elif (layer_type == 'innerproduct'):  # fully connected
                sas_code = caffe_full_connect_layer(clayer, model_name)
            else:
                raise CaffeParseError(layer_type +
                                      ' is an unsupported layer type')

            # write SAS code associated with Caffe layer
            if sas_code:
                output_code = output_code + sas_code + '\n\n'

            else:
                raise CaffeParseError(
                    'Unable to generate SAS definition for layer ' +
                    clayer.layer_parm.name)

        # convert the weights from BINARYPROTO to HDF5 (once, after the
        # layer loop)
        if network_param is not None:
            sas_hdf5 = os.path.join(os.getcwd(),
                                    '{}_weights.h5'.format(model_name))
            write_caffe_hdf5(model, layer_list, sas_hdf5)

        return output_code

    except CaffeParseError as err_msg:
        print(err_msg)
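
A hedged usage sketch for the function above (file names are placeholders); the returned string holds the generated SAS model definition:

sas_code = caffe_to_sas('deploy.prototxt', 'mymodel',
                        network_param='weights.caffemodel')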
Code Example #7
        basename, ext = os.path.splitext(filename)
        if ext == '.npz':
            model_path = os.path.join(model_dir, filename)
            print(model_path)
            channels = 3 if 'rgb' in filename else 1
            model = model_class(channels)
            size = 64 + model.offset
            data = np.zeros((1, channels, size, size), dtype=np.float32)
            x = chainer.Variable(data)
            chainer.serializers.load_npz(model_path, model)

            params = {}
            for path, param in model.namedparams():
                params[path] = param.array

            net = caffe.Net('upresnet10_3.prototxt', caffe.TEST)
            for key in net.params:
                l = len(net.params[key])
                net.params[key][0].data[...] = params[key + '/W']
                if l >= 2:
                    net.params[key][1].data[...] = params[key + '/b']
 
            prototxt_path = '{}.prototxt'.format(fname_convert_table[basename])
            caffemodel_path = '{}.json.caffemodel'.format(fname_convert_table[basename])
            net.save(caffemodel_path)
            shutil.copy('upresnet10_3.prototxt', prototxt_path)

if __name__ == '__main__':
    caffe.init_log(3)
    main()
Code Example #8
def partseg_train(network, exp_dir, category, args):
    def solve2(solver, args, uid, rank):
        if args.cpu:
            caffe.set_mode_cpu()
        else:
            caffe.set_mode_gpu()
        caffe.set_device(args.gpus[rank])
        caffe.set_solver_count(len(args.gpus))
        caffe.set_solver_rank(rank)
        caffe.set_multiprocess(True)
        
        solver = caffe.get_solver(solver)

        if args.init_model:
            if args.init_model.endswith('.caffemodel'):
                solver.net.copy_from(args.init_model)
            else:
                solver.net.copy_from(os.path.join(exp_dir, '{}_iter_{}.caffemodel'.format(category, args.init_model)))

        if args.init_state:
            if args.init_state.endswith('.solverstate'):
                solver.restore(args.init_state)
            else:
                solver.restore(os.path.join(exp_dir, '{}_iter_{}.solverstate'.format(category, args.init_state)))

        nccl = caffe.NCCL(solver, uid)
        nccl.bcast()
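        # NOTE: unlike the BVLC reference pattern (cf. Code Example #12),
        # solver.add_callback(nccl) is never called here; gradients are
        # reduced only via the layer-wise after_backward hook below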
        if solver.param.layer_wise_reduce:
            solver.net.after_backward(nccl)
        solver.step(solver.param.max_iter)

    if network == 'seq':
        batch_norm = True
        conv_weight_filler = 'xavier'
        network = models.partseg_seq(arch_str=args.arch,
                                     skip_str=args.skips,
                                     dataset=args.dataset,
                                     dataset_params=args.dataset_params,
                                     category=category,
                                     feat_dims_str=args.feat,
                                     lattice_dims_str=args.lattice,
                                     sample_size=args.sample_size,
                                     batch_size=args.batch_size,
                                     batchnorm=batch_norm,
                                     conv_weight_filler=conv_weight_filler,
                                     save_path=os.path.join(exp_dir, category + '_net.prototxt'))

        models.partseg_seq(deploy=True,
                           arch_str=args.arch,
                           skip_str=args.skips,
                           dataset=args.dataset,
                           dataset_params=args.dataset_params,
                           category=category,
                           feat_dims_str=args.feat,
                           lattice_dims_str=args.lattice,
                           sample_size=args.sample_size,
                           batchnorm=batch_norm,
                           save_path=os.path.join(exp_dir, category + '_net_deploy.prototxt'))
    else:
        assert network.endswith('.prototxt'), 'Please provide a valid prototxt file'
        print('Using network defined at {}'.format(network))

    random_seed = 0
    debug_info = False
    
    solver = create_solver.standard_solver(network,
                                           network,
                                           os.path.join(exp_dir, category) + '_' + args.prefix,
                                           base_lr=args.base_lr,
                                           gamma=args.lr_decay,
                                           stepsize=args.stepsize,
                                           test_iter=args.test_iter,
                                           test_interval=args.test_interval,
                                           max_iter=args.num_iter,
                                           snapshot=args.snapshot_interval,
                                           solver_type=args.solver_type,
                                           weight_decay=args.weight_decay,
                                           iter_size=args.iter_size,
                                           debug_info=debug_info,
                                           random_seed=random_seed,
                                           save_path=os.path.join(exp_dir, category+'_solver.prototxt'))    
    ## Multiple GPUs
    uid = caffe.NCCL.new_uid()
    
    caffe.init_log(0, True)
    caffe.log('Using devices %s' % str(args.gpus))
    procs = []
    
    for rank in range(len(args.gpus)):
        p = Process(target=solve2,
                    args=(solver, args, uid, rank))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
Code Example #9
File: train_net_multigpu.py Project: nnop/mtnet
        solver.step(step_iters)
        if rank == 0:
            logging.info('curr_iter: {}, step_iters: {}'.format(
                curr_iter, step_iters))
            solver.snapshot()
            curr_iter += step_iters


if __name__ == "__main__":
    solver_proto = 'models/multigpu/solver.prototxt'
    weights_file = 'data/imagenet_models/VGG16.v2.caffemodel'
    cfg_file = 'experiments/faster_rcnn_end2end.yml'
    gpus = [3, 4, 5, 6]

    # caffe
    caffe.init_log(0, True)
    caffe.log('Using device {}'.format(str(gpus)))
    uid = caffe.NCCL.new_uid()

    # cfg
    cfg_from_file(cfg_file)
    assert (cfg.TRAIN.HAS_RPN                \
        and cfg.TRAIN.BBOX_REG               \
        and cfg.TRAIN.BBOX_NORMALIZE_TARGETS \
        and cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED)

    # roidb
    imdb_name = 'ftdata_train'
    imdb = get_imdb(imdb_name)
    if cfg.TRAIN.USE_FLIPPED:
        print 'Appending horizontally-flipped training examples...'
Code Example #10
def set_log_level(level):
    from caffe import init_log
    init_log(level)
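
The integer passed through to init_log is a glog severity threshold. A minimal usage sketch:

# glog severities: 0=INFO, 1=WARNING, 2=ERROR, 3=FATAL
set_log_level(2)  # silence INFO and WARNING; keep ERROR and FATAL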
Code Example #11
def beam_decode(
        model,  # net proto definition
        vocab_file,  # model vocab text file
        weights,  # pretrained weights to use
        gpu,  # device id
        outfile,  # json output
):

    vocab = []
    with open(vocab_file) as f:
        for word in f:
            vocab.append(word.strip())
    print 'Loaded {:,} words into caption vocab'.format(len(vocab))

    # init_log(0, 1): log from INFO up (level 0) and write to stderr
    caffe.init_log(0, 1)
    caffe.log('Using device %s' % str(gpu))
    caffe.set_device(int(gpu))
    caffe.set_mode_gpu()

    net = caffe.Net(model, weights, caffe.TEST)
    print 'Loaded proto {} with weights {}'.format(model, weights)
    net.layers[0].load_dataset()

    id_to_caption = {}
    iteration = 0
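    # iterate until the data layer wraps around, detected when a top-beam
    # image_id repeats (the 'ending' flag)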
    while True:
        ending = False
        out = net.forward()
        image_ids = net.blobs['image_id'].data
        captions = net.blobs['caption'].data
        scores = net.blobs['log_prob'].data
        batch_size = image_ids.shape[0]

        if captions.shape[0] == batch_size:
            # Decoding a compact net
            beam_size = captions.shape[2]
            for n in range(batch_size):
                if iteration == 0:
                    print "\nhttp://mscoco.org/explore/?id=%d" % image_ids[n][0]
                for b in range(beam_size):
                    cap = translate(vocab, captions[n][0][b])
                    score = scores[n][0][b]
                    if iteration == 0:
                        print '[%d] %.2f %s' % (b, score, cap)
        else:
            # Decoding an unrolled net
            beam_size = captions.shape[0] / batch_size
            if iteration == 0:
                print "Beam size: %d" % beam_size
            for n in range(batch_size):
                image_id = int(image_ids[n][0])
                if iteration == 0:
                    print "\nhttp://mscoco.org/explore/?id=%d" % image_id
                for b in range(beam_size):
                    cap = translate(vocab, captions[n * beam_size + b])
                    score = scores[n * beam_size + b]
                    if b == 0:
                        if image_id in id_to_caption:
                            ending = True
                        else:
                            id_to_caption[image_id] = cap
                    if iteration == 0:
                        print '[%d] %.2f %s' % (b, score, cap)
        iteration += 1
        if iteration % 1000 == 0:
            print 'Iteration: %d' % iteration
        if ending:
            break

    output = []
    for image_id in sorted(id_to_caption.keys()):
        output.append({
            'image_id': image_id,
            'caption': id_to_caption[image_id]
        })
    with open(outfile, 'w') as f:
        json.dump(output, f)
    print 'Generated %d outputs, saving to %s' % (len(output), outfile)
    s = CaptionScorer()
    s.score(outfile)
Code Example #12
        solver.net.copy_from(_weights)
    
    # get_gpu_id is a method of this project's custom data layer (not part
    # of stock pycaffe); it tells the layer which GPU's shard to load
    solver.net.layers[0].get_gpu_id(gpus[rank])

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)

    for _ in range(max_iter):
        solver.step(1)


if __name__ == '__main__':
    uid = caffe.NCCL.new_uid()
    caffe.init_log()
    caffe.log('Using devices %s' % str(gpus))
    procs = []

    for rank in range(len(gpus)):
        p = Process(target=solve,
                    args=(solver_prototxt, gpus, uid, rank, max_iter))
        p.daemon = False
        p.start()
        procs.append(p)
    for p in procs:
        p.join()

Code Example #13
File: face.py Project: nerddd/Useful_tools
    def verify(self, image1, fpoints1, image2, fpoints2):
        """
        Verify similarity of given two image
        * argv   : numpy  OpenCV like BGR uint8
                   list   Facial key points
        * return : float  Cosine similarity
        """
        face1 = self._align(image1, fpoints1)
        face2 = self._align(image2, fpoints2)
        
        # anti-spoofing check on face1 before extracting features
        anti_pred = self.anti_spoofing(self._anti_net, face1)
        if anti_pred == 0:  # face1 is real
            self._extract([face1, face2])
            if self._pca_model:
                return self._similar(self._pca.transform(self._merge))
            else:
                return self._similar(self._merge)
        else:
            return -4  # sentinel: face1 judged a spoof

if __name__ == "__main__":
    caffe.init_log(2)
    test_main()
Code Example #14
File: pycaffe_mnist.py Project: avolkov1/rpyc-for-DL
def main(args_dict, daemon_flag=True, timeout=None):
    workdir = args_dict['workdir']
    os.chdir(workdir)
    mnist_dst = os.path.join(workdir, 'mnist')
    mkdir_p(mnist_dst)

    # https://github.com/BVLC/caffe/issues/4715
    os.environ['GLOG_log_dir'] = mnist_dst
    # os.environ['GLOG_logtostderr'] = "1"
    import caffe  # @UnresolvedImport
    caffe.init_log()
    # caffe.set_mode_gpu()

    print('LOGS WILL APPEAR IN: {}'.format(workdir))

    from google.protobuf import text_format  # @UnresolvedImport
    from caffe.proto import caffe_pb2  # @UnresolvedImport

    netp = caffe_pb2.NetParameter()

    codedir = args_dict['codedir']
    datadir = args_dict['datadir']

    # print('CODEDIR: {}'.format(codedir))
    # print('WORKDIR: {}'.format(workdir))
    # print('WORKDIR: {}'.format(datadir))
    # print('CWD: {}'.format(os.getcwd()))

    lenet_file_src = '{}/mnist/lenet_train_test.prototxt.tmpl'.format(codedir)
    with open(lenet_file_src, 'r') as fo:
        text_format.Parse(fo.read(), netp)

    for ll in netp.layer:
        # print ll
        if ll.name == "mnist" and ll.type == "Data":
            # print 'setting path'
            if ll.include[0].phase == caffe.TRAIN:
                ll.data_param.source = '{}/mnist_train_lmdb'.format(datadir)
            if ll.include[0].phase == caffe.TEST:
                ll.data_param.source = '{}/mnist_test_lmdb'.format(datadir)

    lenet_file = '{}/mnist/lenet_train_test.prototxt'.format(workdir)
    with open(lenet_file, 'w') as fo:
        fo.write(str(netp))

    solver_file_src = '{}/mnist/lenet_solver.prototxt'.format(codedir)
    solver_file = '{}/mnist/lenet_solver.prototxt'.format(workdir)
    shutil.copy(solver_file_src, solver_file)

    ngpus = args_dict['ngpus']
    gpus = range(ngpus)

    # Latest bvlc/caffe:gpu has a working multigpu implementation via python.
    # https://github.com/BVLC/caffe/blob/master/python/train.py
    from train import solve  # @UnresolvedImport

    # Customize the multigpu solver if you'd like. Change the caffe_mgpu_train
    # file locally and use that.
    # codelibdir = args_dict['codelibdir']
    # sys.path.insert(1, codelibdir)  #
    # import caffe_mgpu_train as train
    # train.train(solver_file, None, gpus, False)

    def train(
            solver,  # solver proto definition
            snapshot,  # solver snapshot to restore
            gpus,  # list of device ids
            timing=False,  # show timing info for compute and communications
    ):
        # NCCL uses a uid to identify a session
        uid = caffe.NCCL.new_uid()

        # caffe.init_log()
        caffe.log('Using devices %s' % str(gpus))

        procs = []
        for rank in range(len(gpus)):
            p = Process(target=solve,
                        args=(solver, snapshot, gpus, timing, uid, rank))
            p.daemon = daemon_flag
            p.start()
            procs.append(p)

        for p in procs:
            p.join(timeout=timeout)

    train(solver_file, None, gpus, False)
    # multiprocessing.active_children() has the side effect of joining any
    # child processes that have already finished
    active_children()
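
A hedged invocation sketch (the dict keys match those read above; all paths are placeholders):

main({'workdir': '/tmp/mnist_run',
      'codedir': '/opt/caffe-code',
      'datadir': '/data/mnist',
      'ngpus': 2})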