Example #1
    def __init__(self,
                 model_file,
                 pretrained_file,
                 gpu=False,
                 mean=None,
                 input_scale=None,
                 raw_scale=None,
                 channel_swap=None,
                 context_pad=None):
        """
        Take
        gpu, mean, input_scale, raw_scale, channel_swap: params for
            preprocessing options.
        context_pad: amount of surrounding context to take s.t. a `context_pad`
            sized border of pixels in the network input image is context, as in
            R-CNN feature extraction.
        """
        caffe.Net.__init__(self, model_file, pretrained_file)
        caffe.set_phase_test()

        if gpu:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()

        if mean is not None:
            self.set_mean(self.inputs[0], mean)
        if input_scale is not None:
            self.set_input_scale(self.inputs[0], input_scale)
        if raw_scale is not None:
            self.set_raw_scale(self.inputs[0], raw_scale)
        if channel_swap is not None:
            self.set_channel_swap(self.inputs[0], channel_swap)

        self.configure_crop(context_pad)
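For context, a minimal sketch of how a constructor like this is typically called; the signature matches the old caffe.Detector class. The file paths, mean file, and context_pad value below are placeholders, not values taken from the example itself:

import numpy as np
import caffe

MODEL_FILE = 'models/bvlc_reference_caffenet/deploy.prototxt'  # placeholder path
PRETRAINED = 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'  # placeholder path

# R-CNN style detector; context_pad=16 was the conventional R-CNN setting.
detector = caffe.Detector(MODEL_FILE, PRETRAINED,
                          gpu=False,
                          mean=np.load('ilsvrc_2012_mean.npy'),  # placeholder mean file
                          raw_scale=255,
                          channel_swap=(2, 1, 0),
                          context_pad=16)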
Example #2
def main():
    import caffe
    import numpy as np
    caffe_dir = "../caffe"
    MODEL_FILE = caffe_dir + "/models/bvlc_reference_caffenet/deploy.prototxt"
    PRETRAINED = caffe_dir + "/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"
    IMAGE_FILE = "../cat.jpg"

    with open("synset_words.txt") as f:
        words = f.readlines()
    words = map(lambda x: x.strip(), words)

    net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                           mean=np.load(caffe_dir + '/python/caffe/imagenet/ilsvrc_2012_mean.npy'),
                           channel_swap=(2,1,0),
                           raw_scale=255,
                           image_dims=(256, 256)) 
    caffe.set_phase_test()
    caffe.set_mode_gpu()
    input_image = caffe.io.load_image(IMAGE_FILE)
    #prediction = net.predict([input_image])
    prediction = net.forward_all(data=np.asarray([net.preprocess('data', input_image)]))

    i = prediction["prob"].argmax()
    print(i)
    print(words[i])
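The commented-out net.predict([...]) line above is the higher-level alternative: in this era of the caffe Python API, Classifier.predict resized to image_dims, cropped, and preprocessed internally. A minimal sketch of that path with a top-5 readout, reusing net, input_image, and words from main (illustrative, not from the original source):

    prediction = net.predict([input_image])    # shape (1, num_classes)
    top5 = prediction[0].argsort()[-5:][::-1]  # indices of the 5 largest probabilities
    for idx in top5:
        print(words[idx])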
Example #5
    def __init__(self, model_file, pretrained_file, image_dims=None,
                 gpu=False, mean=None, input_scale=None, raw_scale=None,
                 channel_swap=None):
        """
        Take
        image_dims: dimensions to scale input for cropping/sampling.
            Default is to scale to net input size for whole-image crop.
        gpu, mean, input_scale, raw_scale, channel_swap: params for
            preprocessing options.
        """
        caffe.Net.__init__(self, model_file, pretrained_file)
        caffe.set_phase_test()

        if gpu:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()

        if mean is not None:
            self.set_mean(self.inputs[0], mean)
        if input_scale is not None:
            self.set_input_scale(self.inputs[0], input_scale)
        if raw_scale is not None:
            self.set_raw_scale(self.inputs[0], raw_scale)
        if channel_swap is not None:
            self.set_channel_swap(self.inputs[0], channel_swap)

        self.crop_dims = np.array(self.blobs[self.inputs[0]].data.shape[2:])
        if not image_dims:
            image_dims = self.crop_dims
        self.image_dims = image_dims
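For intuition, a rough sketch of what the image_dims / crop_dims pair implies in this API: inputs are resized to image_dims, then a crop_dims-sized crop is fed to the net. The sizes below (256 resize, 227 crop) are typical CaffeNet values and are assumptions, as is the random stand-in image:

import numpy as np
import caffe

image_dims = np.array([256, 256])  # resize target
crop_dims = np.array([227, 227])   # network input size (from the data blob)

im = caffe.io.resize_image(np.random.rand(480, 640, 3), image_dims)
# Whole-image center crop, as in Classifier.predict(oversample=False):
center = image_dims / 2.0
crop = np.tile(center, 2) + np.concatenate([-crop_dims / 2.0, crop_dims / 2.0])
crop = crop.astype(int)
patch = im[crop[0]:crop[2], crop[1]:crop[3], :]  # shape (227, 227, 3)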
Example #6
def main():
    args = get_args()

    caffe.set_phase_test()
    if args.mode == "cpu":
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()

    if not os.path.isfile(args.model):
        if not args.solver:
            print "Error: Model does not exist. No solver specified."
            sys.exit(1)

        print "Warning: model %s does not exist. Creating..."
        solver = SGDSolver(args.solver)
        solver.net.save(args.model)

    # Initialize objects
    net = BaristaNet(args.architecture, args.model, args.driver,
                     reset_log=True)

    replay_dataset = ReplayDataset(args.dataset, net.state[0].shape,
                                   dset_size=args.dset_size,
                                   overwrite=args.overwrite)
    net.add_dataset(replay_dataset)

    game = SnakeGame()
    preprocessor = generate_preprocessor(net.state.shape[2:], gray_scale)
    exp_gain = ExpGain(net, ['w', 'a', 's', 'd'], preprocessor, game.cpu_play,
                       replay_dataset, game.encode_state())

    if args.overwrite:
        for _ in xrange(min(args.initial_replay, args.dset_size)):
            exp_gain.generate_experience(0)

    # Start server loop
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversocket.bind(('127.0.0.1', args.port))
    serversocket.listen(5)

    print
    print "*"*80
    print "* Starting BARISTA server: listening on port %d." % args.port
    print "*"*80
    # Signal Spark Executor that Barista is ready to receive connections
    issue_ready_signal(args.port)
    while True:
        (clientsocket, address) = serversocket.accept()
        if args.debug:
            handler = debug_process_connection
        else:
            handler = process_connection

        client_thread = threading.Thread(
                            target=handler,
                            args=(clientsocket, net, exp_gain))
        client_thread.start()  # start(), not run(): run() would execute the handler in this thread and block the accept loop
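The client side of this server is not shown anywhere in the example. A minimal sketch of a process that could connect to it; the port and payload are placeholders, since the real wire format lives in process_connection, which is outside this snippet:

import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 50001))  # must match args.port
client.sendall(b'hello')              # placeholder payload; actual protocol unknown
client.close()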
Example #7
def process(set, ent, inputEnt):
    highres = misc.imread(ent, flatten=False).astype(np.float32)
    lowres = misc.imread(inputEnt, flatten=False).astype(np.float32)
    width = highres.shape[1]
    height = highres.shape[0]

    print "processing %s (%dx%d)" % (ent, width, height)

    defFile = "scratch/test_SR_deploy.prototxt"
    pycnn.preprocessFile("deploy.prototmp", defFile, {"WIDTH": width, "HEIGHT": height})

    if "youtube" in set:
        print "using youtube mean"
        mean_bgr = tb.readFloat("/misc/lmbraid17/ilge/caffe/superresolution/datasets/youtube/test/mean3.float3").astype(
            np.float32
        )
    else:
        mean_bgr = tb.readFloat("/home/ilge/data/caffe/superresolution/datasets/coco/mean.float3").astype(np.float32)

    mean_bgr = cv2.resize(mean_bgr, (width, height), interpolation=cv2.INTER_CUBIC)
    mean_bgr_lowres = cv2.resize(mean_bgr, (width / 4, height / 4), interpolation=cv2.INTER_CUBIC)

    highres_nomean_bgr = highres[:, :, (2, 1, 0)] - mean_bgr
    lowres_nomean_bgr = lowres[:, :, (2, 1, 0)] - mean_bgr_lowres

    caffe.set_phase_test()
    caffe.set_mode_gpu()
    caffe.set_logging_disabled()
    net = caffe.Net(defFile, modelFile)

    print "network forward pass"
    blobs = net.forward(
        highres=np.asarray([net.preprocess("highres", highres_nomean_bgr / 255.0)]),
        lowres=np.asarray([net.preprocess("lowres", lowres_nomean_bgr / 255.0)]),
    )

    output_bgr = 255.0 * blobs["output"].transpose(0, 2, 3, 1).squeeze()
    output_bgr += mean_bgr
    output_bgr[output_bgr < 0] = 0
    output_bgr[output_bgr > 255] = 255

    os.system("mkdir -p %s/%s" % (out_dir, set))
    basename = os.path.basename(ent)[:-4].replace("_GT", "")
    misc.imsave("%s/%s/%s-gt.png" % (out_dir, set, basename), highres)
    misc.imsave("%s/%s/%s-recon.png" % (out_dir, set, basename), output_bgr[:, :, (2, 1, 0)])

    # nn, li, cu = tb.computeBasePSNRs(ent, downsampledFilename=inputEnt)
    nn = tb.PSNR()
    li = tb.PSNR()
    cu = tb.PSNR()

    psnr = tb.PSNR()
    psnr.set(blobs["psnr"][0, 0, 0, 0], blobs["psnr_y"][0, 0, 0, 0])

    print "nn=%5s, li=%5s, cu=%5s, net=%5s" % (nn, li, cu, psnr)

    return (nn, li, cu, psnr)
Example #8
def modelUpdate(jobPath):
    start = time.clock()
    caffe_root = '/home/ubuntu/caffe/'
    train_path = os.path.join(jobPath, 'train')
    model_path = os.path.join(jobPath, 'util')
    dirs = [os.path.join(train_path, d) for d in os.listdir(train_path)
            if os.path.isdir(os.path.join(train_path, d))]
    new_labels = np.array(os.listdir(train_path), dtype=object)
    sio.savemat(os.path.join(model_path, 'new_labels.mat'), {'WNID': new_labels})
    num_labels = len(dirs)

    s = open(os.path.join(caffe_root, 'models/bvlc_reference_caffenet/deploy.prototxt')).read()
    s = s.replace('1000', '%d' % (1000 + num_labels))
    s = s.replace('fc8', 'fc8-new')
    f = open(os.path.join(model_path, 'newCaffeModel.prototxt'), 'w')
    f.write(s)
    f.close()
    print 'Time when prototxt is prepared is ', time.clock() - start

    net = caffe.Classifier(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',
                           caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
    #net = caffe.Classifier(caffe_root+'models/bvlc_alexnet/deploy.prototxt', caffe_root+'models/bvlc_alexnet/bvlc_alexnet.caffemodel')
    net_new = caffe.Classifier(os.path.join(model_path, 'newCaffeModel.prototxt'),
                               caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
    net_new.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'))  # ImageNet mean
    net_new.set_raw_scale('data', 255)
    net_new.set_channel_swap('data', (2, 1, 0))
    #net_new.set_phase_test()
    #net_new.set_mode_gpu()
    pr = 'fc8'
    pr_new = 'fc8-new'
    fc_params = (net.params[pr][0].data, net.params[pr][1].data)
    fc_params_new = (net_new.params[pr_new][0].data, net_new.params[pr_new][1].data)
    net.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'))  # ImageNet mean
    net.set_raw_scale('data', 255)
    net.set_channel_swap('data', (2, 1, 0))
    #net.set_phase_test()
    #net.set_mode_gpu()
    caffe.set_mode_gpu()
    caffe.set_phase_test()
    print '{} weights are {} dimensional and biases are {} dimensional'.format(pr_new, fc_params_new[0].shape, fc_params_new[1].shape)
    fc_params_new[1][:, :, :, :1000] = fc_params[1]
    fc_params_new[0][:, :, :1000, :] = fc_params[0]
    print 'max existing bias = ', max(fc_params_new[1][0, 0, 0, :])
    print 'min existing bias = ', min(fc_params_new[1][0, 0, 0, :])
    print 'max existing weight =', fc_params_new[0][0, 0, :, :].max()
    print 'min existing weight =', fc_params_new[0][0, 0, :, :].min()
    num_cores = multiprocessing.cpu_count()
    results = Parallel(n_jobs=num_cores)(delayed(trainaclass)(dirs[label]) for label in range(num_labels))

    for label in range(num_labels):
        w0_new = results[label][0]
        print 'new bias is ', w0_new
        print 'min weight = ', results[label][1].min()
        print 'max weight = ', results[label][1].max()
        fc_params_new[1][0, 0, 0, 1000 + label] = w0_new
        fc_params_new[0][0, 0, 1000 + label, :] = results[label][1].transpose()[0, :]

    net_new.save(os.path.join(model_path, 'newCaffeModel.caffemodel'))
Example #10
def classifyImagesWithNewModel(jobPath, socketid, result_path):
    print jobPath, socketid, result_path
    try:
        ImagePath = os.path.join(jobPath, 'test')
        modelPath = os.path.join(jobPath, 'util')

        new_labels = sio.loadmat(os.path.join(modelPath, 'new_labels.mat'))
        new_labels_cells = new_labels['WNID']

        # Set the right path to your model file, pretrained model,
        # and the image you would like to classify.
        MODEL_FILE = os.path.join(modelPath, 'newCaffeModel.prototxt')
        PRETRAINED = os.path.join(modelPath, 'newCaffeModel.caffemodel')

        caffe.set_phase_test()
        caffe.set_mode_gpu()

        net = caffe.Classifier(
            MODEL_FILE,
            PRETRAINED,
            mean=np.load(
                os.path.join(CAFFE_DIR,
                             'python/caffe/imagenet/ilsvrc_2012_mean.npy')),
            channel_swap=(2, 1, 0),
            raw_scale=255,
            image_dims=(256, 256))

        results = {}

        if os.path.isdir(ImagePath):
            for file_name in os.listdir(ImagePath):
                image_path = os.path.join(ImagePath, file_name)
                if os.path.isfile(image_path):

                    tags = caffe_classify_image(net, image_path,
                                                new_labels_cells)
                    log_to_terminal("Results: " + str(tags), socketid)
                    webResult = {}
                    webResult[os.path.join(result_path, file_name)] = tags

                    r.publish(
                        'chat',
                        json.dumps({
                            'web_result': json.dumps(webResult),
                            'socketid': str(socketid)
                        }))

            log_to_terminal('Thank you for using CloudCV', socketid)

    except Exception as e:
        log_to_terminal(str(traceback.format_exc()), socketid)
Example #11
def load_network():
    print 'IMAGENET NETWORK'
    background_model = model_path
    net = caffe.Classifier(model_deploy,background_model,
                           mean=np.load(caffe_root+'python/caffe/imagenet/ilsvrc_2012_mean.npy'),
                           channel_swap=(2,1,0),
                           raw_scale=255,
                           image_dims=[227,227])
    caffe.set_phase_test()
    caffe.set_mode_gpu()
    #net.set_mean('data',caffe_root+'python/caffe/imagenet/ilsvrc_2012_mean.npy')
    #net.set_channel_swap('data',(2,1,0))
    #net.set_input_scale('data',255)
    return net
Example #12
def get_deep_net():
    """ 
    get pretrained deep network model 
    """
    caffe_root = '/home/cai-mj/programs/caffe-master'
    MEAN_FILE = caffe_root + '/python/caffe/imagenet/ilsvrc_2012_mean.npy'
    MODEL_FILE = caffe_root + '/python/feature/imgnet_feature.prototxt'
    PRETRAINED = caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'        
    net = caffe.Classifier(MODEL_FILE, PRETRAINED, gpu=True)
    caffe.set_phase_test()
    net.set_mean('data', np.load(MEAN_FILE))
    net.set_raw_scale('data', 255)
    net.set_channel_swap('data', (2,1,0))
    return net
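A sketch of using the returned net for feature extraction. The layer name 'fc7' is an assumption based on the standard CaffeNet layout; the prototxt referenced above (imgnet_feature.prototxt) is not shown, so the real blob names may differ:

net = get_deep_net()
im = caffe.io.load_image('example.jpg')  # placeholder image path
net.predict([im], oversample=False)      # forward pass fills net.blobs
feat = net.blobs['fc7'].data.copy()      # hypothetical feature blob
print feat.shape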
Example #13
def load(layer_id):
    global net, input_name
    fname = "split/origin.7.%d.Model.prototxt" % layer_id
    mname = "split/origin.7.%d.Model.caffemodel" % layer_id
    param = caffe_pb2.NetParameter()
    if not os.path.exists(fname):
        return False
    with open(fname) as f:
        google.protobuf.text_format.Merge(f.read(), param)
    log.write("load %s\n" % layer_id)
    input_name = str(param.input[0])
    net = caffe.Net(fname, mname)
    caffe.set_phase_test()
    caffe.set_mode_gpu()
    return True
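Since load() returns False once a split file is missing, it can drive a simple loop over consecutive layer ids (a usage sketch, not part of the original source):

layer_id = 0
while load(layer_id):
    # load() sets the globals net and input_name as a side effect
    print "loaded split", layer_id
    layer_id += 1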
Example #15
def InitializeNet():
    plt.rcParams['figure.figsize'] = (10, 10)
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['image.cmap'] = 'gray'

    caffe.set_phase_test()
    caffe.set_mode_cpu()
    net = caffe.Classifier(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',
                           caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    net.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'))  # ImageNet mean
    net.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
    net.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB

    return net
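Typical use of the initializer above, following the classic caffe classification flow (the cat image ships with the caffe repository; any image path works):

net = InitializeNet()
im = caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')
scores = net.predict([im])  # oversampled prediction by default
print 'predicted class:', scores[0].argmax()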
Example #16
def main_tlc():
    import caffe
    import numpy as np
    import skimage.io
    np.set_printoptions(threshold='nan')

    options = SETTINGS["imagenet"]
    IMAGE_FILE = "../cat.jpg"
    #IMAGE_FILE = "../fish.jpg"
    mean = np.zeros([3] + list(options['input_dims']))
    mean.fill(128.0)
    with open(
            "/home/haichen/datasets/imagenet/meta/2010/synset_words.txt") as f:
        words = f.readlines()
    words = map(lambda x: x.strip(), words)

    net = caffe.Classifier(options["model_file"],
                           options["pretrained"],
                           mean=mean,
                           input_scale=0.0078125,
                           image_dims=options["image_dims"])
    sys.stderr.write("model file: %s\n" % options["model_file"])
    sys.stderr.write("pretrained: %s\n" % options["pretrained"])

    caffe.set_phase_test()
    caffe.set_mode_gpu()
    #net.set_mode_cpu()

    with open(options["raw"]) as f:
        content = f.read()
        rawinput = content.strip(' ,\t\r\n').split(options["sep"])
        rawinput = map(lambda x: eval(x), rawinput)
    rawinput = np.asarray(rawinput).reshape([1, 3] + options['input_dims'])
    prediction = net.predict_raw(rawinput)
    return
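    # NOTE: the bare return above makes everything below unreachable;
    # the image-based predict() path is kept for reference only.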

    input_image = skimage.io.imread(IMAGE_FILE)
    prediction = net.predict([input_image], oversample=True)
    #prediction = net.forward_all(data=np.asarray([net.preprocess('data', input_image)]))
    #return
    label = prediction.argmax()
    #for i,v in enumerate(prediction[0]):
    #    print i, v
    print label
    print words[label]
Example #17
    def __init__(self, model_def_file, pretrained_model_file, mean_file,
                 raw_scale, class_labels_file, image_dim, gpu_mode,
                 swap_colors_wtf, grey):
        logging.info('Loading net and associated files...')
        if swap_colors_wtf:
            print("swap_colors_wtf")
            swap = (2, 1, 0)
        else:
            print("OK, not swapping any colors")
            swap = False


#        model = "numbers_deploy.prototxt"
#        weights = "numbers_iter_47000.caffemodel"
        model = "syllables_deploy.prototxt"
        weights = "syllables_iter_27000.caffemodel"
        #        model = "words_deploy.prototxt"
        #        weights = "words_iter_1000.caffemodel"
        print "gray mode = %s" % grey
        print "model_def %s" % model
        print "model_file %s" % weights
        print "image_dims=(%d,%d)" % (int(image_dim), int(image_dim))
        print "raw_scale=%d" % int(raw_scale)
        print "mean=%s" % mean_file
        print "channel_swap=%s" % str(swap)
        print "gpu_mode %s" % gpu_mode

        # better do caffe.Classifier(...).predict by hand:
        self.net = caffe.Net(model, weights)
        #        help(self.net)
        caffe.set_phase_test()
        self.net.set_raw_scale('data', 255.0)
        caffe.set_mode_gpu()

        if class_labels_file:
            with open(class_labels_file) as f:
                labels_df = pd.DataFrame([{
                    'synset_id':
                    l.strip().split(' ')[0],
                    'name':
                    ' '.join(l.strip().split(' ')[1:]).split(',')[0]
                } for l in f.readlines()])
                self.labels = labels_df.sort('synset_id')['name'].values
Example #20
    def load_net(self):
        caffe.set_phase_test()
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]

        #scores = net.predict([caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')])

        channel_swap = (2, 1, 0) if self.nethandle.channel_swap else None
        self.net = caffe.Classifier(str(self.nethandle.protopath),
                                    str(self.nethandle.modelpath),
                                    mean=np.load(str(self.nethandle.meanpath)),
                                    gpu=False,
                                    raw_scale=int(self.nethandle.raw_scale),
                                    channel_swap=channel_swap)
        #print str(self.nethandle.protopath),str(self.nethandle.modelpath),'mean=',np.load(str(self.nethandle.meanpath)),'gpu=',False,'raw_scale=',int(self.nethandle.raw_scale),'channel_swap=',channel_swap
        #self.net=caffe.Classifier(str(self.nethandle.protopath),str(self.nethandle.modelpath),mean=np.load(str(self.nethandle.meanpath)),gpu=False,channel_swap=(2,1,0))
        print
        f = open(self.outpath + '/' + self.name + '_paramslist.txt', 'w')
        self.paramslist = self.net.params.keys()
        f.write("\n".join(self.net.params.keys()))
        f.close()
        f = open(self.outpath + '/' + self.name + '_blobslist.txt', 'w')
        self.blobslist = self.net.blobs.keys()
        f.write("\n".join(self.net.blobs.keys()))
        f.close()
Example #21
def load_sp_model(configfile):
    # load model
    config = ConfigParser.ConfigParser()
    config.read(configfile)

    model_name = config.get("net", "net")
    model_def = os.path.join(model_dir, "%s.prototxt" % model_name)
    pretrained = os.path.join(model_dir, "%s.caffemodel" % model_name)
    mean = config.get("net", "mean")
    dim = int(config.get("net", "image_dim"))
    raw_scale = int(config.get("net", "raw_scale"))
    layer_name = config.get("train", "target_layer")

    net = caffe.Classifier(model_def, pretrained, channel_swap=(2,1,0), mean=np.load(mean), raw_scale=raw_scale, image_dims=(dim, dim), batch=1)
    caffe.set_phase_test()
    caffe.set_mode_gpu()

    net2 = caffe.Net(config.get("train", "target_test"), config.get("train", "target_model"), 1)

    return net, net2 
Example #23
def save_filters(network_def, network_model, save_path):
    # print 'arg1', network_def
    # print 'arg2', network_model
    # print 'arg3', save_path

    #--->added by zhaoyafei 2017-05-09
    caffe.set_phase_test()
    caffe.set_mode_cpu()
    #--->end added by zhaoyafei 2017-05-09

    net = caffe.Classifier(network_def, network_model)
    #--->commented by zhaoyafei 2017-05-09
#    net.set_phase_test()
#    net.set_mode_cpu()
    #--->end commented by zhaoyafei 2017-05-09

    '''
    net.set_mean('data', None)
    net.set_channel_swap('data', (2,1,0))
    net.set_input_scale('data', 256)

    data_shape = net.blobs['data'].data.shape[1:]
    print data_shape
    dummy_data = np.zeros(data_shape, dtype='float32')
    scores = net.predict([dummy_data], oversample=False)
    blobs = OrderedDict( [(k, v.data) for k, v in net.blobs.items()])
    '''
    params = []
    for k, v in net.params.items():
        print k, type(v), len(v)

        vlist = [vt.data for vt in v]
        params.append((k, vlist))

    # exit()
    # params = [(k, v) for k, v in net.params.items()]
    dc = dict(params)
    sio.savemat(save_path, dc)

    return
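Usage sketch: dump the filters, then read them back with scipy. The paths are placeholders; each key in the .mat file is a layer name from net.params (scipy adds its own __header__-style metadata keys, which are skipped here):

import scipy.io as sio

save_filters('deploy.prototxt', 'weights.caffemodel', 'filters.mat')  # placeholder paths
dc = sio.loadmat('filters.mat')
for name in dc:
    if not name.startswith('__'):
        print name, dc[name].shape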
Example #24
def batch_predict(configfile, targets):

    config = ConfigParser.ConfigParser()
    config.read(configfile)

    model_name = config.get("net", "net")
    model_def = os.path.join(model_dir, "%s.prototxt" % model_name)
    pretrained = os.path.join(model_dir, "%s.caffemodel" % model_name)
    mean = config.get("net", "mean")
    dim = int(config.get("net", "image_dim"))
    raw_scale = int(config.get("net", "raw_scale"))

    net = caffe.Classifier(model_def, pretrained, channel_swap=(2,1,0), mean=np.load(mean), raw_scale=raw_scale, image_dims=(dim, dim), batch=1)
    caffe.set_phase_test()
    caffe.set_mode_gpu()

    net2 = caffe.Net(config.get("train", "target_test"), config.get("train", "target_model"), 1)
    with open(config.get("train", "target_list"), "rb") as f:
        reverse_index = pickle.load(f)

    layer_name = config.get("train", "target_layer")
   
    count = 0
    exec_times = []
    prepare_times = []
    for cur_idx, image_path in enumerate(targets):
        im = caffe.io.load_image(image_path)
        tic = time.time() 
        prepared = face_input_prepare_n(net, [im], False)
        out = net.forward(end=layer_name, **{net.inputs[0]: prepared})
        out2 = net2.forward_all(**{net2.inputs[0]: out[layer_name]})[net2.outputs[0]]
        toc = time.time()
        exec_times += [(toc - tic)]
        i = out2[0].argmax()
        if i == len(reverse_index):
            print -1
        else:
            print reverse_index[i]

    print 'Execution time(ms) mean:', np.mean(exec_times),'std:', np.std(exec_times, ddof = 1)
Example #28
def classifynum(correctphone, bmpnum):
    MODEL_FILE = '/home/songqing/dl/caffe/examples/mnist/lenet.prototxt'
    PRETRAINED = '/home/songqing/dl/caffe/examples/mnist/zsq11_lenet_iter_10000.caffemodel'
    piccount = 0
    iternum = 0
    allnumlist = []
    allratelist = []
    finallist = []
    poslist = []
    ratelist = []
    outfile = open('smallpic.txt', 'a+')
    outfile.write('phone ' + str(bmpnum) + '\n')
    outfile.write(str(correctphone) + '\n')
    for iternum in range(1, 140):
        #print iternum
        counttemp = piccount
        piccount = piccount + 1
        filename = '0'
        if counttemp < 10:
            filename = filename + '00' + str(counttemp)
        elif counttemp < 100:
            filename = filename + '0' + str(counttemp)
        else:
            filename = filename + str(counttemp)

        IMAGE_FILE = '/home/songqing/dl/dl/phone/phonenum_recognition/BMP7/version2/testpic/pic' + filename + '.bmp'
        if (os.path.isfile(IMAGE_FILE) == False):
            continue
        caffe.set_phase_test()
        caffe.set_mode_cpu()
        net = caffe.Classifier(MODEL_FILE, PRETRAINED)
        #                       mean=np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'),
        #							channel_swap=(2,1,0),
        #                      raw_scale=28,
        #                       image_dims=(28, 28))
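        # NOTE: the Classifier is rebuilt on every loop iteration, reloading the
        # weights each time; hoisting these lines above the for loop would behave
        # identically and run much faster.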
        input_image = caffe.io.load_image(IMAGE_FILE, color=False)
        #plt.imshow(input_image)
        prediction = net.predict([input_image], oversample=False)
        nummax = prediction[0].argmax()
        #print 'prediction shape:', prediction[0].shape
        #plt.plot(prediction[0])
        #print 'predicted class:', prediction[0].argmax()
        #print 'predicted class:', prediction[0][nummax]
        #print 'predicted class:', prediction[0]

        # if rate is small, put 0 to it's value
        if (prediction[0][nummax] < 0.98 and nummax != 1):
            if (nummax == 9 or nummax == 6):
                if (prediction[0][nummax] < 0.88):
                    prediction[0][nummax] = 0
            else:
                prediction[0][nummax] = 0
        if (prediction[0][nummax] < 0.993 and nummax == 1):
            prediction[0][nummax] = 0
        if (prediction[0][nummax] == 0):
            """
			if(nummax==1 and iternum>10 and nummax==allnumlist[-1] and allnumlist[-1]==allnumlist[-2] and allnumlist[-2]==allnumlist[-3] and allnumlist[-3]==allnumlist[-4]):
				lianxugeshu=1#many same num but rate is not not high
				lianxupos=0
				lianxurate=0
				lianxufinal=allnumlist[-1]
				for lianxu in range(-1,poslist[-1]-iternum,-1):
					if(lianxufinal==allnumlist[lianxu]):
						lianxugeshu=lianxugeshu+1
						if(allratelist[lianxu]>lianxurate):
							lianxurate=allratelist[lianxu]
							lianxupos=iternum+lianxu
					else:
						break
				if(lianxugeshu>5 and lianxurate>0):
					finallist.append(lianxufinal)
					ratelist.append(lianxurate)
					poslist.append(lianxupos)
				outfile.write(str(nummax)+' '+str(prediction[0][nummax])+'\n')
				allnumlist.append(nummax)
				allratelist.append(prediction[0][nummax])
				continue
			else:
				"""
            outfile.write(
                str(nummax) + ' ' + str(prediction[0][nummax]) + '\n')
            allnumlist.append(nummax)
            allratelist.append(prediction[0][nummax])
            continue
#in a range , get the max value
        if ((len(finallist) == 0)):
            if (nummax == 1):  # the first num is 1
                finallist.append(nummax)
                ratelist.append(prediction[0][nummax])
                poslist.append(iternum)
        elif (iternum - poslist[-1] <= 2):
            if (prediction[0][nummax] > ratelist[-1]):
                if (len(finallist) > 1):
                    if (iternum - poslist[-2] < 14):
                        if (nummax == 1):
                            if (prediction[0][nummax] > 0.999):
                                finallist[-1] = nummax
                                ratelist[-1] = prediction[0][nummax]
                                poslist[-1] = iternum
                        else:
                            finallist[-1] = nummax
                            ratelist[-1] = prediction[0][nummax]
                            poslist[-1] = iternum
                else:
                    finallist[-1] = nummax
                    ratelist[-1] = prediction[0][nummax]
                    poslist[-1] = iternum
#put into the list OR update, limit operation
        elif (iternum - poslist[-1] == 3):  #2.26
            if (nummax == 0 and prediction[0][nummax] > 0.998
                    and finallist[-1] != 1):
                finallist.append(nummax)
                ratelist.append(prediction[0][nummax])
                poslist.append(iternum)
            elif (finallist[-1] == 1 and nummax == 0 and ratelist[-1] < 0.999
                  and prediction[0][nummax] > 0.998):
                finallist[-1] = nummax
                ratelist[-1] = prediction[0][nummax]
                poslist[-1] = iternum
            elif (nummax == finallist[-1] and finallist[-1] == finallist[-2]
                  and prediction[0][nummax] > ratelist[-1]):
                finallist[-1] = nummax
                ratelist[-1] = prediction[0][nummax]
                poslist[-1] = iternum
        elif (iternum - poslist[-1] <= 5 and iternum - poslist[-1] >= 4):
            if (len(finallist) == 1):  #add not update
                if (nummax == 1):
                    if (prediction[0][nummax] > 0.999):
                        finallist.append(nummax)
                        ratelist.append(prediction[0][nummax])
                        poslist.append(iternum)
                elif (prediction[0][nummax] > 0.99):
                    finallist.append(nummax)
                    ratelist.append(prediction[0][nummax])
                    poslist.append(iternum)
            else:
                if (ratelist[-1] > 0.999
                        and len(finallist) < 11):  # last rate is too high, add
                    if (nummax == finallist[-1]
                            and finallist[-1] == finallist[-2]):
                        if (nummax != 1 and iternum - poslist[-2] > 14):
                            finallist.append(nummax)
                            ratelist.append(prediction[0][nummax])
                            poslist.append(iternum)
                    elif (nummax == 1):
                        if (finallist[-1] != 0 and finallist[-1] != 3):
                            if (prediction[0][nummax] > 0.9999
                                    and iternum - poslist[-2] > 14):
                                finallist.append(nummax)
                                ratelist.append(prediction[0][nummax])
                                poslist.append(iternum)
                    elif (finallist[-1] == 5 and nummax == 7):
                        if (prediction[0][nummax] > 0.993):
                            finallist.append(nummax)
                            ratelist.append(prediction[0][nummax])
                            poslist.append(iternum)
                    elif (prediction[0][nummax] > 0.99):
                        finallist.append(nummax)
                        ratelist.append(prediction[0][nummax])
                        poslist.append(iternum)
                elif (ratelist[-1] > 0.98 and nummax == 9
                      and len(finallist) < 11):  # 9 rate is lower, add
                    if (nummax == finallist[-1]
                            and finallist[-1] == finallist[-2]):
                        if (nummax != 1 and iternum - poslist[-2] > 14):
                            finallist.append(nummax)
                            ratelist.append(prediction[0][nummax])
                            poslist.append(iternum)
                    elif (prediction[0][nummax] > 0.99):
                        finallist.append(nummax)
                        ratelist.append(prediction[0][nummax])
                        poslist.append(iternum)
                elif (ratelist[-1] > 0.998 and prediction[0][nummax] > 0.999
                      and nummax != 1 and
                      len(finallist) < 11):  # cunrent rate is too high, add
                    finallist.append(nummax)
                    ratelist.append(prediction[0][nummax])
                    poslist.append(iternum)
                elif (finallist[-1] == 1 and nummax == 6
                      and ratelist[-1] < 0.998
                      and prediction[0][nummax] > 0.99):  # 1 afer 6, update
                    if (iternum - poslist[-2] < 14):
                        finallist[-1] = nummax
                        ratelist[-1] = prediction[0][nummax]
                        poslist[-1] = iternum
                elif (prediction[0][nummax] > ratelist[-1]):  #update
                    if (iternum - poslist[-2] < 14):
                        if (nummax == 1):
                            if (finallist[-1] == 0):
                                if (prediction[0][nummax] > 0.9999):
                                    finallist[-1] = nummax
                                    ratelist[-1] = prediction[0][nummax]
                                    poslist[-1] = iternum
                                elif (finallist[-2] == 1
                                      and prediction[0][nummax] > 0.999
                                      and ratelist[-1] < 0.993):
                                    finallist[-1] = nummax
                                    ratelist[-1] = prediction[0][nummax]
                                    poslist[-1] = iternum
                            elif (prediction[0][nummax] > 0.999):
                                finallist[-1] = nummax
                                ratelist[-1] = prediction[0][nummax]
                                poslist[-1] = iternum
                        else:
                            finallist[-1] = nummax
                            ratelist[-1] = prediction[0][nummax]
                            poslist[-1] = iternum

#direct put into the list
        elif ((iternum - poslist[-1]) > 5 and len(finallist) < 11):
            if (len(finallist) <= 2):
                if (nummax == 1):
                    if (prediction[0][nummax] > 0.993):
                        finallist.append(nummax)
                        ratelist.append(prediction[0][nummax])
                        poslist.append(iternum)
                else:
                    finallist.append(nummax)
                    ratelist.append(prediction[0][nummax])
                    poslist.append(iternum)
            else:  # 2 same num might detect as 3 nums
                if (nummax == finallist[-1]
                        and finallist[-1] == finallist[-2]):
                    if (nummax != 1 and iternum - poslist[-2] > 14):
                        finallist.append(nummax)
                        ratelist.append(prediction[0][nummax])
                        poslist.append(iternum)
                    elif (nummax == 1 and iternum - poslist[-2] > 11):
                        finallist.append(nummax)
                        ratelist.append(prediction[0][nummax])
                        poslist.append(iternum)
                else:
                    if (nummax == 1):
                        if (finallist[-1] == 0):
                            if (iternum - poslist[-1] > 6
                                    and prediction[0][nummax] > 0.996):
                                finallist.append(nummax)
                                ratelist.append(prediction[0][nummax])
                                poslist.append(iternum)
                        elif (prediction[0][nummax] > 0.993):
                            finallist.append(nummax)
                            ratelist.append(prediction[0][nummax])
                            poslist.append(iternum)
                    else:
                        finallist.append(nummax)
                        ratelist.append(prediction[0][nummax])
                        poslist.append(iternum)

        outfile.write(str(nummax) + ' ' + str(prediction[0][nummax]) + '\n')
        allnumlist.append(nummax)
        allratelist.append(prediction[0][nummax])

        #%timeit net.predict([input_image])

        # Resize the image to the standard (256, 256) and oversample net input sized crops.
        input_oversampled = caffe.io.oversample(
            [caffe.io.resize_image(input_image, net.image_dims)],
            net.crop_dims)
        # 'data' is the input blob name in the model definition, so we preprocess for that input.
        caffe_input = np.asarray(
            [net.preprocess('data', in_) for in_ in input_oversampled])
# forward() takes keyword args for the input blobs with preprocessed input arrays.
#%timeit net.forward(data=caffe_input)
#	print finallist
#	print ratelist
#	print poslist
#	outfile.close()
    outfile.write(str(finallist) + '\n')
    outfile.write(str(ratelist) + '\n')
    outfile.write(str(poslist) + '\n')
    outfile.close()
    str_out = ''
    n_for = 0
    while (n_for < len(finallist)):
        str_out += str(finallist[n_for])
        n_for = n_for + 1


#	print str_out
    return str(str_out)
Example #29
        reader = csv.reader(f, delimiter=' ')
        for row in reader:
            # This file contains 130'000 lines like: /home/dueo/data_kaggel_bowl/test//1.jpg 42
            names.append(row[0].split("//")[1])

    # Getting the header
    fc = csv.reader(file(sampleSub))
    fst = fc.next()
    # Opening the submission file
    fout = open(submissionName, 'w')
    w = csv.writer(fout)
    w.writerow(fst)

    caffe.set_mode_gpu()
    caffe.set_phase_test()
    #caffe.set_phase_train() #TODO delete
    # Taken from: https://github.com/BVLC/caffe/issues/1774
    net = caffe.Net(description, learnedModel)
    read = 0
    while (read < len(names)):
        start = time.time()
        res = net.forward()  # this will load the next mini-batch as defined in the net (rewinds)
        print("Time for getting the batch " + str((time.time() - start)) +
              " " + str(read))
        preds = net.blobs[predLayerName].data
        batchSize = np.shape(preds)[0]
        for i in range(0, batchSize):
            #pred = np.reshape(preds[i],1000)[0:121] #Todo change to 121
            pred = np.reshape(preds[i], 121)
Example #30
def decafImages(src_path, output_path, socketid, result_path, single_file_name='', modelname=''):
    if modelname != '':  # 'is not' compared identity, not equality
        lMODEL_FILE = str(os.path.join(conf.CAFFE_DIR, 'models',modelname,'deploy.prototxt'))
        lPRETRAINED = str(os.path.join(conf.CAFFE_DIR, 'models', modelname, modelname+'.caffemodel'))
        r.publish('chat', json.dumps({'error': lMODEL_FILE+'   '+lPRETRAINED, 'socketid': socketid}))
        caffe.set_phase_test()
        caffe.set_mode_cpu()
        modelnet = caffe.Classifier(lMODEL_FILE, lPRETRAINED)
        #r.publish('chat', json.dumps({'error': str(modelname), 'socketid': socketid}))

    else:
        modelnet = None

    try:
        #Entire Directory
        if os.path.isdir(os.path.join(src_path,single_file_name)):

            for file_name in os.listdir(src_path):
                tags = {}
                image_path = os.path.join(src_path, file_name)
                if os.path.isfile(image_path):

                    """ Trying to get the output of classify python script to send to user - Part 1/4
                    myPrint = CustomPrint(socketid)
                    old_stdout=sys.stdout
                    sys.stdout = myPrint
                    """
                    print 'Running caffe classify on multiple images'

                    mat_file_path = decaf.calculate_decaf_image(file_name, src_path, output_path, 3, socketid, tags, modelname, modelnet)
                    print tags
                    """ Part 2/2
                    sys.stdout=old_stdout
                    """
                    log_to_terminal("Results: "+str(tags), socketid)
                    # sorted_tags = sorted(tags.iteritems(), key=operator.itemgetter(1), reverse=True)

                    # webResult = {}
                    #webResult[str(result_path + file_name)] = sorted_tags
                    result_url = urlparse(result_path).path
                    r.publish('chat',
                                   json.dumps({'web_result': os.path.join(result_url, 'results', file_name+'.mat'), 'socketid': str(socketid)}))

            log_to_terminal('Thank you for using CloudCV', socketid)
        # Single File
        else:
            """ Part 3/4
            myPrint = CustomPrint(socketid)
            old_stdout=sys.stdout
            sys.stdout = myPrint
            """
            tags = {}
            print 'Running caffe classify on a single image: ' + single_file_name
            try:
                mat_file_path = decaf.calculate_decaf_image(single_file_name, src_path, output_path, 3, socketid, tags, modelname, modelnet)
            except Exception as e:
                print str(e)
            """ Part 4/4
            sys.stdout=old_stdout
            """
            log_to_terminal("Results: "+str(tags), socketid)

            # tags = sorted(tags.iteritems(), key=operator.itemgetter(1), reverse=True)
            # web_result = {}
            # web_result[str(result_path)] = tags
            result_url = os.path.dirname(urlparse(result_path).path)
            r.publish('chat', json.dumps({'web_result': os.path.join(result_url, 'results', single_file_name+'.mat'), 'socketid': str(socketid)}))

            log_to_terminal('Thank you for using CloudCV', socketid)

    except Exception as e:
        log_to_terminal(str(traceback.format_exc()), socketid)
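# Hedged sketch (not from the original source): decafImages assumes a
# module-level Redis client `r` and a `log_to_terminal` helper. With redis-py
# the wiring might look like this; the host, port, and 'chat' channel are
# assumptions carried over from the calls above.
import json
import redis

r = redis.StrictRedis(host='localhost', port=6379)

def log_to_terminal(message, socketid):
    r.publish('chat', json.dumps({'message': str(message),
                                  'socketid': str(socketid)}))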
Example #31
0
def main():
    args = get_args()

    caffe.set_phase_test()
    if args.mode == "cpu":
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()

    if not os.path.isfile(args.model):
        if not args.solver:
            print "Error: Model does not exist. No solver specified."
            sys.exit(1)

        print "Warning: model %s does not exist. Creating..."
        solver = SGDSolver(args.solver)
        solver.net.save(args.model)

    # Initialize objects
    net = BaristaNet(args.architecture,
                     args.model,
                     args.driver,
                     reset_log=True)

    replay_dataset = ReplayDataset(args.dataset,
                                   net.state[0].shape,
                                   dset_size=args.dset_size,
                                   overwrite=args.overwrite)
    net.add_dataset(replay_dataset)

    game = SnakeGame()
    preprocessor = generate_preprocessor(net.state.shape[2:], gray_scale)
    exp_gain = ExpGain(net, ['w', 'a', 's', 'd'], preprocessor, game.cpu_play,
                       replay_dataset, game.encode_state())

    if args.overwrite:
        for _ in xrange(min(args.initial_replay, args.dset_size)):
            exp_gain.generate_experience(0)

    # Start server loop
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversocket.bind(('127.0.0.1', args.port))
    serversocket.listen(5)

    print
    print "*" * 80
    print "* Starting BARISTA server: listening on port %d." % args.port
    print "*" * 80
    # Signal Spark Executor that Barista is ready to receive connections
    issue_ready_signal(args.port)
    while True:
        (clientsocket, address) = serversocket.accept()
        if args.debug:
            handler = debug_process_connection
        else:
            handler = process_connection

        client_thread = threading.Thread(target=handler,
                                         args=(clientsocket, net, exp_gain))
        client_thread.start()  # start() runs the handler in a new thread; run() would execute it in this one
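# Hedged sketch (not in the original): a minimal client for the server loop
# above, standard library only. The payload format expected by
# process_connection is defined elsewhere, so 'ping' is only a placeholder.
import socket

def ping_barista(port, payload='ping'):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', port))
    client.sendall(payload)
    client.close()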
Example #32
0
    def predict_sentence(self, image):
        try:

            ################ FEATURE EXTRACTION ##############

            cnn_model_def = self.cnn_model_def
            cnn_model_params = self.cnn_model_params
            rnn_model = self.rnn_model

            def predict(in_data, net):
                """
                Get the features for a batch of data using network

                Inputs:
                in_data: data batch
                """

                out = net.forward(**{net.inputs[0]: in_data})
                features = out[net.outputs[0]].squeeze(axis=(2,3))
                return features


            def batch_predict(filenames, net):
                """
                Get the features for all images from filenames using a network

                Inputs:
                filenames: a list of names of image files

                Returns:
                an array of feature vectors for the images in that file
                """
                IMAGE_PATH = '/tmp/captionly_demo_uploads'

                N, C, H, W = net.blobs[net.inputs[0]].data.shape
                F = net.blobs[net.outputs[0]].data.shape[1]
                Nf = len(filenames)
                Hi, Wi, _ = imread(IMAGE_PATH + '/' + filenames[0]).shape
                allftrs = np.zeros((Nf, F))
                for i in range(0, Nf, N):
                    in_data = np.zeros((N, C, H, W), dtype=np.float32)

                    batch_range = range(i, min(i+N, Nf))
                    batch_filenames = [filenames[j] for j in batch_range]
                    Nb = len(batch_range)

                    batch_images = np.zeros((Nb, 3, H, W))
                    for j,fname in enumerate(batch_filenames):
                        im = imread(IMAGE_PATH + '/' + fname)
                        if len(im.shape) == 2:
                            im = np.tile(im[:,:,np.newaxis], (1,1,3))
                        # RGB -> BGR
                        im = im[:,:,(2,1,0)]
                        # mean subtraction
                        im = im - np.array([103.939, 116.779, 123.68])
                        # resize
                        im = imresize(im, (H, W))
                        # get channel in correct dimension
                        im = np.transpose(im, (2, 0, 1))
                        batch_images[j,:,:,:] = im

                    # insert into correct place
                    in_data[0:len(batch_range), :, :, :] = batch_images

                    # predict features
                    ftrs = predict(in_data, net)

                    for j in range(len(batch_range)):
                        allftrs[i+j,:] = ftrs[j,:]

                    # report progress once per batch, not per sample
                    print 'Done %d/%d files' % (i+len(batch_range), len(filenames))

                # return only after all batches have been processed; the
                # original returned from inside the inner loop
                return allftrs

            if self.gpu_mode:
                caffe.set_mode_gpu()
            else:
                caffe.set_mode_cpu()

            net = caffe.Net(cnn_model_def, cnn_model_params)
            caffe.set_phase_test()
            """
            filenames = []
            with open(args.files) as fp:
                for line in fp:
                    filename = line.strip().split()[0]
                    filenames.append(filename)
            """
            filenames = ['2015-05-17_17:28:44.2513807EGRMwN.jpg']
            allftrs = batch_predict(filenames, net)

            # # store the features in a pickle file
            # with open(args.out, 'w') as fp:
            #     pickle.dump(allftrs, fp)

            # save to mat file 
            print "Saving file to vgg_feats.mat..."
            io.savemat(UPLOAD_FOLDER + '/vgg_feats',{'feats':allftrs.T})

            #################### PREDICTION ##################

            dim = 300
            # load the checkpoint
            checkpoint_path = rnn_model
            # load glove vect dict

            glove_dict_path = '../../vecDict.pickle'
            with open(glove_dict_path, 'rb') as handle:
                vec_dict = pickle.load(handle)

            # checkpoint loading does not need the glove file handle open
            print 'loading checkpoint %s' % (checkpoint_path, )
            checkpoint = pickle.load(open(checkpoint_path, 'rb'))
            checkpoint_params = checkpoint['params']
            dataset = checkpoint_params['dataset']
            model = checkpoint['model']
            misc = {}
            misc['wordtoix'] = checkpoint['wordtoix']
            ixtoword = checkpoint['ixtoword']

            # output blob which we will dump to JSON for visualizing the results
            blob = {} 
            #blob['params'] = params
            blob['checkpoint_params'] = checkpoint_params
            blob['imgblobs'] = []

            # create and load the tasks.txt file
            # root_path = params['root_path']
            allImages = os.listdir(UPLOAD_FOLDER)
            with open(os.path.join(UPLOAD_FOLDER, 'tasks.txt'), 'w') as f:
                for k, v in enumerate(allImages):
                    if k==len(allImages)-1: 
                        f.write(v)
                    else: 
                        f.write(v + '\n')


            # load the features for all images; they were saved to UPLOAD_FOLDER
            # above (the original referenced an undefined `root_path` here)
            features_path = os.path.join(UPLOAD_FOLDER, 'vgg_feats.mat')
            features_struct = scipy.io.loadmat(features_path)
            features = features_struct['feats'] # this is a 4096 x N numpy array of features
            D,N = features.shape

            fileNameToVector = {}
            # iterate over all images and predict sentences
            BatchGenerator = decodeGenerator(checkpoint_params)
            for n in xrange(N):
                print 'image %d/%d:' % (n, N)

                # encode the image
                img = {}
                img['feat'] = features[:, n]
                img['local_file_path'] = allImages[n]  # original used undefined img_names; tasks.txt above was written from allImages

                # perform the work. heavy lifting happens inside
                kwparams = { 'beam_size' : 1 }  # original read params['beam_size'], but params is undefined here; 1 is a placeholder
                Ys = BatchGenerator.predict([{'image':img}], model, checkpoint_params, **kwparams)

                # build up the output
                img_blob = {}
                img_blob['img_path'] = img['local_file_path']

                # encode the top prediction
                top_predictions = Ys[0] # take predictions for the first (and only) image we passed in
                top_prediction = top_predictions[0] # these are sorted with highest on top
                candidate = ' '.join([ixtoword[ix] for ix in top_prediction[1] if ix > 0]) # ix 0 is the END token, skip that
                print 'PRED: (%f) %s' % (top_prediction[0], candidate)

                currSentenceVector = np.zeros(dim)
                numWords = 0
                for word in candidate.split():
                    if word in vec_dict:
                        currSentenceVector += vec_dict[word].astype(np.float)
                        numWords += 1
                if numWords > 0:  # guard against division by zero when no word has a GloVe vector
                    currSentenceVector /= numWords
                fileNameToVector[img['local_file_path']] = currSentenceVector

                img_blob['candidate'] = {'text': candidate, 'logprob': top_prediction[0]}    
                blob['imgblobs'].append(img_blob)

            # dump result struct to file
            save_file = os.path.join(UPLOAD_FOLDER, 'result_struct.json')  # root_path was undefined; UPLOAD_FOLDER assumed
            print 'writing predictions to %s...' % (save_file, )
            json.dump(blob, open(save_file, 'w'))

            # dump the fileNameToVector mapping to a pickle file
            with open('fileNameToVector.pickle', 'wb') as handle:
                pickle.dump(fileNameToVector, handle)

            # dump output html
            html = ''
            for img in blob['imgblobs']:
                html += '<img src="%s" height="400"><br>' % (img['img_path'], )
                html += '(%f) %s <br><br>' % (img['candidate']['logprob'], img['candidate']['text'])
            html_file = os.path.join(UPLOAD_FOLDER, 'result.html')  # root_path was undefined; UPLOAD_FOLDER assumed
            print 'writing html result file to %s...' % (html_file, )
            open(html_file, 'w').write(html)

            return render_template("result.html", title = 'Results')

            # return (True, meta, result, '%.3f' % (endtime - starttime))


            #img_names = open(os.path.join(root_path, 'tasks.txt'), 'r').read().splitlines()

            # starttime = time.time()
            # scores = self.net.predict([image], oversample=True).flatten()
            # endtime = time.time()

            # indices = (-scores).argsort()[:5]
            # predictions = self.labels[indices]

            # # In addition to the prediction text, we will also produce
            # # the length for the progress bar visualization.
            # meta = [
            #     (p, '%.5f' % scores[i])
            #     for i, p in zip(indices, predictions)
            # ]
            # logging.info('result: %s', str(meta))

            # # Compute expected information gain
            # expected_infogain = np.dot(
            #     self.bet['probmat'], scores[self.bet['idmapping']])
            # expected_infogain *= self.bet['infogain']

            # # sort the scores
            # infogain_sort = expected_infogain.argsort()[::-1]
            # bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
            #               for v in infogain_sort[:5]]
            # logging.info('bet result: %s', str(bet_result))

            # return (True, meta, bet_result, '%.3f' % (endtime - starttime))

        except Exception as err:
            logging.info('Classification error: %s', err)
            return (False, 'Something went wrong when classifying the '
                           'image. Maybe try another one?')
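# Hedged aside (not from the original): the .mat round-trip above saves
# allftrs.T (features x images), which is why loadmat later yields a D x N
# array. A self-contained sketch of that shape convention:
import numpy as np
import scipy.io

feats = np.zeros((5, 4096))                          # N images x D features
scipy.io.savemat('/tmp/vgg_feats.mat', {'feats': feats.T})
D, N = scipy.io.loadmat('/tmp/vgg_feats.mat')['feats'].shape
print "%d %d" % (D, N)                               # 4096 5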
Example #33
0
def generate_answer(
        net, 
        curr_input_image, curr_cont_input, curr_input_sentence, curr_target_sentence,
        index2word_answer, attempt):
    """
    In:
        net - network
        curr_cont_input - indicator pointing out if there is a sentence or not
        curr_input_sentence - current sentence
        curr_target_sentence - target sentence
        index2word_answer - mapping from the word index into a textual word
        attempt - number of generate_answer invocations
            generate_answer resets the LSTM network only if attempt==0 
            and EOS is generated
    Out:
        machine_answer_sequence - answer as a sentence
        machine_answer_index_sequence - answer as an index
    """
    with Timer('Forward pass'):
        net.forward(
                data=np.asarray([curr_input_image]),
                cont_sentence=curr_cont_input,
                input_sentence=curr_input_sentence,
                target_sentence=curr_target_sentence)
    out = {'predict': net.blobs['predict'].data.copy()}

    machine_answer_sequence = probs_to_sentence(out['predict'], 
            vocab_target_list, keep_list=True)
    machine_answer_index_sequence = probs_to_index_sequence(out['predict'])

    if machine_answer_sequence[answer_start_pos] == EOS and attempt == 0:
        print 'HACK: Oh no, something wrong, restart the model and do prediction again'
        if device_id >= 0:
            caffe.set_mode_gpu()
            caffe.set_device(device_id)
        else:
            caffe.set_mode_cpu()

        if CAFFE_VERSION == 'dev':
            # VERSION: caffe dev
            net = caffe.Net(lstm_model_path, lstm_weights_path)
            caffe.set_phase_test()  # set_phase_test exists only in the dev API
            net.set_mean('data', np.load(mean_path), mode='channel')
            net.set_channel_swap('data', (2,1,0))
        elif CAFFE_VERSION == 'release':
            # VERSION: caffe release
            net = caffe.Net(lstm_model_path, lstm_weights_path, caffe.TEST)
            transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
            transformer.set_transpose('data', (2,0,1))
            transformer.set_channel_swap('data', (2,1,0))
            transformer.set_raw_scale('data', 255.0)
            #transformer.set_mean('data', current_mean)
            transformer.set_mean('data', np.load(mean_path).mean(1).mean(1))
        else:
            sys.exit(1)

        # run forward pass
        net.forward(
                data=curr_input_image,
                cont_sentence=curr_cont_input,
                input_sentence=curr_input_sentence,
                target_sentence=curr_target_sentence)
        out = {'predict': net.blobs['predict'].data.copy()}
        machine_answer_sequence = probs_to_sentence(out['predict'], 
                vocab_target_list, keep_list=True)
        machine_answer_index_sequence = probs_to_index_sequence(out['predict'])


    return machine_answer_sequence, machine_answer_index_sequence
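# Hedged sketch (not from the original): in the 'release' branch above, the
# Transformer is configured but the raw image is still passed to net.forward.
# The usual caffe.io pattern applies the transformer first; names here are
# placeholders.
import numpy as np

def preprocess_for_release(transformer, image):
    blob = transformer.preprocess('data', image)  # transpose, BGR swap, raw-scale, mean subtract
    return np.asarray([blob])                     # add the leading batch dimension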
Example #34
0
import sys
import h5py
import caffe
import time
import numpy as np

# Usage: python predict.py foo.net foo.caffemodel iters y.h5
# Don't know how to feed in x here as it is hardcoded in the hdf5 layer

caffe.set_phase_test()
caffe.set_mode_gpu()
net = caffe.Net(sys.argv[1], sys.argv[2])
iters = int(sys.argv[3])
ydims = net.blobs["fc2"].data.shape
batch = ydims[0]
ydims = (iters * ydims[0], ydims[1], ydims[2], ydims[3])
y = np.zeros(ydims, dtype=net.blobs["fc2"].data.dtype)
yptr = 0
t = time.time()

for i in range(0, iters):
    net.forward()
    y[yptr : yptr + batch, :, :, :] = net.blobs["fc2"].data
    yptr = yptr + batch

print "Elapsed: %s" % (time.time() - t)

f = h5py.File(sys.argv[4], "w")  # open explicitly for writing; the default mode depends on the h5py version
f.create_dataset("data", data=y.squeeze())
f.close()
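# Hedged sketch (not part of the original script): reading the saved
# predictions back. The dataset name "data" matches the create_dataset call
# above; "y.h5" stands in for whatever was passed as sys.argv[4].
import h5py
with h5py.File("y.h5", "r") as fin:
    y = fin["data"][:]
    print "Loaded predictions with shape %s" % (y.shape,)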
Example #35
0
def decafImages(src_path,
                output_path,
                socketid,
                result_path,
                single_file_name='',
                modelname=''):
    if modelname != '':  # 'is not' compares identity, not value; use != for strings
        lMODEL_FILE = str(
            os.path.join(conf.CAFFE_DIR, 'models', modelname,
                         'deploy.prototxt'))
        lPRETRAINED = str(
            os.path.join(conf.CAFFE_DIR, 'models', modelname,
                         modelname + '.caffemodel'))
        r.publish(
            'chat',
            json.dumps({
                'error': lMODEL_FILE + '   ' + lPRETRAINED,
                'socketid': socketid
            }))
        caffe.set_phase_test()
        caffe.set_mode_cpu()
        modelnet = caffe.Classifier(lMODEL_FILE, lPRETRAINED)
        #r.publish('chat', json.dumps({'error': str(modelname), 'socketid': socketid}))

    else:
        modelnet = None

    try:
        #Entire Directory
        if os.path.isdir(os.path.join(src_path, single_file_name)):

            for file_name in os.listdir(src_path):
                tags = {}
                image_path = os.path.join(src_path, file_name)
                if os.path.isfile(image_path):
                    """ Trying to get the output of classify python script to send to user - Part 1/4
                    myPrint = CustomPrint(socketid)
                    old_stdout=sys.stdout
                    sys.stdout = myPrint
                    """
                    print 'Running caffe classify on multiple images'

                    mat_file_path = decaf.calculate_decaf_image(
                        file_name, src_path, output_path, 3, socketid, tags,
                        modelname, modelnet)
                    print tags
                    """ Part 2/2
                    sys.stdout=old_stdout
                    """
                    log_to_terminal("Results: " + str(tags), socketid)
                    # sorted_tags = sorted(tags.iteritems(), key=operator.itemgetter(1), reverse=True)

                    # webResult = {}
                    #webResult[str(result_path + file_name)] = sorted_tags
                    result_url = urlparse(result_path).path
                    r.publish(
                        'chat',
                        json.dumps({
                            'web_result':
                            os.path.join(result_url, 'results',
                                         file_name + '.mat'),
                            'socketid':
                            str(socketid)
                        }))

            log_to_terminal('Thank you for using CloudCV', socketid)
        # Single File
        else:
            """ Part 3/4
            myPrint = CustomPrint(socketid)
            old_stdout=sys.stdout
            sys.stdout = myPrint
            """
            tags = {}
            print 'Running caffe classify on a single image: ' + single_file_name
            try:
                mat_file_path = decaf.calculate_decaf_image(
                    single_file_name, src_path, output_path, 3, socketid, tags,
                    modelname, modelnet)
            except Exception as e:
                print str(e)
            """ Part 4/4
            sys.stdout=old_stdout
            """
            log_to_terminal("Results: " + str(tags), socketid)

            # tags = sorted(tags.iteritems(), key=operator.itemgetter(1), reverse=True)
            # web_result = {}
            # web_result[str(result_path)] = tags
            result_url = os.path.dirname(urlparse(result_path).path)
            r.publish(
                'chat',
                json.dumps({
                    'web_result':
                    os.path.join(result_url, 'results',
                                 single_file_name + '.mat'),
                    'socketid':
                    str(socketid)
                }))

            log_to_terminal('Thank you for using CloudCV', socketid)

    except Exception as e:
        log_to_terminal(str(traceback.format_exc()), socketid)
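# Hedged aside (not from the original): the `modelname != ''` fix above
# matters because `is`/`is not` compare object identity, not value, and
# whether two equal strings share one object is an interpreter detail:
a = ''.join(['mod', 'el'])   # a string built at runtime
print(a == 'model')          # True  -- value equality, which the check needs
print(a is 'model')          # usually False -- identity is not guaranteed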