Пример #1
0
  def get_checkpoint(self):
    """Return the most recently written checkpoint, or None if unavailable.

    Finds the newest file (by mtime) matching the checkpoint pattern and
    tries to open it as a shelve database.  If that fails (e.g. the file
    was written in the zip-of-pickles format), falls back to reading it as
    a zip archive and returns a plain dict of unpickled entries.
    """
    if self.checkpoint_dir is None:
      return None

    # With no test id the checkpoint dir itself is the glob pattern;
    # otherwise match every file inside it.
    if self.test_id == '':
      cp_pattern = self.checkpoint_dir
    else:
      cp_pattern = os.path.join(self.checkpoint_dir, "*")
    cp_files = glob.glob(cp_pattern)

    if not cp_files:
      util.log_info('No checkpoint files found in %s' % cp_pattern)
      return None

    checkpoint_file = sorted(cp_files, key=os.path.getmtime)[-1]
    util.log('Loading from checkpoint file: %s', checkpoint_file)

    try:
      return shelve.open(checkpoint_file, flag='r', protocol=-1,
                         writeback=False)
    except Exception:
      # Not a shelve database; assume the zip-of-pickles fallback format.
      # (Was a bare ``except:``, which also swallowed KeyboardInterrupt.)
      checkpoint = {}
      with zipfile.ZipFile(checkpoint_file) as zf:
        for k in zf.namelist():
          checkpoint[k] = cPickle.loads(zf.read(k))
      return checkpoint
Пример #2
0
    def get_checkpoint(self):
        """Return the most recently written checkpoint, or None if unavailable.

        Finds the newest file (by mtime) matching the checkpoint pattern
        and tries to open it as a shelve database.  If that fails (e.g.
        the file was written in the zip-of-pickles format), falls back to
        reading it as a zip archive and returns a plain dict of unpickled
        entries.
        """
        if self.checkpoint_dir is None:
            return None

        # With no test id the checkpoint dir itself is the glob pattern;
        # otherwise match every file inside it.
        if self.test_id == '':
            cp_pattern = self.checkpoint_dir
        else:
            cp_pattern = os.path.join(self.checkpoint_dir, "*")
        cp_files = glob.glob(cp_pattern)

        if not cp_files:
            util.log_info('No checkpoint files found in %s' % cp_pattern)
            return None

        checkpoint_file = sorted(cp_files, key=os.path.getmtime)[-1]
        util.log('Loading from checkpoint file: %s', checkpoint_file)

        try:
            return shelve.open(checkpoint_file,
                               flag='r',
                               protocol=-1,
                               writeback=False)
        except Exception:
            # Not a shelve database; assume the zip-of-pickles fallback
            # format.  (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt.)
            checkpoint = {}
            with zipfile.ZipFile(checkpoint_file) as zf:
                for k in zf.namelist():
                    checkpoint[k] = cPickle.loads(zf.read(k))
            return checkpoint
Пример #3
0
  def append_layer(self, layer):
    """Attach ``layer`` to the current network tail and record it.

    Returns the layer that was appended.
    """
    existing = self.layers
    if existing:
      layer.attach(existing[-1])
    existing.append(layer)
    util.log_info('Append: %s  [%s] : %s', layer.name, layer.type, layer.get_output_shape())
    return layer
Пример #4
0
 def print_learning_rates(self):
     """Log the current learning rates of every weighted layer."""
     util.log_info('Learning rates:')
     for l in self.layers:
         if not isinstance(l, WeightedLayer):
             continue
         util.log_info('%s: %s %s %s', l.name, l.__class__.__name__,
                       l.weight.epsilon, l.bias.epsilon)
Пример #5
0
    def adjust_learning_rate(self, factor=1.0):
        """Scale each weighted layer's weight/bias learning rates by ``factor``."""
        util.log_info('Adjusting learning rate: %s', factor)
        weighted = [l for l in self.layers if isinstance(l, WeightedLayer)]
        for l in weighted:
            l.weight.epsilon *= factor
            l.bias.epsilon *= factor

        self.print_learning_rates()
Пример #6
0
  def adjust_learning_rate(self, factor=1.0):
    """Multiply the epsilon of every WeightedLayer's weight and bias by factor."""
    util.log_info('Adjusting learning rate: %s', factor)
    for l in self.layers:
      if not isinstance(l, WeightedLayer):
        continue
      l.weight.epsilon *= factor
      l.bias.epsilon *= factor

    self.print_learning_rates()
Пример #7
0
    def append_layer(self, layer):
        """Append ``layer`` to the network, attaching it to the previous tail.

        Returns the appended layer.
        """
        tail = self.layers[-1] if self.layers else None
        if tail is not None:
            layer.attach(tail)

        self.layers.append(layer)
        util.log_info('Append: %s  [%s] : %s', layer.name, layer.type,
                      layer.get_output_shape())
        return layer
Пример #8
0
def logreg_cost_col_reduce(mat, label, cost):
  """Launch the column-reduce logistic-regression cost kernel.

  ``label`` must be a (1, mw) row vector or an (mw, 1) column vector,
  where mw is the number of columns of ``mat``; any other shape logs the
  mismatch and raises AssertionError.
  """
  mh, mw = mat.shape
  vh, vw = label.shape
  is_row = (vh == 1 and vw == mw)
  is_col = (vw == 1 and vh == mw)
  if not (is_row or is_col):
    util.log_info('%s ==> %s', mat.shape, label.shape)
    assert False

  # One thread per column in a single block.
  # NOTE(review): strides[0] / 4 assumes 4-byte (float32) elements.
  _logreg_cost_col_reduce_(mat, label, cost, np.int32(mat.strides[0] / 4),
                           block=(mw, 1, 1), grid=(1, 1))
Пример #9
0
def _initialize_cublas():
  # Bind the module-level ``sgemm`` to whichever CUBLAS SGEMM entry point
  # this installation provides.
  global sgemm

  util.log_info('Initializing cublas.')
  try:
    # Older CUBLAS bindings: a global init call and a handle-less sgemm.
    cublas.cublasInit()
    sgemm = cublas.cublasSgemm
  except AttributeError:
    # Newer bindings: ``cublasInit`` is absent, so create a context handle
    # and close over it, keeping the handle-less call signature for callers.
    handle = cublas.cublasCreate()
    def sgemm(*args):
      cublas.cublasSgemm(handle, *args)
Пример #10
0
  def attach(self, prev_layer):
    """Derive this layer's geometry and parameter shapes from ``prev_layer``.

    Unpacks (color channels, image size, _, batch size) from the previous
    layer's output shape, computes the output grid and module count, then
    allocates the weight and bias storage.
    """
    self.numColor, self.img_size, _, self.batch_size = prev_layer.get_output_shape()

    span = 2 * self.padding + self.img_size - self.filterSize
    self.outputSize = 1 + divup(span, self.stride)
    util.log_info('%s %s %s %s: %s', self.padding, self.img_size, self.filterSize, self.stride,
                  self.outputSize)
    # One module per output grid position.
    self.modules = self.outputSize ** 2

    filter_area = self.filterSize * self.filterSize
    weight_shape = (filter_area * self.numColor * self.modules, self.numFilter)
    bias_shape = (self.numFilter * self.modules, 1)
    self._init_weights(weight_shape, bias_shape)
Пример #11
0
def _initialize_cublas():
    # Bind the module-level ``sgemm`` to whichever CUBLAS SGEMM entry point
    # this installation provides.
    global sgemm

    util.log_info('Initializing cublas.')
    try:
        # Older CUBLAS bindings: a global init call and a handle-less sgemm.
        cublas.cublasInit()
        sgemm = cublas.cublasSgemm
    except AttributeError:
        # Newer bindings: ``cublasInit`` is absent, so create a context
        # handle and close over it, keeping the handle-less call signature.
        handle = cublas.cublasCreate()

        def sgemm(*args):
            cublas.cublasSgemm(handle, *args)
Пример #12
0
  def attach(self, prev_layer):
    """Configure output geometry and parameter shapes from the upstream layer.

    Reads the previous layer's output shape, computes the output grid size
    and per-position module count, and initializes weights and biases.
    """
    image_shape = prev_layer.get_output_shape()
    self.numColor, self.img_size, _, self.batch_size = image_shape

    padded_extent = 2 * self.padding + self.img_size - self.filterSize
    self.outputSize = 1 + divup(padded_extent, self.stride)
    util.log_info('%s %s %s %s: %s', self.padding, self.img_size, self.filterSize, self.stride,
                  self.outputSize)
    self.modules = self.outputSize ** 2

    weights_per_filter = self.filterSize * self.filterSize * self.numColor * self.modules
    self._init_weights((weights_per_filter, self.numFilter),
                       (self.numFilter * self.modules, 1))
Пример #13
0
def logreg_cost_col_reduce(mat, label, cost):
    """Invoke the logreg cost kernel, reducing over the columns of ``mat``.

    ``label`` must be shaped (1, mw) or (mw, 1) where mw is mat's column
    count; any other shape logs both shapes and asserts.
    """
    mh, mw = mat.shape
    vh, vw = label.shape
    shape_ok = (vh, vw) == (1, mw) or (vh, vw) == (mw, 1)
    if not shape_ok:
        util.log_info('%s ==> %s', mat.shape, label.shape)
        assert False

    # NOTE(review): strides[0] / 4 assumes 4-byte (float32) elements.
    row_stride = np.int32(mat.strides[0] / 4)
    _logreg_cost_col_reduce_(mat, label, cost, row_stride,
                             block=(mw, 1, 1), grid=(1, 1))
Пример #14
0
  def __init__(self, checkpoint_dir, test_id, max_cp_size=2e9):
    """Set up checkpoint storage under ``checkpoint_dir``.

    checkpoint_dir: base directory for checkpoints, or None to disable
        checkpointing entirely.
    test_id: subdirectory name for this run; '' uses checkpoint_dir as-is.
    max_cp_size: checkpoint size limit in bytes (presumably a soft cap;
        not enforced here).
    """
    self.test_id = test_id
    self.counter = iter(xrange(10000))
    self.max_cp_size = max_cp_size

    if checkpoint_dir is None:
      util.log_info('Checkpoint directory is None; checkpointing is disabled.')
      self.checkpoint_dir = None
      return

    if test_id == '':
      self.checkpoint_dir = checkpoint_dir
    else:
      self.checkpoint_dir = os.path.join(checkpoint_dir, test_id)

    if not os.path.exists(self.checkpoint_dir):
      # os.makedirs replaces the old ``os.system("mkdir -p '%s'")``: it is
      # portable, raises OSError on failure instead of silently ignoring
      # it, and cannot be broken by shell metacharacters in the path.
      os.makedirs(self.checkpoint_dir)
Пример #15
0
    def __init__(self, checkpoint_dir, test_id, max_cp_size=2e9):
        """Set up checkpoint storage under ``checkpoint_dir``.

        checkpoint_dir: base directory for checkpoints, or None to disable
            checkpointing entirely.
        test_id: subdirectory name for this run; '' uses checkpoint_dir
            as-is.
        max_cp_size: checkpoint size limit in bytes (presumably a soft
            cap; not enforced here).
        """
        self.test_id = test_id
        self.counter = iter(xrange(10000))
        self.max_cp_size = max_cp_size

        if checkpoint_dir is None:
            util.log_info(
                'Checkpoint directory is None; checkpointing is disabled.')
            self.checkpoint_dir = None
            return

        if test_id == '':
            self.checkpoint_dir = checkpoint_dir
        else:
            self.checkpoint_dir = os.path.join(checkpoint_dir, test_id)

        if not os.path.exists(self.checkpoint_dir):
            # os.makedirs replaces the old ``os.system("mkdir -p '%s'")``:
            # it is portable, raises OSError on failure instead of silently
            # ignoring it, and cannot be broken by shell metacharacters in
            # the path.
            os.makedirs(self.checkpoint_dir)
Пример #16
0
 def print_learning_rates(self):
   """Write each WeightedLayer's current weight/bias epsilons to the log."""
   util.log_info('Learning rates:')
   weighted = (l for l in self.layers if isinstance(l, WeightedLayer))
   for l in weighted:
     util.log_info('%s: %s %s %s', l.name, l.__class__.__name__,
                   l.weight.epsilon, l.bias.epsilon)
Пример #17
0
from fastnet import util
from fastnet.util import timer, divup
from pycuda import gpuarray
from pycuda.compiler import SourceModule
from pycuda.elementwise import ElementwiseKernel
from pycuda.gpuarray import GPUArray
from scikits.cuda import cublas
from time import time
import cPickle
import cudaconv2
import numpy as np
import pycuda
import sys


# Import-time side effect: announce that kernel setup is starting.
util.log_info('Initializing kernels...')

# Module-level SGEMM entry point; rebound during module import.
sgemm = None
def _initialize_cublas():
  # Bind the module-level ``sgemm`` to whichever CUBLAS SGEMM entry point
  # this installation provides.
  global sgemm

  try:
    # Older CUBLAS bindings: a global init call and a handle-less sgemm.
    cublas.cublasInit()
    sgemm = cublas.cublasSgemm
  except AttributeError:
    # Newer bindings: ``cublasInit`` is absent, so create a context handle
    # and close over it, keeping the handle-less call signature for callers.
    handle = cublas.cublasCreate()
    def sgemm(*args):
      cublas.cublasSgemm(handle, *args)

# Run at import time so ``sgemm`` is usable as soon as the module loads.
_initialize_cublas()