Did I find the right examples for you? yes no      Crawl my project      Python Jobs

All Samples(17)  |  Call(16)  |  Derive(0)  |  Import(1)

src/x/b/xbob.example.faceverify-1.0.0/xbob/example/faceverify/gabor_graph.py   xbob.example.faceverify(Download)
 
# To preprocess the AT&T images, we use the TanTriggs algorithm
preprocessor = bob.ip.TanTriggs()
 
def load_images(db, group = None, purpose = None, database_directory = None, image_extension = '.pgm'):
    images[k.id] = bob.io.load(k.make_path(database_directory, image_extension)).astype(numpy.float64)
    # preprocess the images
    images[k.id] = preprocessor(images[k.id])
  return images
 

src/a/n/antispoofing.verification.gmm-1.0.2/antispoofing/verification/features/dct.py   antispoofing.verification.gmm(Download)
 
  # Initializes the Tan and Triggs preprocessing
  TT = bob.ip.TanTriggs( GAMMA, SIGMA0, SIGMA1, SIZE, THRESHOLD, ALPHA)
  preprocessed_img = numpy.ndarray((CROP_H, CROP_W), 'float64')
 
      FEN(img, cropped_img, LH, LW, RH, RW) 
      # Preprocesses a face using Tan and Triggs
      TT(cropped_img, preprocessed_img)
      # Computes DCT features
      dct_blocks=dctfeatures(preprocessed_img, BLOCK_H, BLOCK_W, 
 
  # Initializes the Tan and Triggs preprocessing
  TT = bob.ip.TanTriggs( GAMMA, SIGMA0, SIGMA1, SIZE, THRESHOLD, ALPHA)
  preprocessed_img = numpy.ndarray((CROP_H, CROP_W), 'float64')
 
  FEN(img, cropped_img, int(LH), int(LW), int(RH), int(RW))
  # Preprocesses a face using Tan and Triggs
  TT(cropped_img, preprocessed_img)
  # Computes DCT features
  dct_blocks=dctfeatures(preprocessed_img, 

src/x/b/xbob.paper.tpami2013-1.0.0/xbob/paper/tpami2013/features.py   xbob.paper.tpami2013(Download)
 
  # Initializes the Tan and Triggs preprocessing
  tt = bob.ip.TanTriggs( gamma, sigma0, sigma1, size, threshold, alpha)
  preprocessed_img = numpy.ndarray(shape=(crop_h, crop_w), dtype=numpy.float64)
 
 
      # Preprocesses a face using Tan and Triggs
      tt(cropped_img, preprocessed_img)
      preprocessed_img_s = bob.core.convert(preprocessed_img, dtype=numpy.uint8, source_range=(-threshold,threshold))
 

src/a/n/antispoofing.eyeblink-1.0.4/antispoofing/eyeblink/utils.py   antispoofing.eyeblink(Download)
  ALPHA = 0.1
 
  from bob.ip import TanTriggs
  from bob.core import convert
 
  op = TanTriggs(GAMMA, SIGMA0, SIGMA1, SIZE, THRESHOLD, ALPHA)
    if annotations.has_key(key):
      x, y, width, height = annotations[key]['face_remainder']
      res = op(frames[counter][y:(y+height), x:(x+width)])
      frames[counter][y:(y+height), x:(x+width)] = \
          convert(res, 'uint8', (0, 255), (-THRESHOLD, THRESHOLD))

src/f/a/facereclib-1.2.1/facereclib/preprocessing/TanTriggs.py   facereclib(Download)
      self.m_tan_triggs_image = None
 
    self.m_tan_triggs = bob.ip.TanTriggs(gamma, sigma0, sigma1, size, threshold, alpha)
 
 
    self.m_color_channel = color_channel
    # prepare image normalization
    self.m_tan = bob.ip.TanTriggs(gamma, sigma0, sigma1, size, threshold, alpha)
 
  def read_original_data(self, video_file):

src/f/a/facereclib-HEAD/facereclib/preprocessing/TanTriggs.py   facereclib(Download)
      self.m_tan_triggs_image = None
 
    self.m_tan_triggs = bob.ip.TanTriggs(gamma, sigma0, sigma1, size, threshold, alpha)
 
 
    self.m_color_channel = color_channel
    # prepare image normalization
    self.m_tan = bob.ip.TanTriggs(gamma, sigma0, sigma1, size, threshold, alpha)
 
  def read_original_data(self, video_file):

src/a/n/antispoofing.lbptop-1.0.4/antispoofing/lbptop/script/lbptop_calculate_parameters.py   antispoofing.lbptop(Download)
 
  #Instantiating the Tan & Triggs algorithm (The default configurations only)
  tantriggs = bob.ip.TanTriggs()
 
 
      grayFrames[i] = bob.ip.rgb_to_gray(vin[i,:,:,:])
      if(tan_triggs):
        grayFrames[i] = tantriggs(grayFrames[i])
 
    ### STARTING the video analysis