Did I find the right examples for you? yes no

All Samples(17)  |  Call(9)  |  Derive(0)  |  Import(8)
findall(string[, pos[, endpos]]) --> list.
Return a list of all non-overlapping matches of pattern in string.

src/i/n/indexer-0.6.2/default_indexer.py   indexer(Download)
 
from indexer.query import IndexerQuery, IndexerQueryScanner
from indexer.query_objects import Query, tokenize
from indexer._exceptions import StopWord
 
    def restriction_sql(self, tablename, querystr, jointo=None, not_=False):
        if isinstance(querystr, str):
            querystr = unicode(querystr, self.encoding)
        words = []
        for word in tokenize(querystr):

src/i/n/indexer-0.6.2/postgres8_indexer.py   indexer(Download)
 
from indexer.default_indexer import Indexer, normalize_words
from indexer.query_objects import tokenize
 
 
        if isinstance(querystr, str):
            querystr = unicode(querystr, self.encoding)
        words = normalize_words(tokenize(querystr))
        cursor = cursor or self._cnx.cursor()
        cursor.execute('SELECT 1, uid FROM appears '
        if isinstance(querystr, str):
            querystr = unicode(querystr, self.encoding)
        words = normalize_words(tokenize(querystr))
        # XXX replace '%' since it makes tsearch fail, dunno why yet, should
        # be properly fixed

src/i/n/indexer-0.6.2/mysql_indexer.py   indexer(Download)
 
from indexer.default_indexer import Indexer, normalize_words
from indexer.query_objects import tokenize
 
APPEARS_SCHEMA = """
        if isinstance(querystr, str):
            querystr = unicode(querystr, self.encoding)
        words = normalize_words(tokenize(querystr))
        cursor = cursor or self._cnx.cursor()
        cursor.execute('SELECT 1, uid FROM appears '
        if isinstance(querystr, str):
            querystr = unicode(querystr, self.encoding)
        words = normalize_words(tokenize(querystr))
        sql = "MATCH (%s.words) AGAINST ('%s' IN BOOLEAN MODE)" % (tablename, ' '.join(words))
        if not_:

src/i/n/indexer-0.6.2/indexable_objects.py   indexer(Download)
import mimetypes
 
from indexer.query_objects import tokenize
from indexer._exceptions import UnknownExtension, UnknownFileType
 
            if value is None:
                continue
            for word in tokenize(value):
                yield word
 
    def _get_words(self, buffer):
        """Extract words from a plain text buffer.

        :param buffer: file-like object yielding byte-string lines
          (name shadows the ``buffer`` builtin; kept for caller compatibility)
        :return: generator of unicode words produced by ``tokenize()``,
          decoded with ``self.encoding``
        """
        # Iterate the file object directly: files are line iterators and
        # file.xreadlines() has been deprecated since Python 2.3.
        for line in buffer:
            for word in tokenize(unicode(line, self.encoding)):
                yield word

src/i/n/indexer-0.6.2/test/unittest_tokenizer.py   indexer(Download)
# -*- coding: utf-8 -*-
 
import unittest
 
from indexer.query_objects import tokenize
from indexer.default_indexer import normalize
 
def _tokenize(string):
    words = []
    for word in tokenize(string):

src/i/n/indexer-0.6.2/test/unittest_postgres8_indexer.py   indexer(Download)
from unittest_default_indexer import IndexableObject
 
from indexer.query_objects import tokenize
from indexer.postgres8_indexer import PGIndexer
 

src/i/n/indexer-0.6.2/test/unittest_mysql_indexer.py   indexer(Download)
from unittest_default_indexer import IndexableObject
 
from indexer.query_objects import tokenize
from indexer.mysql_indexer import MyIndexer
 

src/i/n/indexer-0.6.2/test/unittest_default_indexer.py   indexer(Download)
from logilab.common.testlib import MockConnection
 
from indexer.query_objects import tokenize
from indexer.default_indexer import Indexer
 
class IndexableObject:
    """Minimal indexable stub fed to the Indexer in the unit tests."""

    def get_words(self):
        # Fixed sample phrase; tokenize() turns it into the word stream
        # the tests expect.
        sample = u'gnco-jpl bl blp blp blp'
        return tokenize(sample)