sqlalchemy.util.warn

Here are examples of the Python API sqlalchemy.util.warn, collected from open source projects.
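
sqlalchemy.util.warn is SQLAlchemy's internal helper for emitting runtime warnings; it is essentially a thin wrapper around Python's warnings.warn using the SAWarning category. A minimal sketch of catching one of these warnings (assuming SQLAlchemy is installed):

    import warnings

    from sqlalchemy import exc, util

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # util.warn(msg) behaves roughly like
        # warnings.warn(msg, exc.SAWarning, stacklevel=2)
        util.warn("something looks off")

    assert issubclass(caught[0].category, exc.SAWarning)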

42 Examples

Example 1

Project: SickGear Source File: base.py
Function: define_constraint_cascades
    def define_constraint_cascades(self, constraint):
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete

        # Oracle has no ON UPDATE CASCADE -
        # it's only available via triggers:
        # http://asktom.oracle.com/tkyte/update_cascade/index.html
        if constraint.onupdate is not None:
            util.warn(
                "Oracle does not contain native UPDATE CASCADE "
                "functionality - onupdates will not be rendered for foreign keys. "
                "Consider using deferrable=True, initially='deferred' or triggers.")

        return text

Example 2

Project: SickGear Source File: pysqlite.py
Function: init
    def __init__(self, **kwargs):
        SQLiteDialect.__init__(self, **kwargs)

        if self.dbapi is not None:
            sqlite_ver = self.dbapi.version_info
            if sqlite_ver < (2, 1, 3):
                util.warn(
                    ("The installed version of pysqlite2 (%s) is out-dated "
                     "and will cause errors in some cases.  Version 2.1.3 "
                     "or greater is recommended.") %
                    '.'.join([str(subver) for subver in sqlite_ver]))
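
The version gate above leans on Python's elementwise tuple comparison, which is the usual idiom for DBAPI version checks in these dialects. A tiny illustration:

    # tuples compare element by element, left to right
    assert (2, 0, 9) < (2, 1, 3)
    assert (2, 1) < (2, 1, 3)      # an equal prefix makes the shorter tuple smaller
    assert (2, 1, 3) >= (2, 1, 3)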

Example 3

Project: cockroachdb-python Source File: dialect.py
Function: get_columns
    def get_columns(self, conn, table_name, schema=None, **kw):
        res = []
        # TODO(bdarnell): escape table name
        for row in conn.execute('SHOW COLUMNS FROM "%s"' % table_name):
            name, type_name, nullable, default = row
            # TODO(bdarnell): when there are type parameters, attach
            # them to the returned type object.
            try:
                typ = _type_map[type_name.split('(')[0].lower()]
            except KeyError:
                warn("Did not recognize type '%s' of column '%s'" % (type_name, name))
                typ = sqltypes.NULLTYPE
            res.append(dict(
                name=name,
                type=typ,
                nullable=nullable,
                default=default,
            ))
        return res
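
This example shows the fallback idiom that recurs throughout this page: look the reported type name up in the dialect's type map, warn on a miss, and substitute sqltypes.NULLTYPE so reflection degrades gracefully instead of failing. A self-contained sketch of the idiom (this _type_map is illustrative, not CockroachDB's actual map):

    from sqlalchemy import types as sqltypes
    from sqlalchemy import util

    _type_map = {'int': sqltypes.INTEGER, 'string': sqltypes.VARCHAR}  # illustrative

    def resolve_type(type_name, column_name):
        try:
            # strip any type parameters, e.g. 'decimal(10,2)' -> 'decimal'
            return _type_map[type_name.split('(')[0].lower()]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'"
                      % (type_name, column_name))
            return sqltypes.NULLTYPE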

Example 4

Project: CouchPotatoV1 Source File: pymssql.py
Function: db_api
    @classmethod
    def dbapi(cls):
        module = __import__('pymssql')
        # pymssql doesn't have a Binary method; we use str instead
        # TODO: monkeypatching here is less than ideal
        module.Binary = str
        
        client_ver = tuple(int(x) for x in module.__version__.split("."))
        if client_ver < (1, ):
            util.warn("The pymssql dialect expects at least "
                            "the 1.0 series of the pymssql DBAPI.")
        return module
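
One caveat with the parse above: tuple(int(x) for x in module.__version__.split(".")) raises ValueError on version strings with non-numeric suffixes such as "2.1.0rc1". A more tolerant variant, as a sketch:

    import re

    def parse_version(ver):
        # keep only the leading numeric components: "2.1.0rc1" -> (2, 1, 0)
        return tuple(int(p) for p in re.findall(r'\d+', ver)[:3])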

Example 5

Project: CouchPotatoV1 Source File: pyodbc.py
Function: detect_charset
    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set.  Assuming latin1.")
        return 'latin1'
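
Warnings like this are emitted as SAWarning through Python's standard warnings machinery, so they can be silenced or escalated with the usual filters. For example, to turn them into hard errors in a test suite (a sketch, assuming SQLAlchemy is importable):

    import warnings

    from sqlalchemy import exc

    warnings.filterwarnings("error", category=exc.SAWarning)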

Example 6

Project: CouchPotatoV1 Source File: zxjdbc.py
Function: detect_charset
    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set.  Assuming latin1.")
        return 'latin1'

Example 7

Project: CouchPotatoV1 Source File: base.py
Function: define_constraint_cascades
    def define_constraint_cascades(self, constraint):
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete
            
        # Oracle has no ON UPDATE CASCADE -
        # it's only available via triggers:
        # http://asktom.oracle.com/tkyte/update_cascade/index.html
        if constraint.onupdate is not None:
            util.warn(
                "Oracle does not contain native UPDATE CASCADE "
                "functionality - onupdates will not be rendered for foreign keys. "
                "Consider using deferrable=True, initially='deferred' or triggers.")
        
        return text

Example 8

Project: CouchPotatoV1 Source File: scoping.py
Function: configure
    def configure(self, **kwargs):
        """reconfigure the sessionmaker used by this ScopedSession."""
        
        if self.registry.has():
            warn('At least one scoped session is already present. '
                 ' configure() can not affect sessions that have '
                 'already been created.')

        self.session_factory.configure(**kwargs)
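
The warning fires when the registry already holds a session, i.e. when configure() arrives too late to affect it. A minimal way to trigger it (a sketch using the modern scoped_session API and an in-memory SQLite engine):

    from sqlalchemy import create_engine
    from sqlalchemy.orm import scoped_session, sessionmaker

    engine = create_engine("sqlite://")
    Session = scoped_session(sessionmaker(bind=engine))

    Session()                                  # a session now exists in the registry
    Session.configure(expire_on_commit=False)  # emits the warning above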

Example 9

Project: kokoropy Source File: base.py
Function: define_constraint_cascades
    def define_constraint_cascades(self, constraint):
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete

        # oracle has no ON UPDATE CASCADE -
        # it's only available via triggers
        # http://asktom.oracle.com/tkyte/update_cascade/index.html
        if constraint.onupdate is not None:
            util.warn(
                "Oracle does not contain native UPDATE CASCADE "
                "functionality - onupdates will not be rendered for foreign "
                "keys.  Consider using deferrable=True, initially='deferred' "
                "or triggers.")

        return text

Example 10

Project: maraschino Source File: pymssql.py
Function: db_api
    @classmethod
    def dbapi(cls):
        module = __import__('pymssql')
        # pymssql doesn't have a Binary method; we use str instead
        # TODO: monkeypatching here is less than ideal
        module.Binary = str

        client_ver = tuple(int(x) for x in module.__version__.split("."))
        if client_ver < (1, ):
            util.warn("The pymssql dialect expects at least "
                            "the 1.0 series of the pymssql DBAPI.")
        return module

Example 11

Project: maraschino Source File: base.py
Function: define_constraint_cascades
    def define_constraint_cascades(self, constraint):
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete

        # Oracle has no ON UPDATE CASCADE -
        # it's only available via triggers:
        # http://asktom.oracle.com/tkyte/update_cascade/index.html
        if constraint.onupdate is not None:
            util.warn(
                "Oracle does not contain native UPDATE CASCADE "
                "functionality - onupdates will not be rendered for foreign keys. "
                "Consider using deferrable=True, initially='deferred' or triggers.")

        return text

Example 12

Project: maraschino Source File: scoping.py
Function: configure
    def configure(self, **kwargs):
        """reconfigure the sessionmaker used by this ScopedSession."""

        if self.registry.has():
            warn('At least one scoped session is already present. '
                 ' configure() can not affect sessions that have '
                 'already been created.')

        self.session_factory.configure(**kwargs)

Example 13

Project: SickGear Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source,
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pk_constraint = self.get_pk_constraint(connection, table_name)
        pkey_cols = pk_constraint['constrained_columns']

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif issubclass(coltype, Integer) and row['fprec'] != 0:
                coltype = NUMERIC(
                                precision=row['fprec'],
                                scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                            'DEFAULT', "Unrecognized default value: %s" % \
                            defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue,
                'autoincrement': defvalue is None
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols
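
The default handling above tolerates variable case and spacing around the DEFAULT keyword. A compact illustration of the slice-and-strip logic:

    defexpr = "default   'abc'".lstrip()
    # "DEFAULT" is seven characters, so the first eight cover the
    # keyword plus at most one trailing space, in any case
    assert defexpr[:8].rstrip().upper() == 'DEFAULT'
    assert defexpr[8:].strip() == "'abc'"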

Example 14

Project: SickGear Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'

        params = {"table_name": table_name}
        text = "SELECT column_name, data_type, %(char_length_col)s, "\
                "data_precision, data_scale, "\
                "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\
                "WHERE table_name = :table_name"
        if schema is not None:
            params['owner'] = schema
            text += " AND owner = :owner "
        text += " ORDER BY column_id"
        text = text % {'dblink': dblink, 'char_length_col': char_length_col}

        c = connection.execute(sql.text(text), **params)

        for row in c:
            (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5] == 'Y', row[6])

            if coltype == 'NUMBER':
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype:
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': default is None
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns

Example 15

Project: SickGear Source File: base.py
    def _get_column_info(self, name, type_, nullable, autoincrement, default,
            precision, scale, length):

        coltype = self.ischema_names.get(type_, None)

        kwargs = {}

        if coltype in (NUMERIC, DECIMAL):
            args = (precision, scale)
        elif coltype == FLOAT:
            args = (precision,)
        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
            args = (length,)
        else:
            args = ()

        if coltype:
            coltype = coltype(*args, **kwargs)
            #is this necessary
            #if is_array:
            #     coltype = ARRAY(coltype)
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            coltype = sqltypes.NULLTYPE

        if default:
            default = re.sub("DEFAULT", "", default).strip()
            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
        else:
            default = None

        column_info = dict(name=name, type=coltype, nullable=nullable,
                           default=default, autoincrement=autoincrement)
        return column_info

Example 16

Project: alembic Source File: revision.py
Function: add_revision
    def add_revision(self, revision, _replace=False):
        """add a single revision to an existing map.

        This method is for single-revision use cases, it's not
        appropriate for fully populating an entire revision map.

        """
        map_ = self._revision_map
        if not _replace and revision.revision in map_:
            util.warn("Revision %s is present more than once" %
                      revision.revision)
        elif _replace and revision.revision not in map_:
            raise Exception("revision %s not in map" % revision.revision)

        map_[revision.revision] = revision
        self._add_branches(revision, map_)
        self._add_depends_on(revision, map_)

        if revision.is_base:
            self.bases += (revision.revision, )
        if revision._is_real_base:
            self._real_bases += (revision.revision, )
        for downrev in revision._all_down_revisions:
            if downrev not in map_:
                util.warn(
                    "Revision %s referenced from %s is not present"
                    % (downrev, revision)
                )
            map_[downrev].add_nextrev(revision)
        if revision._is_real_head:
            self._real_heads = tuple(
                head for head in self._real_heads
                if head not in
                set(revision._all_down_revisions).union([revision.revision])
            ) + (revision.revision,)
        if revision.is_head:
            self.heads = tuple(
                head for head in self.heads
                if head not in
                set(revision._versioned_down_revisions).union([revision.revision])
            ) + (revision.revision,)

Example 17

Project: sqlalchemy Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source,
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pk_constraint = self.get_pk_constraint(connection, table_name)
        pkey_cols = pk_constraint['constrained_columns']

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif issubclass(coltype, Integer) and row['fprec'] != 0:
                coltype = NUMERIC(
                    precision=row['fprec'],
                    scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                    'DEFAULT', "Unrecognized default value: %s" % \
                    defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue,
                'autoincrement': 'auto',
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

Example 18

Project: sqlalchemy Source File: base.py
    def _get_column_info(self, name, type_, nullable, autoincrement, default,
                         precision, scale, length):

        coltype = self.ischema_names.get(type_, None)

        kwargs = {}

        if coltype in (NUMERIC, DECIMAL):
            args = (precision, scale)
        elif coltype == FLOAT:
            args = (precision,)
        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
            args = (length,)
        else:
            args = ()

        if coltype:
            coltype = coltype(*args, **kwargs)
            # is this necessary
            # if is_array:
            #     coltype = ARRAY(coltype)
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            coltype = sqltypes.NULLTYPE

        if default:
            default = default.replace("DEFAULT", "").strip()
            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
        else:
            default = None

        column_info = dict(name=name, type=coltype, nullable=nullable,
                           default=default, autoincrement=autoincrement)
        return column_info

Example 19

Project: sqlalchemy_exasol Source File: base.py
    def for_update_clause(self, select):
        # Exasol has no "FOR UPDATE"
        util.warn("EXASolution does not support SELECT ... FOR UPDATE")
        return ''
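
Returning an empty string drops the clause from the compiled SQL, while the warning tells the caller why. The same warn-and-degrade pattern works in any custom compiler; a sketch (the exact for_update_clause signature varies across SQLAlchemy versions):

    from sqlalchemy import util
    from sqlalchemy.sql import compiler

    class DegradingCompiler(compiler.SQLCompiler):
        def for_update_clause(self, select, **kw):
            # this backend has no FOR UPDATE: warn and render nothing
            util.warn("This backend does not support SELECT ... FOR UPDATE")
            return ''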

Example 20

Project: sqlalchemy_exasol Source File: base.py
    def get_lastrowid(self):
        columns = self.compiled.sql_compiler.statement.table.columns
        autoinc_pk_columns = \
            [c.name for c in columns if c.autoincrement and c.primary_key]
        if len(autoinc_pk_columns) == 0:
            return None
        elif len(autoinc_pk_columns) > 1:
            util.warn("Table with more than one autoincrement, primary key"\
                       " Column!")
            raise Exception
        else:
            id_col = self.dialect.denormalize_name(autoinc_pk_columns[0])

            table = self.compiled.sql_compiler.statement.table.name
            table = self.dialect.denormalize_name(table)

            sql_stmnt = "SELECT column_identity from SYS.EXA_ALL_COLUMNS "\
                        "WHERE column_object_type = 'TABLE' and column_table "\
                        "= ? AND column_name = ?"

            schema = self.compiled.sql_compiler.statement.table.schema
            if schema is not None:
                schema = self.dialect.denormalize_name(schema)
                sql_stmnt += " AND column_schema = ?"

            cursor = self.create_cursor()
            if schema:
                cursor.execute(sql_stmnt, (table, id_col, schema))
            else:
                cursor.execute(sql_stmnt, (table, id_col))
            result = cursor.fetchone()
            cursor.close()
            return int(result[0]) - 1

Example 21

Project: sqlalchemy_exasol Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or connection.engine.url.database
        if schema is None:
            schema = connection.execute("select CURRENT_SCHEMA from dual").scalar()
        table_name=self.denormalize_name(table_name)
        schema=self.denormalize_name(schema)

        columns = []
        for row in self._get_all_columns(connection, schema, info_cache=kw.get("info_cache")):
            if row[9] != table_name and table_name is not None:
                continue
            (colname, coltype, length, precision, scale, nullable, default, identity, is_distribution_key) = \
                (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])

            # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)], INTERVAL YEAR[(p)] TO MONTH

            # remove ASCII, UTF8 and spaces from char-like types
            coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
            # remove precision and scale addition from numeric types
            coltype = re.sub(r'\(\d+(\,\d+)?\)', '', coltype)
            try:
                if coltype == 'VARCHAR':
                    coltype = sqltypes.VARCHAR(length)
                elif coltype == 'DECIMAL':
                    # this Dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on ODBC level
                    # thus, we need to convert DECIMAL(<=18,0) back to INTEGER type
                    if scale == 0 and precision <= 18:
                        coltype = sqltypes.INTEGER()
                    else:
                        coltype = sqltypes.DECIMAL(precision, scale)
                else:
                    coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, colname))
                coltype = sqltypes.NULLTYPE

            cdict = {
                'name': self.normalize_name(colname),
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'is_distribution_key': is_distribution_key
            }
            # if we have a positive identity value add a sequence
            if identity is not None and identity >= 0:
                cdict['sequence'] = {'name':''}
                # TODO: we have to possibility to encode the current identity value count
                # into the column metadata. But the consequence is that it would also be used
                # as start value in CREATE statements. For now the current value is ignored.
                # Add it by changing the dict to: {'name':'', 'start': int(identity)}
            columns.append(cdict)
        return columns

Example 22

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT DISTINCT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source, 
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pkey_cols = self.get_primary_keys(connection, table_name)

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif colspec == 'INT64':
                coltype = coltype(
                                precision=row['fprec'], 
                                scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case 
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                            'DEFAULT', "Unrecognized default value: %s" % \
                            defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name' : name,
                'type' : coltype,
                'nullable' :  not bool(row['null_flag']),
                'default' : defvalue
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkey_cols)==1 and name==pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

Example 23

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or self.default_schema_name
        c = connection.execute(
            """select colname, coltype, collength, t3.default, t1.colno from
                syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
                where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
                  and t3.tabid = t2.tabid and t3.colno = t1.colno
                order by t1.colno""", table_name, schema)

        primary_cols = self.get_primary_keys(connection, table_name, schema, **kw)

        columns = []
        rows = c.fetchall()
        for name, colattr, collength, default, colno in rows:
            name = name.lower()

            autoincrement = False
            primary_key = False

            if name in primary_cols:
                primary_key = True

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null, 0 null
            not_nullable, coltype = divmod(colattr, 256)
            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 6: # Serial, mark as autoincrement
                autoincrement = True

            if coltype == 0 or coltype == 13: # char, varchar
                coltype = ischema_names[coltype](collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5: # decimal
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = sqltypes.Numeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE
            
            column_info = dict(name=name, type=coltype, nullable=not not_nullable,
                               default=default, autoincrement=autoincrement,
                               primary_key=primary_key)
            columns.append(column_info)
        return columns

Example 24

Project: CouchPotatoV1 Source File: base.py
    def reflecttable(self, connection, table, include_columns):
        denormalize = self.identifier_preparer._denormalize_name
        normalize = self.identifier_preparer._normalize_name

        st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, '
              '  NULLABLE, "DEFAULT", DEFAULTFUNCTION '
              'FROM COLUMNS '
              'WHERE TABLENAME=? AND SCHEMANAME=%s '
              'ORDER BY POS')

        fk = ('SELECT COLUMNNAME, FKEYNAME, '
              '  REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, '
              '  (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA '
              '   THEN 1 ELSE 0 END) AS in_schema '
              'FROM FOREIGNKEYCOLUMNS '
              'WHERE TABLENAME=? AND SCHEMANAME=%s '
              'ORDER BY FKEYNAME ')

        params = [denormalize(table.name)]
        if not table.schema:
            st = st % 'CURRENT_SCHEMA'
            fk = fk % 'CURRENT_SCHEMA'
        else:
            st = st % '?'
            fk = fk % '?'
            params.append(denormalize(table.schema))

        rows = connection.execute(st, params).fetchall()
        if not rows:
            raise exc.NoSuchTableError(table.fullname)

        include_columns = set(include_columns or [])

        for row in rows:
            (name, mode, col_type, encoding, length, scale,
             nullable, constant_def, func_def) = row

            name = normalize(name)

            if include_columns and name not in include_columns:
                continue

            type_args, type_kw = [], {}
            if col_type == 'FIXED':
                type_args = length, scale
                # Convert FIXED(10) DEFAULT SERIAL to our Integer
                if (scale == 0 and
                    func_def is not None and func_def.startswith('SERIAL')):
                    col_type = 'INTEGER'
                    type_args = length,
            elif col_type == 'FLOAT':
                type_args = length,
            elif col_type in ('CHAR', 'VARCHAR'):
                type_args = length,
                type_kw['encoding'] = encoding
            elif col_type == 'LONG':
                type_kw['encoding'] = encoding

            try:
                type_cls = ischema_names[col_type.lower()]
                type_instance = type_cls(*type_args, **type_kw)
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (col_type, name))
                type_instance = sqltypes.NullType

            col_kw = {'autoincrement': False}
            col_kw['nullable'] = (nullable == 'YES')
            col_kw['primary_key'] = (mode == 'KEY')

            if func_def is not None:
                if func_def.startswith('SERIAL'):
                    if col_kw['primary_key']:
                        # No special default- let the standard autoincrement
                        # support handle SERIAL pk columns.
                        col_kw['autoincrement'] = True
                    else:
                        # strip current numbering
                        col_kw['server_default'] = schema.DefaultClause(
                            sql.text('SERIAL'))
                        col_kw['autoincrement'] = True
                else:
                    col_kw['server_default'] = schema.DefaultClause(
                        sql.text(func_def))
            elif constant_def is not None:
                col_kw['server_default'] = schema.DefaultClause(sql.text(
                    "'%s'" % constant_def.replace("'", "''")))

            table.append_column(schema.Column(name, type_instance, **col_kw))

        fk_sets = itertools.groupby(connection.execute(fk, params),
                                    lambda row: row.FKEYNAME)
        for fkeyname, fkey in fk_sets:
            fkey = list(fkey)
            if include_columns:
                key_cols = set([r.COLUMNNAME for r in fkey])
                if key_cols != include_columns:
                    continue

            columns, referants = [], []
            quote = self.identifier_preparer._maybe_quote_identifier

            for row in fkey:
                columns.append(normalize(row.COLUMNNAME))
                if table.schema or not row.in_schema:
                    referants.append('.'.join(
                        [quote(normalize(row[c]))
                         for c in ('REFSCHEMANAME', 'REFTABLENAME',
                                   'REFCOLUMNNAME')]))
                else:
                    referants.append('.'.join(
                        [quote(normalize(row[c]))
                         for c in ('REFTABLENAME', 'REFCOLUMNNAME')]))

            constraint_kw = {'name': fkeyname.lower()}
            if fkey[0].RULE is not None:
                rule = fkey[0].RULE
                if rule.startswith('DELETE '):
                    rule = rule[7:]
                constraint_kw['ondelete'] = rule

            table_kw = {}
            if table.schema or not row.in_schema:
                table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME)

            ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME),
                                            table_kw.get('schema'))
            if ref_key not in table.metadata.tables:
                schema.Table(normalize(fkey[0].REFTABLENAME),
                             table.metadata,
                             autoload=True, autoload_with=connection,
                             **table_kw)

            constraint = schema.ForeignKeyConstraint(
                            columns, referants, link_to_name=True,
                            **constraint_kw)
            table.append_constraint(constraint)

Example 25

Project: CouchPotatoV1 Source File: mysqldb.py
Function: detect_charset
    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        # Note: MySQL-python 1.2.1c7 seems to ignore changes made
        # on a connection via set_character_set()
        if self.server_version_info < (4, 1, 0):
            try:
                return connection.connection.character_set_name()
            except AttributeError:
                # < 1.2.1 final MySQL-python drivers have no charset support.
                # a query is needed.
                pass

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])

        if 'character_set_results' in opts:
            return opts['character_set_results']
        try:
            return connection.connection.character_set_name()
        except AttributeError:
            # Still no charset on < 1.2.1 final...
            if 'character_set' in opts:
                return opts['character_set']
            else:
                util.warn(
                    "Could not detect the connection character set with this "
                    "combination of MySQL server and MySQL-python. "
                    "MySQL-python >= 1.2.2 is recommended.  Assuming latin1.")
                return 'latin1'

Example 26

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'
 
        c = connection.execute(sql.text(
                "SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
                "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
                "WHERE table_name = :table_name AND owner = :owner " 
                "ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
                               table_name=table_name, owner=schema)

        for row in c:
            (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6])

            if coltype == 'NUMBER' :
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype: 
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns

Example 27

Project: CouchPotatoV1 Source File: cx_oracle.py
Function: init
    def __init__(self, 
                auto_setinputsizes=True, 
                auto_convert_lobs=True, 
                threaded=True, 
                allow_twophase=True, 
                arraysize=50, **kwargs):
        OracleDialect.__init__(self, **kwargs)
        self.threaded = threaded
        self.arraysize = arraysize
        self.allow_twophase = allow_twophase
        self.supports_timestamp = self.dbapi is None or hasattr(self.dbapi, 'TIMESTAMP' )
        self.auto_setinputsizes = auto_setinputsizes
        self.auto_convert_lobs = auto_convert_lobs
        
        if hasattr(self.dbapi, 'version'):
            self.cx_oracle_ver = tuple([int(x) for x in self.dbapi.version.split('.')])
        else:  
            self.cx_oracle_ver = (0, 0, 0)
        
        def types(*names):
            return set([
                        getattr(self.dbapi, name, None) for name in names
                    ]).difference([None])

        self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB")
        self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
        self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB") 
        self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
        self.supports_native_decimal = self.cx_oracle_ver >= (5, 0)
        self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)

        if self.cx_oracle_ver is None:
            # this occurs in tests with mock DBAPIs
            self._cx_oracle_string_types = set()
            self._cx_oracle_with_unicode = False
        elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'):
            # cx_Oracle WITH_UNICODE mode.  *only* python
            # unicode objects accepted for anything
            self.supports_unicode_statements = True
            self.supports_unicode_binds = True
            self._cx_oracle_with_unicode = True
            # Py2K
            # There's really no reason to run with WITH_UNICODE under Python 2.x.
            # Give the user a hint.
            util.warn("cx_Oracle is compiled under Python 2.xx using the "
                        "WITH_UNICODE flag.  Consider recompiling cx_Oracle without "
                        "this flag, which is in no way necessary for full support of Unicode. "
                        "Otherwise, all string-holding bind parameters must "
                        "be explicitly typed using SQLAlchemy's String type or one of its subtypes,"
                        "or otherwise be passed as Python unicode.  Plain Python strings "
                        "passed as bind parameters will be silently corrupted by cx_Oracle."
                        )
            self.execution_ctx_cls = OracleExecutionContext_cx_oracle_with_unicode
            # end Py2K
        else:
            self._cx_oracle_with_unicode = False

        if self.cx_oracle_ver is None or \
                    not self.auto_convert_lobs or \
                    not hasattr(self.dbapi, 'CLOB'):
            self.dbapi_type_map = {}
        else:
            # only use this for LOB objects.  using it for strings, dates
            # etc. leads to a little too much magic, reflection doesn't know if it should
            # expect encoded strings or unicodes, etc.
            self.dbapi_type_map = {
                self.dbapi.CLOB: oracle.CLOB(),
                self.dbapi.NCLOB:oracle.NCLOB(),
                self.dbapi.BLOB: oracle.BLOB(),
                self.dbapi.BINARY: oracle.RAW(),
            }

Example 28

Project: CouchPotatoV1 Source File: pg8000.py
    def post_process_text(self, text):
        if '%%' in text:
            util.warn("The SQLAlchemy postgresql dialect now automatically escapes '%' in text() "
                      "expressions to '%%'.")
        return text.replace('%', '%%')
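
The doubling matters because pg8000 uses a percent-based DBAPI paramstyle, so a literal '%' left in the statement text would otherwise be read as the start of a parameter marker. The transform itself is straightforward:

    def post_process_text(text):
        return text.replace('%', '%%')

    assert post_process_text("LIKE 'abc%'") == "LIKE 'abc%%'"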

Example 29

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        c = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
        found_table = False
        columns = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type_, nullable, default, has_default, primary_key) = (row[1], row[2].upper(), not row[3], row[4], row[4] is not None, row[5])
            name = re.sub(r'^\"|\"$', '', name)
            if default:
                default = re.sub(r"^\'|\'$", '', default)
            match = re.match(r'(\w+)(\(.*?\))?', type_)
            if match:
                coltype = match.group(1)
                args = match.group(2)
            else:
                coltype = "VARCHAR"
                args = ''
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NullType
            if args is not None:
                args = re.findall(r'(\d+)', args)
                coltype = coltype(*[int(a) for a in args])

            columns.append({
                'name' : name,
                'type' : coltype,
                'nullable' : nullable,
                'default' : default,
                'primary_key': primary_key
            })
        return columns
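
The index-based unpacking above relies on PRAGMA table_info returning rows shaped as (cid, name, type, notnull, dflt_value, pk); this is easy to verify with the sqlite3 module directly:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE t (id INTEGER PRIMARY KEY, name VARCHAR(20))")
    for cid, name, type_, notnull, dflt_value, pk in conn.execute(
            "PRAGMA table_info(t)"):
        print(cid, name, type_, notnull, dflt_value, pk)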

Example 30

Project: CouchPotatoV1 Source File: default.py
    def __init__(self, convert_unicode=False, assert_unicode=False,
                 encoding='utf-8', paramstyle=None, dbapi=None,
                 implicit_returning=None,
                 label_length=None, **kwargs):
                 
        if not getattr(self, 'ported_sqla_06', True):
            util.warn(
                "The %s dialect is not yet ported to SQLAlchemy 0.6" %
                self.name)
        
        self.convert_unicode = convert_unicode
        if assert_unicode:
            util.warn_deprecated(
                "assert_unicode is deprecated. "
                "SQLAlchemy emits a warning in all cases where it "
                "would otherwise like to encode a Python unicode object "
                "into a specific encoding but a plain bytestring is "
                "received. "
                "This does *not* apply to DBAPIs that coerce Unicode "
                "natively.")
            
        self.encoding = encoding
        self.positional = False
        self._ischema = None
        self.dbapi = dbapi
        if paramstyle is not None:
            self.paramstyle = paramstyle
        elif self.dbapi is not None:
            self.paramstyle = self.dbapi.paramstyle
        else:
            self.paramstyle = self.default_paramstyle
        if implicit_returning is not None:
            self.implicit_returning = implicit_returning
        self.positional = self.paramstyle in ('qmark', 'format', 'numeric')
        self.identifier_preparer = self.preparer(self)
        self.type_compiler = self.type_compiler(self)

        if label_length and label_length > self.max_identifier_length:
            raise exc.ArgumentError(
                    "Label length of %d is greater than this dialect's"
                    " maximum identifier length of %d" %
                    (label_length, self.max_identifier_length))
        self.label_length = label_length

        if not hasattr(self, 'description_encoding'):
            self.description_encoding = getattr(
                                            self, 
                                            'description_encoding', 
                                            encoding)

Example 31

Project: CouchPotatoV1 Source File: reflection.py
    def reflecttable(self, table, include_columns):
        """Given a Table object, load its internal constructs based on introspection.
        
        This is the underlying method used by most dialects to produce 
        table reflection.  Direct usage is like::
        
            from sqlalchemy import create_engine, MetaData, Table
            from sqlalchemy.engine import reflection
            
            engine = create_engine('...')
            meta = MetaData()
            user_table = Table('user', meta)
            insp = Inspector.from_engine(engine)
            insp.reflecttable(user_table, None)
            
        :param table: a :class:`~sqlalchemy.schema.Table` instance.
        :param include_columns: a list of string column names to include
          in the reflection process.  If ``None``, all columns are reflected.
            
        """
        dialect = self.bind.dialect

        # MySQL dialect does this.  Applicable with other dialects?
        if hasattr(dialect, '_connection_charset') \
                                        and hasattr(dialect, '_adjust_casing'):
            charset = dialect._connection_charset
            dialect._adjust_casing(table)

        # table attributes we might need.
        reflection_options = dict(
            (k, table.kwargs.get(k)) for k in dialect.reflection_options if k in table.kwargs)

        schema = table.schema
        table_name = table.name

        # apply table options
        tbl_opts = self.get_table_options(table_name, schema, **table.kwargs)
        if tbl_opts:
            table.kwargs.update(tbl_opts)

        # table.kwargs will need to be passed to each reflection method.  Make
        # sure keywords are strings.
        tblkw = table.kwargs.copy()
        for (k, v) in tblkw.items():
            del tblkw[k]
            tblkw[str(k)] = v

        # Py2K
        if isinstance(schema, str):
            schema = schema.decode(dialect.encoding)
        if isinstance(table_name, str):
            table_name = table_name.decode(dialect.encoding)
        # end Py2K

        # columns
        found_table = False
        for col_d in self.get_columns(table_name, schema, **tblkw):
            found_table = True
            name = col_d['name']
            if include_columns and name not in include_columns:
                continue

            coltype = col_d['type']
            col_kw = {
                'nullable':col_d['nullable'],
            }
            if 'autoincrement' in col_d:
                col_kw['autoincrement'] = col_d['autoincrement']
            if 'quote' in col_d:
                col_kw['quote'] = col_d['quote']
                
            colargs = []
            if col_d.get('default') is not None:
                # the "default" value is assumed to be a literal SQL expression,
                # so is wrapped in text() so that no quoting occurs on re-issuance.
                colargs.append(sa_schema.DefaultClause(sql.text(col_d['default'])))
                
            if 'sequence' in col_d:
                # TODO: mssql, maxdb and sybase are using this.
                seq = col_d['sequence']
                sequence = sa_schema.Sequence(seq['name'], 1, 1)
                if 'start' in seq:
                    sequence.start = seq['start']
                if 'increment' in seq:
                    sequence.increment = seq['increment']
                colargs.append(sequence)
                
            col = sa_schema.Column(name, coltype, *colargs, **col_kw)
            table.append_column(col)

        if not found_table:
            raise exc.NoSuchTableError(table.name)

        # Primary keys
        pk_cons = self.get_pk_constraint(table_name, schema, **tblkw)
        if pk_cons:
            primary_key_constraint = sa_schema.PrimaryKeyConstraint(name=pk_cons.get('name'), 
                *[table.c[pk] for pk in pk_cons['constrained_columns']
                if pk in table.c]
            )

            table.append_constraint(primary_key_constraint)

        # Foreign keys
        fkeys = self.get_foreign_keys(table_name, schema, **tblkw)
        for fkey_d in fkeys:
            conname = fkey_d['name']
            constrained_columns = fkey_d['constrained_columns']
            referred_schema = fkey_d['referred_schema']
            referred_table = fkey_d['referred_table']
            referred_columns = fkey_d['referred_columns']
            refspec = []
            if referred_schema is not None:
                sa_schema.Table(referred_table, table.metadata,
                                autoload=True, schema=referred_schema,
                                autoload_with=self.bind,
                                **reflection_options
                                )
                for column in referred_columns:
                    refspec.append(".".join(
                        [referred_schema, referred_table, column]))
            else:
                sa_schema.Table(referred_table, table.metadata, autoload=True,
                                autoload_with=self.bind,
                                **reflection_options
                                )
                for column in referred_columns:
                    refspec.append(".".join([referred_table, column]))
            table.append_constraint(
                sa_schema.ForeignKeyConstraint(constrained_columns, refspec,
                                               conname, link_to_name=True))
        # Indexes 
        indexes = self.get_indexes(table_name, schema)
        for index_d in indexes:
            name = index_d['name']
            columns = index_d['column_names']
            unique = index_d['unique']
            flavor = index_d.get('type', 'unknown type')
            if include_columns and \
                            not set(columns).issubset(include_columns):
                util.warn(
                    "Omitting %s KEY for (%s), key covers omitted columns." %
                    (flavor, ', '.join(columns)))
                continue
            sa_schema.Index(name, *[table.columns[c] for c in columns], 
                         **dict(unique=unique))

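A note on what these `util.warn` calls actually do: in the versions quoted here, `sqlalchemy.util.warn` is a thin wrapper around the stdlib `warnings.warn` using the `SAWarning` category, so ordinary warning filters apply. A minimal sketch of intercepting one (the message text is just an example):

import warnings

from sqlalchemy import exc as sa_exc
from sqlalchemy import util

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    util.warn("Omitting UNIQUE KEY for (a, b), key covers omitted columns.")

# util.warn routes through the stdlib machinery with the SAWarning category
assert issubclass(caught[0].category, sa_exc.SAWarning)

# in a test suite, the same category can be escalated to an error:
# warnings.simplefilter("error", sa_exc.SAWarning)
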
Example 32

Project: kokoropy Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source,
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pk_constraint = self.get_pk_constraint(connection, table_name)
        pkey_cols = pk_constraint['constrained_columns']

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif issubclass(coltype, Integer) and row['fprec'] != 0:
                coltype = NUMERIC(
                    precision=row['fprec'],
                    scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                    'DEFAULT', "Unrecognized default value: %s" % \
                    defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue,
                'autoincrement': defvalue is None
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if it's linked to
            # a sequence through a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

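The `util.warn` / `NULLTYPE` fallback above is the recurring pattern in this family of examples: an unknown catalog type degrades to an untyped column instead of aborting reflection. A standalone sketch, with `_ISCHEMA_NAMES` as a hypothetical stand-in for a dialect's `ischema_names` map:

from sqlalchemy import types as sqltypes
from sqlalchemy import util

# hypothetical stand-in for a dialect's ischema_names lookup table
_ISCHEMA_NAMES = {'VARYING': sqltypes.String, 'LONG': sqltypes.Integer}

def resolve_type(colspec, colname):
    # warn and degrade to NULLTYPE rather than failing the whole pass
    coltype = _ISCHEMA_NAMES.get(colspec)
    if coltype is None:
        util.warn("Did not recognize type '%s' of column '%s'" %
                  (colspec, colname))
        return sqltypes.NULLTYPE
    return coltype()

resolve_type('VARYING', 'name')    # String()
resolve_type('GEOMETRY', 'shape')  # warns, returns NULLTYPE
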
Example 33

Project: kokoropy Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'

        params = {"table_name": table_name}
        text = "SELECT column_name, data_type, %(char_length_col)s, "\
            "data_precision, data_scale, "\
            "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\
            "WHERE table_name = :table_name"
        if schema is not None:
            params['owner'] = schema
            text += " AND owner = :owner "
        text += " ORDER BY column_id"
        text = text % {'dblink': dblink, 'char_length_col': char_length_col}

        c = connection.execute(sql.text(text), **params)

        for row in c:
            (colname, orig_colname, coltype, length, precision,
             scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[2],
                 row[3], row[4], row[5] == 'Y', row[6])

            if coltype == 'NUMBER':
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype:
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': default is None
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns

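The `orig_colname.lower() == orig_colname` check above relies on Oracle folding unquoted identifiers to upper case: a name that comes back from the catalog already lower-cased must have been created quoted. A simplified sketch of the heuristic (the real `normalize_name` also consults the identifier preparer for reserved words):

def normalize_name(name):
    # Oracle stores case-insensitive (unquoted) identifiers in upper case;
    # fold those to SQLAlchemy's lower-case canonical form
    if name.upper() == name:
        return name.lower()
    return name

def needs_quote(orig_name):
    # already lower case in the catalog => created as a quoted identifier
    return orig_name.lower() == orig_name

assert normalize_name('EMPLOYEES') == 'employees'
assert not needs_quote('EMPLOYEES')
assert needs_quote('employees')
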
Example 34

Project: kokoropy Source File: base.py
    def _get_column_info(self, name, type_, nullable, autoincrement, default,
                         precision, scale, length):

        coltype = self.ischema_names.get(type_, None)

        kwargs = {}

        if coltype in (NUMERIC, DECIMAL):
            args = (precision, scale)
        elif coltype == FLOAT:
            args = (precision,)
        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
            args = (length,)
        else:
            args = ()

        if coltype:
            coltype = coltype(*args, **kwargs)
            # is this necessary
            # if is_array:
            #     coltype = ARRAY(coltype)
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            coltype = sqltypes.NULLTYPE

        if default:
            default = re.sub("DEFAULT", "", default).strip()
            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
        else:
            default = None

        column_info = dict(name=name, type=coltype, nullable=nullable,
                           default=default, autoincrement=autoincrement)
        return column_info

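The two `re.sub` calls above peel a catalog default such as `DEFAULT 'value'` down to the bare value. Extracted as a standalone helper (a sketch only; real server output may need more care with embedded quotes):

import re

def strip_default(default):
    if not default:
        return None
    # drop the DEFAULT keyword, then any single outer quoting
    default = re.sub("DEFAULT", "", default).strip()
    return re.sub("^'(.*)'$", lambda m: m.group(1), default)

assert strip_default("DEFAULT 'active'") == "active"
assert strip_default("DEFAULT 0") == "0"
assert strip_default(None) is None
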
Example 35

Project: maraschino Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source, 
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pkey_cols = self.get_primary_keys(connection, table_name)

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif colspec == 'INT64':
                coltype = coltype(
                                precision=row['fprec'], 
                                scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case 
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                            'DEFAULT', "Unrecognized default value: %s" % \
                            defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue,
                'autoincrement': defvalue is None
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if it's linked to
            # a sequence through a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

Example 36

Project: maraschino Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or self.default_schema_name
        c = connection.execute(
            """select colname, coltype, collength, t3.default, t1.colno from
                syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
                where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
                  and t3.tabid = t2.tabid and t3.colno = t1.colno
                order by t1.colno""", table_name, schema)

        primary_cols = self.get_primary_keys(connection, table_name, schema, **kw)

        columns = []
        rows = c.fetchall()
        for name, colattr, collength, default, colno in rows:
            name = name.lower()

            autoincrement = False
            primary_key = False

            if name in primary_cols:
                primary_key = True

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null, 0 null
            not_nullable, coltype = divmod(colattr, 256)
            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 6: # Serial, mark as autoincrement
                autoincrement = True

            if coltype == 0 or coltype == 13: # char, varchar
                coltype = ischema_names[coltype](collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5: # decimal
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = sqltypes.Numeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE

            column_info = dict(name=name, type=coltype, nullable=not not_nullable,
                               default=default, autoincrement=autoincrement,
                               primary_key=primary_key)
            columns.append(column_info)
        return columns

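The Informix catalog packs two facts into each integer: `colattr` carries nullability in the high byte and the type code in the low byte, and for decimals `collength` carries precision and scale the same way. A worked decoding of the arithmetic used above:

def decode_colattr(colattr):
    # high byte: 1 = NOT NULL; low byte: type code
    not_nullable, coltype = divmod(colattr, 256)
    return bool(not_nullable), coltype

def decode_decimal_length(collength):
    precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
    if scale == 255:  # 255 marks "no scale" in the catalog
        scale = 0
    return precision, scale

assert decode_colattr(261) == (True, 5)                # NOT NULL decimal
assert decode_decimal_length(10 * 256 + 2) == (10, 2)  # DECIMAL(10,2)
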
Example 37

Project: maraschino Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'
 
        c = connection.execute(sql.text(
                "SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
                "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
                "WHERE table_name = :table_name AND owner = :owner " 
                "ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
                               table_name=table_name, owner=schema)

        for row in c:
            (colname, orig_colname, coltype, length, precision,
             scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[2],
                 row[3], row[4], row[5] == 'Y', row[6])

            if coltype == 'NUMBER':
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype: 
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': default is None
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns

Example 38

Project: maraschino Source File: cx_oracle.py
Function: init
    def __init__(self, 
                auto_setinputsizes=True, 
                auto_convert_lobs=True, 
                threaded=True, 
                allow_twophase=True, 
                arraysize=50, **kwargs):
        OracleDialect.__init__(self, **kwargs)
        self.threaded = threaded
        self.arraysize = arraysize
        self.allow_twophase = allow_twophase
        self.supports_timestamp = self.dbapi is None or hasattr(self.dbapi, 'TIMESTAMP' )
        self.auto_setinputsizes = auto_setinputsizes
        self.auto_convert_lobs = auto_convert_lobs

        if hasattr(self.dbapi, 'version'):
            self.cx_oracle_ver = tuple([int(x) for x in self.dbapi.version.split('.')])
        else:
            self.cx_oracle_ver = (0, 0, 0)

        def types(*names):
            return set([
                        getattr(self.dbapi, name, None) for name in names
                    ]).difference([None])

        self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB")
        self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
        self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB") 
        self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
        self.supports_native_decimal = self.cx_oracle_ver >= (5, 0)
        self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)

        if self.cx_oracle_ver == (0, 0, 0):
            # this occurs in tests with mock DBAPIs
            self._cx_oracle_string_types = set()
            self._cx_oracle_with_unicode = False
        elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'):
            # cx_Oracle WITH_UNICODE mode.  *only* python
            # unicode objects accepted for anything
            self.supports_unicode_statements = True
            self.supports_unicode_binds = True
            self._cx_oracle_with_unicode = True
            # Py2K
            # There's really no reason to run with WITH_UNICODE under Python 2.x.
            # Give the user a hint.
            util.warn("cx_Oracle is compiled under Python 2.xx using the "
                        "WITH_UNICODE flag.  Consider recompiling cx_Oracle without "
                        "this flag, which is in no way necessary for full support of Unicode. "
                        "Otherwise, all string-holding bind parameters must "
                        "be explicitly typed using SQLAlchemy's String type or one of its subtypes,"
                        "or otherwise be passed as Python unicode.  Plain Python strings "
                        "passed as bind parameters will be silently corrupted by cx_Oracle."
                        )
            self.execution_ctx_cls = OracleExecutionContext_cx_oracle_with_unicode
            # end Py2K
        else:
            self._cx_oracle_with_unicode = False

        if self.cx_oracle_ver == (0, 0, 0) or \
                    not self.auto_convert_lobs or \
                    not hasattr(self.dbapi, 'CLOB'):
            self.dbapi_type_map = {}
        else:
            # only use this for LOB objects.  using it for strings, dates
            # etc. leads to a little too much magic, reflection doesn't know if it should
            # expect encoded strings or unicodes, etc.
            self.dbapi_type_map = {
                self.dbapi.CLOB: oracle.CLOB(),
                self.dbapi.NCLOB:oracle.NCLOB(),
                self.dbapi.BLOB: oracle.BLOB(),
                self.dbapi.BINARY: oracle.RAW(),
            }

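The version-gating at the top of this constructor is worth isolating: parse `dbapi.version` into a tuple, default low when the attribute is missing (the mock-DBAPI case the comment mentions), then compare tuples. A sketch with a hypothetical `FakeDBAPI`; note cx_Oracle has shipped versions like `5.0.4b1` that would need a more forgiving parser:

def dbapi_version_tuple(dbapi):
    if hasattr(dbapi, 'version'):
        return tuple(int(x) for x in dbapi.version.split('.'))
    return (0, 0, 0)

class FakeDBAPI(object):
    version = "5.1.2"

assert dbapi_version_tuple(FakeDBAPI()) >= (5, 0)  # unicode binds, native decimal
assert dbapi_version_tuple(object()) == (0, 0, 0)  # mock DBAPI path
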
Example 39

Project: maraschino Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        c = _pragma_cursor(
                    connection.execute("%stable_info(%s)" % 
                    (pragma, qtable)))
        found_table = False
        columns = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type_, nullable, default, has_default, primary_key) = \
                (row[1], row[2].upper(), not row[3], 
                row[4], row[4] is not None, row[5])
            name = re.sub(r'^\"|\"$', '', name)
            match = re.match(r'(\w+)(\(.*?\))?', type_)
            if match:
                coltype = match.group(1)
                args = match.group(2)
            else:
                coltype = "VARCHAR"
                args = ''
            try:
                coltype = self.ischema_names[coltype]
                if args is not None:
                    args = re.findall(r'(\d+)', args)
                    coltype = coltype(*[int(a) for a in args])
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NullType()

            columns.append({
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': default is None,
                'primary_key': primary_key
            })
        return columns

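SQLite's `PRAGMA table_info` reports the declared type as free text, e.g. `VARCHAR(30)` or `DECIMAL(10,2)`, which is why the code above splits it with `(\w+)(\(.*?\))?`. The same parsing as a standalone helper:

import re

def split_type(type_):
    match = re.match(r'(\w+)(\(.*?\))?', type_.upper())
    if not match:
        return "VARCHAR", []  # same fallback as above
    name, args = match.group(1), match.group(2) or ''
    return name, [int(a) for a in re.findall(r'(\d+)', args)]

assert split_type("varchar(30)") == ("VARCHAR", [30])
assert split_type("DECIMAL(10,2)") == ("DECIMAL", [10, 2])
assert split_type("BLOB") == ("BLOB", [])
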
Example 40

Project: maraschino Source File: default.py
    def __init__(self, convert_unicode=False, assert_unicode=False,
                 encoding='utf-8', paramstyle=None, dbapi=None,
                 implicit_returning=None,
                 label_length=None, **kwargs):

        if not getattr(self, 'ported_sqla_06', True):
            util.warn(
                "The %s dialect is not yet ported to SQLAlchemy 0.6/0.7" %
                self.name)

        self.convert_unicode = convert_unicode
        if assert_unicode:
            util.warn_deprecated(
                "assert_unicode is deprecated. "
                "SQLAlchemy emits a warning in all cases where it "
                "would otherwise like to encode a Python unicode object "
                "into a specific encoding but a plain bytestring is "
                "received. "
                "This does *not* apply to DBAPIs that coerce Unicode "
                "natively.")

        self.encoding = encoding
        self.positional = False
        self._ischema = None
        self.dbapi = dbapi
        if paramstyle is not None:
            self.paramstyle = paramstyle
        elif self.dbapi is not None:
            self.paramstyle = self.dbapi.paramstyle
        else:
            self.paramstyle = self.default_paramstyle
        if implicit_returning is not None:
            self.implicit_returning = implicit_returning
        self.positional = self.paramstyle in ('qmark', 'format', 'numeric')
        self.identifier_preparer = self.preparer(self)
        self.type_compiler = self.type_compiler(self)

        if label_length and label_length > self.max_identifier_length:
            raise exc.ArgumentError(
                    "Label length of %d is greater than this dialect's"
                    " maximum identifier length of %d" %
                    (label_length, self.max_identifier_length))
        self.label_length = label_length

        if self.description_encoding == 'use_encoding':
            self._description_decoder = processors.to_unicode_processor_factory(
                                            encoding
                                    )
        elif self.description_encoding is not None:
            self._description_decoder = processors.to_unicode_processor_factory(
                                            self.description_encoding
                                    )
        self._encoder = codecs.getencoder(self.encoding)
        self._decoder = processors.to_unicode_processor_factory(self.encoding)

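One detail above that trips people up: `positional` is derived from the DBAPI's declared `paramstyle`, and only three of the five PEP 249 styles are positional. A sketch of that mapping, with sqlite3, cx_Oracle, and psycopg2 as known examples:

POSITIONAL_STYLES = ('qmark', 'format', 'numeric')

def is_positional(paramstyle):
    return paramstyle in POSITIONAL_STYLES

assert is_positional('qmark')         # e.g. the sqlite3 DBAPI
assert not is_positional('named')     # e.g. cx_Oracle
assert not is_positional('pyformat')  # e.g. psycopg2
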
Example 41

Project: maraschino Source File: default.py
Function: check_unicode_returns
    def _check_unicode_returns(self, connection):
        # Py2K
        if self.supports_unicode_statements:
            cast_to = unicode
        else:
            cast_to = str
        # end Py2K
        # Py3K
        #cast_to = str
        def check_unicode(formatstr, type_):
            cursor = connection.connection.cursor()
            try:
                try:
                    cursor.execute(
                        cast_to(
                            expression.select( 
                            [expression.cast(
                                expression.literal_column(
                                        "'test %s returns'" % formatstr), type_)
                            ]).compile(dialect=self)
                        )
                    )
                    row = cursor.fetchone()

                    return isinstance(row[0], unicode)
                except self.dbapi.Error, de:
                    util.warn("Exception attempting to "
                            "detect unicode returns: %r" % de)
                    return False
            finally:
                cursor.close()

        # detect plain VARCHAR
        unicode_for_varchar = check_unicode("plain", sqltypes.VARCHAR(60))

        # detect if there's an NVARCHAR type with different behavior available
        unicode_for_unicode = check_unicode("unicode", sqltypes.Unicode(60))

        if unicode_for_unicode and not unicode_for_varchar:
            return "conditional"
        else:
            return unicode_for_varchar

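The shape of `check_unicode` generalizes: probe the live connection, and if the DBAPI raises, report via `util.warn` and fall back to a safe answer so dialect startup never dies on the probe. The same shape in miniature (`probe` is an illustrative name, and a bare `Exception` stands in for `self.dbapi.Error`):

import warnings

from sqlalchemy import util

def probe(cursor_fn):
    try:
        return cursor_fn()
    except Exception as de:  # the real code catches self.dbapi.Error
        util.warn("Exception attempting to "
                  "detect unicode returns: %r" % de)
        return False

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert probe(lambda: 1 / 0) is False  # error becomes a warning

assert len(caught) == 1
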
Example 42

Project: maraschino Source File: reflection.py
    def reflecttable(self, table, include_columns):
        """Given a Table object, load its internal constructs based on introspection.

        This is the underlying method used by most dialects to produce 
        table reflection.  Direct usage is like::

            from sqlalchemy import create_engine, MetaData, Table
            from sqlalchemy.engine import reflection

            engine = create_engine('...')
            meta = MetaData()
            user_table = Table('user', meta)
            insp = reflection.Inspector.from_engine(engine)
            insp.reflecttable(user_table, None)

        :param table: a :class:`~sqlalchemy.schema.Table` instance.
        :param include_columns: a list of string column names to include
          in the reflection process.  If ``None``, all columns are reflected.

        """
        dialect = self.bind.dialect

        # table attributes we might need.
        reflection_options = dict(
            (k, table.kwargs.get(k)) for k in dialect.reflection_options if k in table.kwargs)

        schema = table.schema
        table_name = table.name

        # apply table options
        tbl_opts = self.get_table_options(table_name, schema, **table.kwargs)
        if tbl_opts:
            table.kwargs.update(tbl_opts)

        # table.kwargs will need to be passed to each reflection method.  Make
        # sure keywords are strings.
        tblkw = table.kwargs.copy()
        for (k, v) in tblkw.items():
            del tblkw[k]
            tblkw[str(k)] = v

        # Py2K
        if isinstance(schema, str):
            schema = schema.decode(dialect.encoding)
        if isinstance(table_name, str):
            table_name = table_name.decode(dialect.encoding)
        # end Py2K

        # columns
        found_table = False
        for col_d in self.get_columns(table_name, schema, **tblkw):
            found_table = True
            table.dispatch.column_reflect(table, col_d)

            name = col_d['name']
            if include_columns and name not in include_columns:
                continue

            coltype = col_d['type']
            col_kw = {
                'nullable': col_d['nullable'],
            }
            for k in ('autoincrement', 'quote', 'info', 'key'):
                if k in col_d:
                    col_kw[k] = col_d[k]

            colargs = []
            if col_d.get('default') is not None:
                # the "default" value is assumed to be a literal SQL expression,
                # so is wrapped in text() so that no quoting occurs on re-issuance.
                colargs.append(
                    sa_schema.DefaultClause(
                        sql.text(col_d['default']), _reflected=True
                    )
                )

            if 'sequence' in col_d:
                # TODO: mssql, maxdb and sybase are using this.
                seq = col_d['sequence']
                sequence = sa_schema.Sequence(seq['name'], 1, 1)
                if 'start' in seq:
                    sequence.start = seq['start']
                if 'increment' in seq:
                    sequence.increment = seq['increment']
                colargs.append(sequence)

            col = sa_schema.Column(name, coltype, *colargs, **col_kw)
            table.append_column(col)

        if not found_table:
            raise exc.NoSuchTableError(table.name)

        # Primary keys
        pk_cons = self.get_pk_constraint(table_name, schema, **tblkw)
        if pk_cons:
            primary_key_constraint = sa_schema.PrimaryKeyConstraint(name=pk_cons.get('name'), 
                *[table.c[pk] for pk in pk_cons['constrained_columns']
                if pk in table.c]
            )

            table.append_constraint(primary_key_constraint)

        # Foreign keys
        fkeys = self.get_foreign_keys(table_name, schema, **tblkw)
        for fkey_d in fkeys:
            conname = fkey_d['name']
            constrained_columns = fkey_d['constrained_columns']
            referred_schema = fkey_d['referred_schema']
            referred_table = fkey_d['referred_table']
            referred_columns = fkey_d['referred_columns']
            refspec = []
            if referred_schema is not None:
                sa_schema.Table(referred_table, table.metadata,
                                autoload=True, schema=referred_schema,
                                autoload_with=self.bind,
                                **reflection_options
                                )
                for column in referred_columns:
                    refspec.append(".".join(
                        [referred_schema, referred_table, column]))
            else:
                sa_schema.Table(referred_table, table.metadata, autoload=True,
                                autoload_with=self.bind,
                                **reflection_options
                                )
                for column in referred_columns:
                    refspec.append(".".join([referred_table, column]))
            table.append_constraint(
                sa_schema.ForeignKeyConstraint(constrained_columns, refspec,
                                               conname, link_to_name=True))
        # Indexes 
        indexes = self.get_indexes(table_name, schema)
        for index_d in indexes:
            name = index_d['name']
            columns = index_d['column_names']
            unique = index_d['unique']
            flavor = index_d.get('type', 'unknown type')
            if include_columns and \
                            not set(columns).issubset(include_columns):
                util.warn(
                    "Omitting %s KEY for (%s), key covers omitted columns." %
                    (flavor, ', '.join(columns)))
                continue
            sa_schema.Index(name, *[table.columns[c] for c in columns], 
                         **dict(unique=unique))
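
The substantive difference between this version and Example 31 is the `table.dispatch.column_reflect(table, col_d)` call: it fires the `column_reflect` event, letting listeners rewrite the reflected column dictionary before the `Column` is constructed. A sketch of hooking it; since the listener's argument order changed across releases (current SQLAlchemy passes the inspector first), the sketch grabs the dictionary positionally:

from sqlalchemy import event
from sqlalchemy.schema import Table

def on_column_reflect(*args):
    column_info = args[-1]  # the col_d dict produced by get_columns()
    # e.g. map reflected names onto differently-named ORM attributes
    column_info['key'] = 'attr_%s' % column_info['name']

event.listen(Table, 'column_reflect', on_column_reflect)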