sqlalchemy.engine.reflection.cache

Here are examples of the Python API sqlalchemy.engine.reflection.cache, taken from open source projects.

97 Examples
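
Before the examples, a quick orientation: reflection.cache is a decorator that memoizes a dialect method's result in the info_cache dictionary passed through **kw; when no info_cache is supplied, the method simply runs uncached. The following is a minimal sketch of that contract (the demo dialect and its return value are invented for illustration):

    from sqlalchemy.engine import reflection

    class DemoDialect(object):
        calls = 0

        @reflection.cache
        def get_table_names(self, connection, schema=None, **kw):
            self.calls += 1                 # count real "catalog queries"
            return ["users", "orders"]      # stand-in for a real query

    dialect = DemoDialect()
    cache = {}
    dialect.get_table_names(None, schema="public", info_cache=cache)
    dialect.get_table_names(None, schema="public", info_cache=cache)
    assert dialect.calls == 1   # second call was served from info_cache
    dialect.get_table_names(None, schema="public")  # no cache passed
    assert dialect.calls == 2   # so the method body ran again

Every example below follows this shape: the decorated method accepts (self, connection, ...) plus **kw, and forwards info_cache to any helper it calls so a whole reflection pass shares one cache.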

Example 51

Project: SickGear Source File: base.py
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        PK_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 index_col(object_name(i.id), i.indid, 1) AS pk_1,
                 index_col(object_name(i.id), i.indid, 2) AS pk_2,
                 index_col(object_name(i.id), i.indid, 3) AS pk_3,
                 index_col(object_name(i.id), i.indid, 4) AS pk_4,
                 index_col(object_name(i.id), i.indid, 5) AS pk_5,
                 index_col(object_name(i.id), i.indid, 6) AS pk_6,
                 index_col(object_name(i.id), i.indid, 7) AS pk_7,
                 index_col(object_name(i.id), i.indid, 8) AS pk_8,
                 index_col(object_name(i.id), i.indid, 9) AS pk_9,
                 index_col(object_name(i.id), i.indid, 10) AS pk_10,
                 index_col(object_name(i.id), i.indid, 11) AS pk_11,
                 index_col(object_name(i.id), i.indid, 12) AS pk_12,
                 index_col(object_name(i.id), i.indid, 13) AS pk_13,
                 index_col(object_name(i.id), i.indid, 14) AS pk_14,
                 index_col(object_name(i.id), i.indid, 15) AS pk_15,
                 index_col(object_name(i.id), i.indid, 16) AS pk_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 2048
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()

        if pks is None:
            # table has no primary key constraint (see Example 66 for the
            # same method with this guard in place)
            return {"constrained_columns": [], "name": None}

        constrained_columns = []
        for i in range(1, pks["count"] + 1):
            constrained_columns.append(pks["pk_%i" % (i,)])
        return {"constrained_columns": constrained_columns,
                "name": pks["name"]}

Example 52

Project: sqlalchemy-redshift Source File: dialect.py
Function: get_columns
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """
        Return information about columns in `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_columns`.
        """
        cols = self._get_redshift_columns(connection, table_name, schema, **kw)
        if not self._domains:
            self._domains = self._load_domains(connection)
        domains = self._domains
        columns = []
        for col in cols:
            column_info = self._get_column_info(
                name=col.name, format_type=col.format_type,
                default=col.default, notnull=col.notnull, domains=domains,
                enums=[], schema=col.schema, encode=col.encode)
            columns.append(column_info)
        return columns
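
For orientation: application code normally reaches a method like this through an Inspector, which owns the info_cache the decorator uses. A hedged usage sketch (the connection URL and table name are placeholders):

    from sqlalchemy import create_engine, inspect

    engine = create_engine("redshift+psycopg2://user:pass@host:5439/db")
    inspector = inspect(engine)
    # Inspector.get_columns() invokes the decorated dialect method and
    # passes its own info_cache, so re-inspecting the same table is cheap.
    columns = inspector.get_columns("my_table", schema="public")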

Example 53

Project: sqlalchemy-redshift Source File: dialect.py
Function: get_pk_constraint
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """
        Return information about the primary key constraint on `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_pk_constraint`.
        """
        constraints = self._get_redshift_constraints(connection, table_name,
                                                     schema, **kw)
        pk_constraints = [c for c in constraints if c.contype == 'p']
        if not pk_constraints:
            return {'constrained_columns': [], 'name': ''}
        pk_constraint = pk_constraints[0]
        m = PRIMARY_KEY_RE.match(pk_constraint.condef)
        colstring = m.group('columns')
        constrained_columns = SQL_IDENTIFIER_RE.findall(colstring)
        return {
            'constrained_columns': constrained_columns,
            'name': None,
        }
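
PRIMARY_KEY_RE and SQL_IDENTIFIER_RE are module-level regexes in sqlalchemy-redshift's dialect.py and are not shown in these excerpts. A rough approximation of what they match, purely for reading the example (the real patterns are more careful about quoting):

    import re

    # approximations only; see sqlalchemy-redshift's dialect.py
    PRIMARY_KEY_RE = re.compile(r'PRIMARY KEY \((?P<columns>.+?)\)')
    SQL_IDENTIFIER_RE = re.compile(r'[a-z_][a-z0-9_$]*', re.IGNORECASE)

    m = PRIMARY_KEY_RE.match('PRIMARY KEY (id, created_at)')
    assert SQL_IDENTIFIER_RE.findall(m.group('columns')) == ['id', 'created_at']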

Example 54

Project: sqlalchemy-redshift Source File: dialect.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """
        Return information about foreign keys in `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_foreign_keys`.
        """
        constraints = self._get_redshift_constraints(connection, table_name,
                                                     schema, **kw)
        fk_constraints = [c for c in constraints if c.contype == 'f']
        uniques = defaultdict(lambda: defaultdict(dict))
        for con in fk_constraints:
            uniques[con.conname]["key"] = con.conkey
            uniques[con.conname]["condef"] = con.condef
        fkeys = []
        for conname, attrs in uniques.items():
            m = FOREIGN_KEY_RE.match(attrs['condef'])
            colstring = m.group('referred_columns')
            referred_columns = SQL_IDENTIFIER_RE.findall(colstring)
            referred_table = m.group('referred_table')
            referred_schema = m.group('referred_schema')
            colstring = m.group('columns')
            constrained_columns = SQL_IDENTIFIER_RE.findall(colstring)
            fkey_d = {
                'name': None,
                'constrained_columns': constrained_columns,
                'referred_schema': referred_schema,
                'referred_table': referred_table,
                'referred_columns': referred_columns,
            }
            fkeys.append(fkey_d)
        return fkeys

Example 55

Project: sqlalchemy-redshift Source File: dialect.py
Function: get_unique_constraints
    @reflection.cache
    def get_unique_constraints(self, connection, table_name,
                               schema=None, **kw):
        """
        Return information about unique constraints in `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_unique_constraints`.
        """
        constraints = self._get_redshift_constraints(connection,
                                                     table_name, schema, **kw)
        constraints = [c for c in constraints if c.contype == 'u']
        uniques = defaultdict(lambda: defaultdict(dict))
        for con in constraints:
            uniques[con.conname]["key"] = con.conkey
            uniques[con.conname]["cols"][con.attnum] = con.attname

        return [
            {'name': None,
             'column_names': [uc["cols"][i] for i in uc["key"]]}
            for name, uc in uniques.items()
        ]

Example 56

Project: sqlalchemy-redshift Source File: dialect.py
Function: get_table_options
    @reflection.cache
    def get_table_options(self, connection, table_name, schema, **kw):
        """
        Return a dictionary of options specified when the table of the
        given name was created.

        Overrides interface
        :meth:`~sqlalchemy.engine.Inspector.get_table_options`.
        """
        def keyfunc(column):
            num = int(column.sortkey)
            # If the sortkey is interleaved, column numbers are stored
            # as negative values, so take abs.
            return abs(num)
        table = self._get_redshift_relation(connection, table_name,
                                            schema, **kw)
        columns = self._get_redshift_columns(connection, table_name,
                                             schema, **kw)
        sortkey_cols = sorted([col for col in columns if col.sortkey],
                              key=keyfunc)
        interleaved = any([int(col.sortkey) < 0 for col in sortkey_cols])
        sortkey = [col.name for col in sortkey_cols]
        interleaved_sortkey = None
        if interleaved:
            interleaved_sortkey = sortkey
            sortkey = None
        distkeys = [col.name for col in columns if col.distkey]
        distkey = distkeys[0] if distkeys else None
        return {
            'redshift_diststyle': table.diststyle,
            'redshift_distkey': distkey,
            'redshift_sortkey': sortkey,
            'redshift_interleaved_sortkey': interleaved_sortkey,
        }
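
Reusing the inspector from the earlier sketch, the returned dictionary carries the same redshift_* keys the dialect accepts at CREATE TABLE time. A hedged sketch of the result for an interleaved-sortkey table (values are illustrative only):

    inspector.get_table_options("events", schema="public")
    # => {'redshift_diststyle': 'KEY',
    #     'redshift_distkey': 'user_id',
    #     'redshift_sortkey': None,
    #     'redshift_interleaved_sortkey': ['user_id', 'created_at']}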

Example 57

Project: sqlalchemy-redshift Source File: dialect.py
    @reflection.cache
    def _get_all_relation_info(self, connection, **kw):
        result = connection.execute("""
        SELECT
          c.relkind,
          n.oid as "schema_oid",
          n.nspname as "schema",
          c.oid as "rel_oid",
          c.relname,
          CASE c.reldiststyle
            WHEN 0 THEN 'EVEN' WHEN 1 THEN 'KEY' WHEN 8 THEN 'ALL' END
            AS "diststyle",
          c.relowner AS "owner_id",
          u.usename AS "owner_name",
          TRIM(TRAILING ';' FROM pg_catalog.pg_get_viewdef(c.oid, true))
            AS "view_definition",
          pg_catalog.array_to_string(c.relacl, '\n') AS "privileges"
        FROM pg_catalog.pg_class c
             LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
             JOIN pg_catalog.pg_user u ON u.usesysid = c.relowner
        WHERE c.relkind IN ('r', 'v', 'm', 'S', 'f')
          AND n.nspname !~ '^pg_'
        ORDER BY c.relkind, n.oid, n.nspname;
        """)
        relations = {}
        for rel in result:
            key = RelationKey(rel.relname, rel.schema, connection)
            relations[key] = rel
        return relations
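
RelationKey is a helper defined elsewhere in the same dialect.py and is not shown here. Conceptually it is just a hashable (name, schema) pair; a hedged approximation (the real class also uses the connection to unquote identifiers, which is why it is constructed with one):

    import collections

    # approximation only; see sqlalchemy-redshift's dialect.py for the real class
    RelationKey = collections.namedtuple('RelationKey', ['name', 'schema'])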

Example 58

Project: sqlalchemy-redshift Source File: dialect.py
    @reflection.cache
    def _get_all_column_info(self, connection, **kw):
        all_columns = defaultdict(list)
        with connection.contextual_connect() as cc:
            # We fetch the current search_path, which may or may not quote
            # '$user' depending on whether other schemas need quoting.
            search_path = cc.execute("SHOW search_path").scalar()
            if '$user' in search_path and '"$user"' not in search_path:
                search_path = search_path.replace('$user', '"$user"')

            # Because pg_table_def only shows results for schemas on the
            # search_path, we temporarily set search_path to all non-system
            # schemas, then restore the original value afterwards.
            schema_names = ['"%s"' % r.name for r in cc.execute("""
            SELECT nspname AS "name"
            FROM pg_catalog.pg_namespace
            WHERE nspname !~ '^pg_' AND nspname <> 'information_schema'
            ORDER BY 1
            """)]
            modified_search_path = ','.join(schema_names)
            cc.execute("SET LOCAL search_path TO %s" % modified_search_path)

            result = cc.execute("""
            SELECT
              n.nspname as "schema",
              c.relname as "table_name",
              d.column as "name",
              encoding as "encode",
              type, distkey, sortkey, "notnull", adsrc, attnum,
              pg_catalog.format_type(att.atttypid, att.atttypmod),
              pg_catalog.pg_get_expr(ad.adbin, ad.adrelid) AS DEFAULT,
              n.oid as "schema_oid",
              c.oid as "table_oid"
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n
              ON n.oid = c.relnamespace
            JOIN pg_catalog.pg_table_def d
              ON (d.schemaname, d.tablename) = (n.nspname, c.relname)
            JOIN pg_catalog.pg_attribute att
              ON (att.attrelid, att.attname) = (c.oid, d.column)
            LEFT JOIN pg_catalog.pg_attrdef ad
              ON (att.attrelid, att.attnum) = (ad.adrelid, ad.adnum)
            WHERE n.nspname !~ '^pg_'
            ORDER BY n.nspname, c.relname, att.attnum
            """)
            for col in result:
                key = RelationKey(col.table_name, col.schema, connection)
                all_columns[key].append(col)

            cc.execute("SET LOCAL search_path TO %s" % search_path)

        return dict(all_columns)

Example 59

Project: sqlalchemy-redshift Source File: dialect.py
    @reflection.cache
    def _get_all_constraint_info(self, connection, **kw):
        result = connection.execute("""
        SELECT
          n.nspname as "schema",
          c.relname as "table_name",
          t.contype,
          t.conname,
          t.conkey,
          a.attnum,
          a.attname,
          pg_catalog.pg_get_constraintdef(t.oid, true) as condef,
          n.oid as "schema_oid",
          c.oid as "rel_oid"
        FROM pg_catalog.pg_class c
        LEFT JOIN pg_catalog.pg_namespace n
          ON n.oid = c.relnamespace
        JOIN pg_catalog.pg_constraint t
          ON t.conrelid = c.oid
        JOIN pg_catalog.pg_attribute a
          ON t.conrelid = a.attrelid AND a.attnum = ANY(t.conkey)
        WHERE n.nspname !~ '^pg_'
        ORDER BY n.nspname, c.relname
        """)
        all_constraints = defaultdict(list)
        for con in result:
            key = RelationKey(con.table_name, con.schema, connection)
            all_constraints[key].append(con)
        return all_constraints

Example 60

Project: sqlalchemy Source File: base.py
Function: get_column_sequence
    @reflection.cache
    def get_column_sequence(self, connection,
                            table_name, column_name,
                            schema=None, **kw):
        tablename = self.denormalize_name(table_name)
        colname = self.denormalize_name(column_name)
        # Heuristic-query to determine the generator associated to a PK field
        genqry = """
        SELECT trigdep.rdb$depended_on_name AS fgenerator
        FROM rdb$dependencies tabdep
             JOIN rdb$dependencies trigdep
                  ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
                     AND trigdep.rdb$depended_on_type=14
                     AND trigdep.rdb$dependent_type=2
             JOIN rdb$triggers trig ON
                    trig.rdb$trigger_name=tabdep.rdb$dependent_name
        WHERE tabdep.rdb$depended_on_name=?
          AND tabdep.rdb$depended_on_type=0
          AND trig.rdb$trigger_type=1
          AND tabdep.rdb$field_name=?
          AND (SELECT count(*)
           FROM rdb$dependencies trigdep2
           WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
        """
        genr = connection.execute(genqry, [tablename, colname]).first()
        if genr is not None:
            return dict(name=self.normalize_name(genr['fgenerator']))

Example 61

Project: sqlalchemy Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source,
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pk_constraint = self.get_pk_constraint(connection, table_name)
        pkey_cols = pk_constraint['constrained_columns']

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif issubclass(coltype, Integer) and row['fprec'] != 0:
                coltype = NUMERIC(
                    precision=row['fprec'],
                    scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                    'DEFAULT', "Unrecognized default value: %s" % \
                    defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue,
                'autoincrement': 'auto',
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, check whether it's linked to
            # a sequence through a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

Example 62

Project: sqlalchemy Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of each UK/FK of the given table
        fkqry = """
        SELECT rc.rdb$constraint_name AS cname,
               cse.rdb$field_name AS fname,
               ix2.rdb$relation_name AS targetrname,
               se.rdb$field_name AS targetfname
        FROM rdb$relation_constraints rc
             JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
             JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
             JOIN rdb$index_segments cse ON
                        cse.rdb$index_name=ix1.rdb$index_name
             JOIN rdb$index_segments se
                  ON se.rdb$index_name=ix2.rdb$index_name
                     AND se.rdb$field_position=cse.rdb$field_position
        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
        ORDER BY se.rdb$index_name, se.rdb$field_position
        """
        tablename = self.denormalize_name(table_name)

        c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
        fks = util.defaultdict(lambda: {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        })

        for row in c:
            cname = self.normalize_name(row['cname'])
            fk = fks[cname]
            if not fk['name']:
                fk['name'] = cname
                fk['referred_table'] = self.normalize_name(row['targetrname'])
            fk['constrained_columns'].append(
                self.normalize_name(row['fname']))
            fk['referred_columns'].append(
                self.normalize_name(row['targetfname']))
        return list(fks.values())

Example 63

Project: sqlalchemy Source File: base.py
Function: get_indexes
    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        qry = """
        SELECT ix.rdb$index_name AS index_name,
               ix.rdb$unique_flag AS unique_flag,
               ic.rdb$field_name AS field_name
        FROM rdb$indices ix
             JOIN rdb$index_segments ic
                  ON ix.rdb$index_name=ic.rdb$index_name
             LEFT OUTER JOIN rdb$relation_constraints
                  ON rdb$relation_constraints.rdb$index_name =
                        ic.rdb$index_name
        WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
          AND rdb$relation_constraints.rdb$constraint_type IS NULL
        ORDER BY index_name, ic.rdb$field_position
        """
        c = connection.execute(qry, [self.denormalize_name(table_name)])

        indexes = util.defaultdict(dict)
        for row in c:
            indexrec = indexes[row['index_name']]
            if 'name' not in indexrec:
                indexrec['name'] = self.normalize_name(row['index_name'])
                indexrec['column_names'] = []
                indexrec['unique'] = bool(row['unique_flag'])

            indexrec['column_names'].append(
                self.normalize_name(row['field_name']))

        return list(indexes.values())

Example 64

Project: sqlalchemy Source File: base.py
Function: get_columns
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        COLUMN_SQL = text("""
          SELECT col.name AS name,
                 t.name AS type,
                 (col.status & 8) AS nullable,
                 (col.status & 128) AS autoincrement,
                 com.text AS 'default',
                 col.prec AS precision,
                 col.scale AS scale,
                 col.length AS length
          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
              col.cdefault = com.id
          WHERE col.usertype = t.usertype
              AND col.id = :table_id
          ORDER BY col.colid
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)

        columns = []
        for (name, type_, nullable, autoincrement, default, precision, scale,
             length) in results:
            col_info = self._get_column_info(name, type_, bool(nullable),
                                             bool(autoincrement),
                                             default, precision, scale,
                                             length)
            columns.append(col_info)

        return columns

Example 65

Project: sqlalchemy Source File: base.py
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):

        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        table_cache = {}
        column_cache = {}
        foreign_keys = []

        table_cache[table_id] = {"name": table_name, "schema": schema}

        COLUMN_SQL = text("""
          SELECT c.colid AS id, c.name AS name
          FROM syscolumns c
          WHERE c.id = :table_id
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = {}
        for col in results:
            columns[col["id"]] = col["name"]
        column_cache[table_id] = columns

        REFCONSTRAINT_SQL = text("""
          SELECT o.name AS name, r.reftabid AS reftable_id,
            r.keycnt AS 'count',
            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
            r.fokey16 AS fokey16,
            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
            r.refkey16 AS refkey16
          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
          WHERE r.tableid = :table_id
        """)
        referential_constraints = connection.execute(
            REFCONSTRAINT_SQL, table_id=table_id).fetchall()

        REFTABLE_SQL = text("""
          SELECT o.name AS name, u.name AS 'schema'
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE o.id = :table_id
        """)

        for r in referential_constraints:
            reftable_id = r["reftable_id"]

            if reftable_id not in table_cache:
                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
                reftable = c.fetchone()
                c.close()
                table_info = {"name": reftable["name"], "schema": None}
                if (schema is not None or
                        reftable["schema"] != self.default_schema_name):
                    table_info["schema"] = reftable["schema"]

                table_cache[reftable_id] = table_info
                results = connection.execute(COLUMN_SQL, table_id=reftable_id)
                reftable_columns = {}
                for col in results:
                    reftable_columns[col["id"]] = col["name"]
                column_cache[reftable_id] = reftable_columns

            reftable = table_cache[reftable_id]
            reftable_columns = column_cache[reftable_id]

            constrained_columns = []
            referred_columns = []
            for i in range(1, r["count"] + 1):
                constrained_columns.append(columns[r["fokey%i" % i]])
                referred_columns.append(reftable_columns[r["refkey%i" % i]])

            fk_info = {
                "constrained_columns": constrained_columns,
                "referred_schema": reftable["schema"],
                "referred_table": reftable["name"],
                "referred_columns": referred_columns,
                "name": r["name"]
            }

            foreign_keys.append(fk_info)

        return foreign_keys

Example 66

Project: sqlalchemy Source File: base.py
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        PK_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 index_col(object_name(i.id), i.indid, 1) AS pk_1,
                 index_col(object_name(i.id), i.indid, 2) AS pk_2,
                 index_col(object_name(i.id), i.indid, 3) AS pk_3,
                 index_col(object_name(i.id), i.indid, 4) AS pk_4,
                 index_col(object_name(i.id), i.indid, 5) AS pk_5,
                 index_col(object_name(i.id), i.indid, 6) AS pk_6,
                 index_col(object_name(i.id), i.indid, 7) AS pk_7,
                 index_col(object_name(i.id), i.indid, 8) AS pk_8,
                 index_col(object_name(i.id), i.indid, 9) AS pk_9,
                 index_col(object_name(i.id), i.indid, 10) AS pk_10,
                 index_col(object_name(i.id), i.indid, 11) AS pk_11,
                 index_col(object_name(i.id), i.indid, 12) AS pk_12,
                 index_col(object_name(i.id), i.indid, 13) AS pk_13,
                 index_col(object_name(i.id), i.indid, 14) AS pk_14,
                 index_col(object_name(i.id), i.indid, 15) AS pk_15,
                 index_col(object_name(i.id), i.indid, 16) AS pk_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 2048
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()

        constrained_columns = []
        if pks:
            for i in range(1, pks["count"] + 1):
                constrained_columns.append(pks["pk_%i" % (i,)])
            return {"constrained_columns": constrained_columns,
                    "name": pks["name"]}
        else:
            return {"constrained_columns": [], "name": None}

Example 67

Project: sqlalchemy_exasol Source File: base.py
Function: get_schema_names
    @reflection.cache
    def get_schema_names(self, connection, **kw):
        sql_stmnt = "select SCHEMA_NAME from SYS.EXA_SCHEMAS"
        rs = connection.execute(sql.text(sql_stmnt))
        return [self.normalize_name(row[0]) for row in rs]
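
The Exasol examples lean heavily on normalize_name()/denormalize_name(), SQLAlchemy's convention for case-insensitive backends that store identifiers upper-case. A hedged simplification of what those methods do (the real implementations also consult the identifier preparer for names that require quoting):

    def normalize_name(name):
        # UPPER-case catalog names become lower-case, SQLAlchemy's marker
        # for "case insensitive"; mixed-case names pass through unchanged
        if name is not None and name.upper() == name:
            return name.lower()
        return name

    def denormalize_name(name):
        # the inverse, applied before querying the system catalogs
        if name is not None and name.lower() == name:
            return name.upper()
        return name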

Example 68

Project: sqlalchemy_exasol Source File: base.py
Function: get_columns
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or connection.engine.url.database
        if schema is None:
            schema = connection.execute("select CURRENT_SCHEMA from dual").scalar()
        table_name=self.denormalize_name(table_name)
        schema=self.denormalize_name(schema)

        columns = []
        for row in self._get_all_columns(connection, schema, info_cache=kw.get("info_cache")):
            if row[9] != table_name and table_name is not None:
                continue
            (colname, coltype, length, precision, scale, nullable, default, identity, is_distribution_key) = \
                (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])

            # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)], INTERVAL YEAR[(p)] TO MONTH

            # remove ASCII, UTF8 and spaces from char-like types
            coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
            # remove precision and scale addition from numeric types
            coltype = re.sub(r'\(\d+(\,\d+)?\)', '', coltype)
            try:
                if coltype == 'VARCHAR':
                    coltype = sqltypes.VARCHAR(length)
                elif coltype == 'DECIMAL':
                    # this Dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on ODBC level
                    # thus, we need to convert DECIMAL(<=18,0) back to INTEGER type
                    if scale == 0 and precision <= 18:
                        coltype = sqltypes.INTEGER()
                    else:
                        coltype = sqltypes.DECIMAL(precision, scale)
                else:
                    coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, colname))
                coltype = sqltypes.NULLTYPE

            cdict = {
                'name': self.normalize_name(colname),
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'is_distribution_key': is_distribution_key
            }
            # if we have a positive identity value add a sequence
            if identity is not None and identity >= 0:
                cdict['sequence'] = {'name':''}
                # TODO: we have the possibility to encode the current identity value count
                # into the column metadata. But the consequence is that it would also be used
                # as start value in CREATE statements. For now the current value is ignored.
                # Add it by changing the dict to: {'name':'', 'start': int(identity)}
            columns.append(cdict)
        return columns

Example 69

Project: sqlalchemy_exasol Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        schema_int = schema or connection.engine.url.database
        if schema_int is None:
            schema_int = connection.execute("select CURRENT_SCHEMA from dual").scalar()
        table_name=self.denormalize_name(table_name)
        def fkey_rec():
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        for row in self._get_all_constraints(connection, schema=schema, info_cache=kw.get("info_cache")):
            if (row[5] != table_name and table_name is not None) or row[6] != 'FOREIGN KEY':
                continue
            (cons_name, local_column, remote_schema, remote_table, remote_column) = \
                    (row[0], row[1], row[2], row[3], row[4])
            rec = fkeys[self.normalize_name(cons_name)]
            rec['name'] = self.normalize_name(cons_name)
            local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']

            if not rec['referred_table']:
                rec['referred_table'] = self.normalize_name(remote_table)
                # we need to take care of calls without a schema; the SQLA test suite
                # expects referred_schema to be None if None is passed in to this function
                if schema is None and self.normalize_name(schema_int) == self.normalize_name(remote_schema):
                    rec['referred_schema'] = None
                else:
                    rec['referred_schema'] = self.normalize_name(remote_schema)

            local_cols.append(self.normalize_name(local_column))
            remote_cols.append(self.normalize_name(remote_column))

        return list(fkeys.values())

Example 70

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT DISTINCT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source, 
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pkey_cols = self.get_primary_keys(connection, table_name)

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif colspec == 'INT64':
                coltype = coltype(
                                precision=row['fprec'], 
                                scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case 
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                            'DEFAULT', "Unrecognized default value: %s" % \
                            defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name' : name,
                'type' : coltype,
                'nullable' :  not bool(row['null_flag']),
                'default' : defvalue
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, check whether it's linked to
            # a sequence through a trigger
            if len(pkey_cols)==1 and name==pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

Example 71

Project: CouchPotatoV1 Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of each UK/FK of the given table
        fkqry = """
        SELECT rc.rdb$constraint_name AS cname,
               cse.rdb$field_name AS fname,
               ix2.rdb$relation_name AS targetrname,
               se.rdb$field_name AS targetfname
        FROM rdb$relation_constraints rc
             JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
             JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
             JOIN rdb$index_segments cse ON
                        cse.rdb$index_name=ix1.rdb$index_name
             JOIN rdb$index_segments se
                  ON se.rdb$index_name=ix2.rdb$index_name
                     AND se.rdb$field_position=cse.rdb$field_position
        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
        ORDER BY se.rdb$index_name, se.rdb$field_position
        """
        tablename = self.denormalize_name(table_name)

        c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
        fks = util.defaultdict(lambda:{
            'name' : None,
            'constrained_columns' : [],
            'referred_schema' : None,
            'referred_table' : None,
            'referred_columns' : []
        })

        for row in c:
            cname = self.normalize_name(row['cname'])
            fk = fks[cname]
            if not fk['name']:
                fk['name'] = cname
                fk['referred_table'] = self.normalize_name(row['targetrname'])
            fk['constrained_columns'].append(
                                self.normalize_name(row['fname']))
            fk['referred_columns'].append(
                                self.normalize_name(row['targetfname']))
        return fks.values()

Example 72

Project: CouchPotatoV1 Source File: base.py
Function: get_columns
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or self.default_schema_name
        c = connection.execute(
            """select colname, coltype, collength, t3.default, t1.colno from
                syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
                where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
                  and t3.tabid = t2.tabid and t3.colno = t1.colno
                order by t1.colno""", table_name, schema)

        primary_cols = self.get_primary_keys(connection, table_name, schema, **kw)

        columns = []
        rows = c.fetchall()
        for name, colattr, collength, default, colno in rows:
            name = name.lower()

            autoincrement = False
            primary_key = False

            if name in primary_cols:
                primary_key = True

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null, 0 null
            not_nullable, coltype = divmod(colattr, 256)
            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 6: # Serial, mark as autoincrement
                autoincrement = True

            if coltype == 0 or coltype == 13: # char, varchar
                coltype = ischema_names[coltype](collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5: # decimal
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = sqltypes.Numeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE
            
            column_info = dict(name=name, type=coltype, nullable=not not_nullable,
                               default=default, autoincrement=autoincrement,
                               primary_key=primary_key)
            columns.append(column_info)
        return columns

Example 73

Project: CouchPotatoV1 Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        schema_sel = schema or self.default_schema_name
        c = connection.execute(
        """select t1.constrname as cons_name,
                 t4.colname as local_column, t7.tabname as remote_table,
                 t6.colname as remote_column, t7.owner as remote_owner 
            from sysconstraints as t1 , systables as t2 ,
                 sysindexes as t3 , syscolumns as t4 ,
                 sysreferences as t5 , syscolumns as t6 , systables as t7 ,
                 sysconstraints as t8 , sysindexes as t9
           where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'R'
             and t3.tabid = t2.tabid and t3.idxname = t1.idxname
             and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
             t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
             t3.part11, t3.part12, t3.part13, t3.part14, t3.part15, t3.part16)
             and t5.constrid = t1.constrid and t8.constrid = t5.primary
             and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
             t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
             t9.part11, t9.part12, t9.part13, t9.part14, t9.part15, t9.part16) and t9.idxname =
             t8.idxname
             and t7.tabid = t5.ptabid""", table_name, schema_sel)


        def fkey_rec():
            return {
                 'name' : None,
                 'constrained_columns' : [],
                 'referred_schema' : None,
                 'referred_table' : None,
                 'referred_columns' : []
             }

        fkeys = util.defaultdict(fkey_rec)

        rows = c.fetchall()
        for cons_name, local_column, \
                    remote_table, remote_column, remote_owner in rows:

            rec = fkeys[cons_name]
            rec['name'] = cons_name
            local_cols, remote_cols = \
                        rec['constrained_columns'], rec['referred_columns']

            if not rec['referred_table']:
                rec['referred_table'] = remote_table
                if schema is not None:
                    rec['referred_schema'] = remote_owner

            if local_column not in local_cols:
                local_cols.append(local_column)
            if remote_column not in remote_cols:
                remote_cols.append(remote_column)

        return fkeys.values()

Example 74

Project: CouchPotatoV1 Source File: base.py
Function: get_primary_keys
    @reflection.cache
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        schema = schema or self.default_schema_name

        # Select the column positions from sysindexes for sysconstraints
        data = connection.execute(
            """select t2.* 
            from systables as t1, sysindexes as t2, sysconstraints as t3
            where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=?
            and t2.idxname=t3.idxname and t3.constrtype='P'""",
            table_name, schema
        ).fetchall()

        colpositions = set()

        for row in data:
            colpos = set([getattr(row, 'part%d' % x) for x in range(1, 17)])  # part1..part16
            colpositions |= colpos

        if not len(colpositions):
            return []

        # Select the column names using the columnpositions
        # TODO: Maybe cache a bit of those col infos (eg select all colnames for one table)
        place_holder = ','.join('?'*len(colpositions))
        c = connection.execute(
            """select t1.colname
            from syscolumns as t1, systables as t2
            where t2.tabname=? and t1.tabid = t2.tabid and 
            t1.colno in (%s)""" % place_holder,
            table_name, *colpositions
        ).fetchall()

        # flatten the rows of single-column tuples into a flat list of names
        return reduce(lambda x, y: list(x) + list(y), c, [])

Example 75

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'
 
        c = connection.execute(sql.text(
                "SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
                "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
                "WHERE table_name = :table_name AND owner = :owner " 
                "ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
                               table_name=table_name, owner=schema)

        for row in c:
            (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6])

            if coltype == 'NUMBER' :
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype: 
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns

Example 76

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None,
                    resolve_synonyms=False, dblink='', **kw):

        info_cache = kw.get('info_cache')
        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        q = sql.text("""
        SELECT a.index_name, a.column_name, b.uniqueness
        FROM ALL_IND_COLUMNS%(dblink)s a,
             ALL_INDEXES%(dblink)s b
        WHERE a.index_name = b.index_name
          AND a.table_owner = b.table_owner
          AND a.table_name = b.table_name
          AND a.table_name = :table_name
          AND a.table_owner = :schema
        ORDER BY a.index_name, a.column_position""" % {'dblink': dblink})
        rp = connection.execute(q, table_name=self.denormalize_name(table_name),
                                schema=self.denormalize_name(schema))
        indexes = []
        last_index_name = None
        pkeys = self.get_primary_keys(connection, table_name, schema,
                                      resolve_synonyms=resolve_synonyms,
                                      dblink=dblink,
                                      info_cache=kw.get('info_cache'))
        uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
        
        oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)

        def upper_name_set(names):
            return set([i.upper() for i in names])

        pk_names = upper_name_set(pkeys)

        def remove_if_primary_key(index):
            # don't include the primary key index
            if index is not None and \
               upper_name_set(index['column_names']) == pk_names:
                indexes.pop()

        index = None
        for rset in rp:
            if rset.index_name != last_index_name:
                remove_if_primary_key(index)
                index = dict(name=self.normalize_name(rset.index_name), column_names=[])
                indexes.append(index)
            index['unique'] = uniqueness.get(rset.uniqueness, False)

            # filter out Oracle SYS_NC names.  could also do an outer join
            # to the all_tab_columns table and check for real col names there.
            if not oracle_sys_col.match(rset.column_name):
                index['column_names'].append(self.normalize_name(rset.column_name))
            last_index_name = rset.index_name
        remove_if_primary_key(index)
        return indexes

Example 77

Project: CouchPotatoV1 Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        requested_schema = schema # to check later on
        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)

        constraint_data = self._get_constraint_data(connection, table_name,
                                                schema, dblink,
                                                info_cache=kw.get('info_cache'))

        def fkey_rec():
            return {
                'name' : None,
                'constrained_columns' : [],
                'referred_schema' : None,
                'referred_table' : None,
                'referred_columns' : []
            }

        fkeys = util.defaultdict(fkey_rec)
        
        for row in constraint_data:
            (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
                    row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])

            if cons_type == 'R':
                if remote_table is None:
                    # ticket 363
                    util.warn(
                        ("Got 'None' querying 'table_name' from "
                         "all_cons_columns%(dblink)s - does the user have "
                         "proper rights to the table?") % {'dblink':dblink})
                    continue

                rec = fkeys[cons_name]
                rec['name'] = cons_name
                local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']

                if not rec['referred_table']:
                    if resolve_synonyms:
                        ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
                                self._resolve_synonym(
                                    connection, 
                                    desired_owner=self.denormalize_name(remote_owner), 
                                    desired_table=self.denormalize_name(remote_table)
                                )
                        if ref_synonym:
                            remote_table = self.normalize_name(ref_synonym)
                            remote_owner = self.normalize_name(ref_remote_owner)
                    
                    rec['referred_table'] = remote_table
                    
                    if requested_schema is not None or self.denormalize_name(remote_owner) != schema:
                        rec['referred_schema'] = remote_owner
                
                local_cols.append(local_column)
                remote_cols.append(remote_column)

        return fkeys.values()

Example 78

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_table_oid(self, connection, table_name, schema=None, **kw):
        """Fetch the oid for schema.table_name.

        Several reflection methods require the table oid.  The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        """
        table_oid = None
        if schema is not None:
            schema_where_clause = "n.nspname = :schema"
        else:
            schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
        query = """
            SELECT c.oid
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE (%s)
            AND c.relname = :table_name AND c.relkind in ('r','v')
        """ % schema_where_clause
        # Since we're binding to unicode, table_name and schema_name must be
        # unicode.
        table_name = unicode(table_name)
        if schema is not None:
            schema = unicode(schema)
        s = sql.text(query, bindparams=[
            sql.bindparam('table_name', type_=sqltypes.Unicode),
            sql.bindparam('schema', type_=sqltypes.Unicode)
            ],
            typemap={'oid':sqltypes.Integer}
        )
        c = connection.execute(s, table_name=table_name, schema=schema)
        table_oid = c.scalar()
        if table_oid is None:
            raise exc.NoSuchTableError(table_name)
        return table_oid
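
The docstring above describes the caching pattern that reflection.cache enables: fetch once, then reuse via the caller-supplied info_cache dict. As a minimal sketch, not SQLAlchemy's actual implementation, a decorator like reflection.cache can memoize on method name and hashable arguments:

import functools

def cache(fn):
    # Simplified illustration of a reflection-cache decorator: results
    # are stored in the caller-supplied info_cache dict, so repeated
    # reflection of the same table runs its query only once.
    @functools.wraps(fn)
    def wrapper(self, connection, *args, **kw):
        info_cache = kw.get('info_cache')
        if info_cache is None:
            return fn(self, connection, *args, **kw)
        # key on hashable arguments only; the cache dict itself is excluded
        key = (fn.__name__, args,
               tuple((k, v) for k, v in sorted(kw.items())
                     if isinstance(v, (str, int, float))))
        if key not in info_cache:
            info_cache[key] = fn(self, connection, *args, **kw)
        return info_cache[key]
    return wrapper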

Example 79

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):

        table_oid = self.get_table_oid(connection, table_name, schema,
                                       info_cache=kw.get('info_cache'))
        SQL_COLS = """
            SELECT a.attname,
              pg_catalog.format_type(a.atttypid, a.atttypmod),
              (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) 
                for 128) 
                FROM pg_catalog.pg_attrdef d
               WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum 
               AND a.atthasdef)
              AS DEFAULT,
              a.attnotnull, a.attnum, a.attrelid as table_oid
            FROM pg_catalog.pg_attribute a
            WHERE a.attrelid = :table_oid
            AND a.attnum > 0 AND NOT a.attisdropped
            ORDER BY a.attnum
        """
        s = sql.text(SQL_COLS, 
            bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)], 
            typemap={'attname':sqltypes.Unicode, 'default':sqltypes.Unicode}
        )
        c = connection.execute(s, table_oid=table_oid)
        rows = c.fetchall()
        domains = self._load_domains(connection)
        enums = self._load_enums(connection)
        
        # format columns
        columns = []
        for name, format_type, default, notnull, attnum, table_oid in rows:
            ## strip (5) from character varying(5), timestamp(5) 
            # with time zone, etc
            attype = re.sub(r'\([\d,]+\)', '', format_type)
            
            # strip '[]' from integer[], etc.
            attype = re.sub(r'\[\]', '', attype)
            
            nullable = not notnull
            is_array = format_type.endswith('[]')
            charlen = re.search(r'\(([\d,]+)\)', format_type)
            if charlen:
                charlen = charlen.group(1)
            kwargs = {}
                
            if attype == 'numeric':
                if charlen:
                    prec, scale = charlen.split(',')
                    args = (int(prec), int(scale))
                else:
                    args = ()
            elif attype == 'double precision':
                args = (53, )
            elif attype == 'integer':
                args = (32, 0)
            elif attype in ('timestamp with time zone', 
                            'time with time zone'):
                kwargs['timezone'] = True
                if charlen:
                    kwargs['precision'] = int(charlen)
                args = ()
            elif attype in ('timestamp without time zone', 
                            'time without time zone', 'time'):
                kwargs['timezone'] = False
                if charlen:
                    kwargs['precision'] = int(charlen)
                args = ()
            elif attype in ('interval','interval year to month',
                                'interval day to second'):
                if charlen:
                    kwargs['precision'] = int(charlen)
                args = ()
            elif charlen:
                args = (int(charlen),)
            else:
                args = ()
            
            while True:
                if attype in self.ischema_names:
                    coltype = self.ischema_names[attype]
                    break
                elif attype in enums:
                    enum = enums[attype]
                    coltype = ENUM
                    if "." in attype:
                        kwargs['schema'], kwargs['name'] = attype.split('.')
                    else:
                        kwargs['name'] = attype
                    args = tuple(enum['labels'])
                    break
                elif attype in domains:
                    domain = domains[attype]
                    attype = domain['attype']
                    # A table can't override whether the domain is nullable.
                    nullable = domain['nullable']
                    if domain['default'] and not default:
                        # It can, however, override the default 
                        # value, but can't set it to null.
                        default = domain['default']
                    continue
                else:
                    coltype = None
                    break
                
            if coltype:
                coltype = coltype(*args, **kwargs)
                if is_array:
                    coltype = ARRAY(coltype)
            else:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (attype, name))
                coltype = sqltypes.NULLTYPE
            # adjust the default value
            autoincrement = False
            if default is not None:
                match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
                if match is not None:
                    autoincrement = True
                    # the default is related to a Sequence
                    sch = schema
                    if '.' not in match.group(2) and sch is not None:
                        # unconditionally quote the schema name.  this could
                        # later be enhanced to obey quoting rules / 
                        # "quote schema"
                        default = match.group(1) + \
                                    ('"%s"' % sch) + '.' + \
                                    match.group(2) + match.group(3)

            column_info = dict(name=name, type=coltype, nullable=nullable,
                               default=default, autoincrement=autoincrement)
            columns.append(column_info)
        return columns
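
The nextval-detection at the end of this method marks a column as autoincrementing and schema-qualifies an unqualified sequence name. A minimal sketch with a hypothetical server default string:

import re

# A minimal sketch of the nextval handling above; the default string
# and schema name are hypothetical.
default = "nextval('users_id_seq'::regclass)"
schema = 'public'
match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
if match is not None and '.' not in match.group(2):
    # unconditionally quote the schema name, as the reflection code does
    default = match.group(1) + \
        ('"%s"' % schema) + '.' + \
        match.group(2) + match.group(3)
print(default)  # nextval('"public".users_id_seq'::regclass)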

Example 80

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        preparer = self.identifier_preparer
        table_oid = self.get_table_oid(connection, table_name, schema,
                                       info_cache=kw.get('info_cache'))
        FK_SQL = """
          SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef
          FROM  pg_catalog.pg_constraint r
          WHERE r.conrelid = :table AND r.contype = 'f'
          ORDER BY 1
        """

        t = sql.text(FK_SQL, typemap={
                                'conname':sqltypes.Unicode,
                                'condef':sqltypes.Unicode})
        c = connection.execute(t, table=table_oid)
        fkeys = []
        for conname, condef in c.fetchall():
            m = re.search(r'FOREIGN KEY \((.*?)\) REFERENCES '
                          r'(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups()
            constrained_columns, referred_schema, \
                    referred_table, referred_columns = m
            constrained_columns = [preparer._unquote_identifier(x) 
                        for x in re.split(r'\s*,\s*', constrained_columns)]
            if referred_schema:
                referred_schema =\
                                preparer._unquote_identifier(referred_schema)
            elif schema is not None and schema == self.default_schema_name:
                # no schema (i.e. its the default schema), and the table we're
                # reflecting has the default schema explicit, then use that.
                # i.e. try to use the user's conventions
                referred_schema = schema
            referred_table = preparer._unquote_identifier(referred_table)
            referred_columns = [preparer._unquote_identifier(x)
                        for x in re.split(r'\s*,\s*', referred_columns)]
            fkey_d = {
                'name' : conname,
                'constrained_columns' : constrained_columns,
                'referred_schema' : referred_schema,
                'referred_table' : referred_table,
                'referred_columns' : referred_columns
            }
            fkeys.append(fkey_d)
        return fkeys
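
The regex above pulls the constrained columns, optional referred schema, referred table and referred columns out of the pg_get_constraintdef() text. A minimal sketch with a hypothetical constraint definition:

import re

# A minimal sketch of the condef parsing above; the definition string
# is a hypothetical pg_get_constraintdef() result.
condef = "FOREIGN KEY (user_id) REFERENCES public.users(id)"
m = re.search(r'FOREIGN KEY \((.*?)\) REFERENCES '
              r'(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups()
constrained, referred_schema, referred_table, referred_cols = m
print(m)  # ('user_id', 'public', 'users', 'id')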

Example 81

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_indexes(self, connection, table_name, schema, **kw):
        table_oid = self.get_table_oid(connection, table_name, schema,
                                       info_cache=kw.get('info_cache'))
        IDX_SQL = """
          SELECT c.relname, i.indisunique, i.indexprs, i.indpred,
            a.attname
          FROM pg_index i, pg_class c, pg_attribute a
          WHERE i.indrelid = :table_oid AND i.indexrelid = c.oid
            AND a.attrelid = i.indexrelid AND i.indisprimary = 'f'
          ORDER BY c.relname, a.attnum
        """
        t = sql.text(IDX_SQL, typemap={'attname':sqltypes.Unicode})
        c = connection.execute(t, table_oid=table_oid)
        index_names = {}
        indexes = []
        sv_idx_name = None
        for row in c.fetchall():
            idx_name, unique, expr, prd, col = row
            if expr:
                if idx_name != sv_idx_name:
                    util.warn(
                      "Skipped unsupported reflection of "
                      "expression-based index %s"
                      % idx_name)
                sv_idx_name = idx_name
                continue
            if prd and idx_name != sv_idx_name:
                util.warn(
                   "Predicate of partial index %s ignored during reflection"
                   % idx_name)
                sv_idx_name = idx_name
            if idx_name in index_names:
                index_d = index_names[idx_name]
            else:
                index_d = {'column_names':[]}
                indexes.append(index_d)
                index_names[idx_name] = index_d
            index_d['name'] = idx_name
            index_d['column_names'].append(col)
            index_d['unique'] = unique
        return indexes
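
These dialect methods are normally reached through the Inspector facade, which supplies the shared info_cache that the decorator keys into. A minimal usage sketch, with a hypothetical connection URL:

from sqlalchemy import create_engine
from sqlalchemy.engine import reflection

# Hypothetical DSN; any backend with a reflecting dialect works.
engine = create_engine('postgresql://scott:tiger@localhost/test')
insp = reflection.Inspector.from_engine(engine)
# The Inspector passes one info_cache into every dialect call, so
# get_indexes() and get_columns() reuse the oid from get_table_oid().
print(insp.get_indexes('users'))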

Example 82

Project: CouchPotatoV1 Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        c = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
        found_table = False
        columns = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type_, nullable, default, has_default, primary_key) = \
                (row[1], row[2].upper(), not row[3],
                 row[4], row[4] is not None, row[5])
            name = re.sub(r'^\"|\"$', '', name)
            if default:
                default = re.sub(r"^\'|\'$", '', default)
            match = re.match(r'(\w+)(\(.*?\))?', type_)
            if match:
                coltype = match.group(1)
                args = match.group(2)
            else:
                coltype = "VARCHAR"
                args = ''
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NullType
            if args is not None:
                args = re.findall(r'(\d+)', args)
                coltype = coltype(*[int(a) for a in args])

            columns.append({
                'name' : name,
                'type' : coltype,
                'nullable' : nullable,
                'default' : default,
                'primary_key': primary_key
            })
        return columns
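
The type-string handling above splits a PRAGMA type such as "VARCHAR(30)" into a type name and its numeric arguments. A minimal sketch with a hypothetical value:

import re

# A minimal sketch of the SQLite type-string parsing above.
type_ = "VARCHAR(30)"
match = re.match(r'(\w+)(\(.*?\))?', type_)
coltype, args = match.group(1), match.group(2)
args = [int(a) for a in re.findall(r'(\d+)', args or '')]
print(coltype, args)  # VARCHAR [30]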

Example 83

Project: CouchPotatoV1 Source File: base.py
Function: get_indexes
    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        include_auto_indexes = kw.pop('include_auto_indexes', False)
        qtable = quote(table_name)
        c = _pragma_cursor(connection.execute("%sindex_list(%s)" % (pragma, qtable)))
        indexes = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            # ignore implicit primary key index.
            # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
            elif not include_auto_indexes and row[1].startswith('sqlite_autoindex'):
                continue

            indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
        # loop thru unique indexes to get the column names.
        for idx in indexes:
            c = connection.execute("%sindex_info(%s)" % (pragma, quote(idx['name'])))
            cols = idx['column_names']
            while True:
                row = c.fetchone()
                if row is None:
                    break
                cols.append(row[2])
        return indexes

Example 84

Project: kokoropy Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source,
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pk_constraint = self.get_pk_constraint(connection, table_name)
        pkey_cols = pk_constraint['constrained_columns']

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif issubclass(coltype, Integer) and row['fprec'] != 0:
                coltype = NUMERIC(
                    precision=row['fprec'],
                    scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                    'DEFAULT', "Unrecognized default value: %s" % \
                    defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue,
                'autoincrement': defvalue is None
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols
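
The default-clause handling above accepts rdb$default_source values with arbitrary casing and whitespace around the DEFAULT keyword. A minimal sketch with a hypothetical incoming value:

# A minimal sketch of the DEFAULT-clause handling above; the incoming
# rdb$default_source string is hypothetical.
defexpr = "default   'abc'".lstrip()
assert defexpr[:8].rstrip().upper() == 'DEFAULT'
defvalue = defexpr[8:].strip()
if defvalue == 'NULL':
    # Redundant: a NULL default is the same as no default
    defvalue = None
print(defvalue)  # 'abc'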

Example 85

Project: kokoropy Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'

        params = {"table_name": table_name}
        text = "SELECT column_name, data_type, %(char_length_col)s, "\
            "data_precision, data_scale, "\
            "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\
            "WHERE table_name = :table_name"
        if schema is not None:
            params['owner'] = schema
            text += " AND owner = :owner "
        text += " ORDER BY column_id"
        text = text % {'dblink': dblink, 'char_length_col': char_length_col}

        c = connection.execute(sql.text(text), **params)

        for row in c:
            (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[
                 2], row[3], row[4], row[5] == 'Y', row[6])

            if coltype == 'NUMBER':
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype:
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': default is None
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns

Example 86

Project: kokoropy Source File: base.py
    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None,
                    resolve_synonyms=False, dblink='', **kw):

        info_cache = kw.get('info_cache')
        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        indexes = []

        params = {'table_name': table_name}
        text = \
            "SELECT a.index_name, a.column_name, b.uniqueness "\
            "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\
            "\nALL_INDEXES%(dblink)s b "\
            "\nWHERE "\
            "\na.index_name = b.index_name "\
            "\nAND a.table_owner = b.table_owner "\
            "\nAND a.table_name = b.table_name "\
            "\nAND a.table_name = :table_name "

        if schema is not None:
            params['schema'] = schema
            text += "AND a.table_owner = :schema "

        text += "ORDER BY a.index_name, a.column_position"

        text = text % {'dblink': dblink}

        q = sql.text(text)
        rp = connection.execute(q, **params)
        indexes = []
        last_index_name = None
        pk_constraint = self.get_pk_constraint(
            connection, table_name, schema, resolve_synonyms=resolve_synonyms,
            dblink=dblink, info_cache=kw.get('info_cache'))
        pkeys = pk_constraint['constrained_columns']
        uniqueness = dict(NONUNIQUE=False, UNIQUE=True)

        oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)

        def upper_name_set(names):
            return set([i.upper() for i in names])

        pk_names = upper_name_set(pkeys)

        def remove_if_primary_key(index):
            # don't include the primary key index
            if index is not None and \
               upper_name_set(index['column_names']) == pk_names:
                indexes.pop()

        index = None
        for rset in rp:
            if rset.index_name != last_index_name:
                remove_if_primary_key(index)
                index = dict(name=self.normalize_name(rset.index_name),
                             column_names=[])
                indexes.append(index)
            index['unique'] = uniqueness.get(rset.uniqueness, False)

            # filter out Oracle SYS_NC names.  could also do an outer join
            # to the all_tab_columns table and check for real col names there.
            if not oracle_sys_col.match(rset.column_name):
                index['column_names'].append(
                    self.normalize_name(rset.column_name))
            last_index_name = rset.index_name
        remove_if_primary_key(index)
        return indexes

Example 87

Project: kokoropy Source File: base.py
    @reflection.cache
    def _get_constraint_data(self, connection, table_name, schema=None,
                             dblink='', **kw):

        params = {'table_name': table_name}

        text = \
            "SELECT"\
            "\nac.constraint_name,"\
            "\nac.constraint_type,"\
            "\nloc.column_name AS local_column,"\
            "\nrem.table_name AS remote_table,"\
            "\nrem.column_name AS remote_column,"\
            "\nrem.owner AS remote_owner,"\
            "\nloc.position as loc_pos,"\
            "\nrem.position as rem_pos"\
            "\nFROM all_constraints%(dblink)s ac,"\
            "\nall_cons_columns%(dblink)s loc,"\
            "\nall_cons_columns%(dblink)s rem"\
            "\nWHERE ac.table_name = :table_name"\
            "\nAND ac.constraint_type IN ('R','P')"

        if schema is not None:
            params['owner'] = schema
            text += "\nAND ac.owner = :owner"

        text += \
            "\nAND ac.owner = loc.owner"\
            "\nAND ac.constraint_name = loc.constraint_name"\
            "\nAND ac.r_owner = rem.owner(+)"\
            "\nAND ac.r_constraint_name = rem.constraint_name(+)"\
            "\nAND (rem.position IS NULL or loc.position=rem.position)"\
            "\nORDER BY ac.constraint_name, loc.position"

        text = text % {'dblink': dblink}
        rp = connection.execute(sql.text(text), **params)
        constraint_data = rp.fetchall()
        return constraint_data

Example 88

Project: kokoropy Source File: base.py
Function: get_pk_constraint
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        pkeys = []
        constraint_name = None
        constraint_data = self._get_constraint_data(
            connection, table_name, schema, dblink,
            info_cache=kw.get('info_cache'))

        for row in constraint_data:
            (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
                row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
            if cons_type == 'P':
                if constraint_name is None:
                    constraint_name = self.normalize_name(cons_name)
                pkeys.append(local_column)
        return {'constrained_columns': pkeys, 'name': constraint_name}
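
Both this method and get_foreign_keys() below consume the rows cached by _get_constraint_data() in Example 87, splitting on the constraint type. A minimal sketch with hypothetical row tuples shaped like that SELECT:

# Hypothetical rows: (constraint_name, constraint_type, local_column,
# remote_table, remote_column, remote_owner, loc_pos, rem_pos)
rows = [
    ('PK_USERS', 'P', 'ID', None, None, None, 1, None),
    ('FK_ADDR_USER', 'R', 'USER_ID', 'USERS', 'ID', 'SCOTT', 1, 1),
]
pkeys = [local for (_, ctype, local, _, _, _, _, _) in rows if ctype == 'P']
fk_rows = [row for row in rows if row[1] == 'R']
print(pkeys)          # ['ID']
print(fk_rows[0][3])  # 'USERS'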

Example 89

Project: kokoropy Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        requested_schema = schema  # to check later on
        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)

        constraint_data = self._get_constraint_data(
            connection, table_name, schema, dblink,
            info_cache=kw.get('info_cache'))

        def fkey_rec():
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        for row in constraint_data:
            (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
                row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])

            if cons_type == 'R':
                if remote_table is None:
                    # ticket 363
                    util.warn(
                        ("Got 'None' querying 'table_name' from "
                         "all_cons_columns%(dblink)s - does the user have "
                         "proper rights to the table?") % {'dblink': dblink})
                    continue

                rec = fkeys[cons_name]
                rec['name'] = cons_name
                local_cols, remote_cols = rec[
                    'constrained_columns'], rec['referred_columns']

                if not rec['referred_table']:
                    if resolve_synonyms:
                        ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
                            self._resolve_synonym(
                                connection,
                                desired_owner=self.denormalize_name(
                                    remote_owner),
                                desired_table=self.denormalize_name(
                                    remote_table)
                            )
                        if ref_synonym:
                            remote_table = self.normalize_name(ref_synonym)
                            remote_owner = self.normalize_name(
                                ref_remote_owner)

                    rec['referred_table'] = remote_table

                    if requested_schema is not None or \
                       self.denormalize_name(remote_owner) != schema:
                        rec['referred_schema'] = remote_owner

                local_cols.append(local_column)
                remote_cols.append(remote_column)

        return list(fkeys.values())

Example 90

Project: kokoropy Source File: base.py
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):

        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        table_cache = {}
        column_cache = {}
        foreign_keys = []

        table_cache[table_id] = {"name": table_name, "schema": schema}

        COLUMN_SQL = text("""
          SELECT c.colid AS id, c.name AS name
          FROM syscolumns c
          WHERE c.id = :table_id
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = {}
        for col in results:
            columns[col["id"]] = col["name"]
        column_cache[table_id] = columns

        REFCONSTRAINT_SQL = text("""
          SELECT o.name AS name, r.reftabid AS reftable_id,
            r.keycnt AS 'count',
            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
            r.fokey16 AS fokey16,
            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
            r.refkey16 AS refkey16
          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
          WHERE r.tableid = :table_id
        """)
        referential_constraints = connection.execute(REFCONSTRAINT_SQL,
                                                     table_id=table_id)

        REFTABLE_SQL = text("""
          SELECT o.name AS name, u.name AS 'schema'
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE o.id = :table_id
        """)

        for r in referential_constraints:
            reftable_id = r["reftable_id"]

            if reftable_id not in table_cache:
                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
                reftable = c.fetchone()
                c.close()
                table_info = {"name": reftable["name"], "schema": None}
                if (schema is not None or
                        reftable["schema"] != self.default_schema_name):
                    table_info["schema"] = reftable["schema"]

                table_cache[reftable_id] = table_info
                results = connection.execute(COLUMN_SQL, table_id=reftable_id)
                reftable_columns = {}
                for col in results:
                    reftable_columns[col["id"]] = col["name"]
                column_cache[reftable_id] = reftable_columns

            reftable = table_cache[reftable_id]
            reftable_columns = column_cache[reftable_id]

            constrained_columns = []
            referred_columns = []
            for i in range(1, r["count"] + 1):
                constrained_columns.append(columns[r["fokey%i" % i]])
                referred_columns.append(reftable_columns[r["refkey%i" % i]])

            fk_info = {
                "constrained_columns": constrained_columns,
                "referred_schema": reftable["schema"],
                "referred_table": reftable["name"],
                "referred_columns": referred_columns,
                "name": r["name"]
            }

            foreign_keys.append(fk_info)

        return foreign_keys
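
The sysreferences row holds up to sixteen numbered fokey/refkey column ids, and keycnt says how many are in use; the loop above maps them to names through the column caches. A minimal sketch with a hypothetical row:

# A minimal sketch of the fokey/refkey expansion above, with a
# hypothetical constraint row and column-id caches.
r = {'count': 2, 'fokey1': 1, 'fokey2': 2, 'refkey1': 10, 'refkey2': 11}
columns = {1: 'user_id', 2: 'org_id'}
reftable_columns = {10: 'id', 11: 'org'}
constrained = [columns[r['fokey%i' % i]] for i in range(1, r['count'] + 1)]
referred = [reftable_columns[r['refkey%i' % i]] for i in range(1, r['count'] + 1)]
print(constrained, referred)  # ['user_id', 'org_id'] ['id', 'org']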

Example 91

Project: maraschino Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
                        r.rdb$null_flag AS null_flag,
                        t.rdb$type_name AS ftype,
                        f.rdb$field_sub_type AS stype,
                        f.rdb$field_length/
                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,
                        f.rdb$field_precision AS fprec,
                        f.rdb$field_scale AS fscale,
                        COALESCE(r.rdb$default_source, 
                                f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pkey_cols = self.get_primary_keys(connection, table_name)

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif colspec == 'INT64':
                coltype = coltype(
                                precision=row['fprec'], 
                                scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case 
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                            'DEFAULT', "Unrecognized default value: %s" % \
                            defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name' : name,
                'type' : coltype,
                'nullable' : not bool(row['null_flag']),
                'default' : defvalue,
                'autoincrement' : defvalue is None
            }

            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

Example 92

Project: maraschino Source File: base.py
Function: get_columns
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or self.default_schema_name
        c = connection.execute(
            """select colname, coltype, collength, t3.default, t1.colno from
                syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
                where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
                  and t3.tabid = t2.tabid and t3.colno = t1.colno
                order by t1.colno""", table_name, schema)

        primary_cols = self.get_primary_keys(connection, table_name, schema, **kw)

        columns = []
        rows = c.fetchall()
        for name, colattr, collength, default, colno in rows:
            name = name.lower()

            autoincrement = False
            primary_key = False

            if name in primary_cols:
                primary_key = True

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null, 0 null
            not_nullable, coltype = divmod(colattr, 256)
            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 6: # Serial, mark as autoincrement
                autoincrement = True

            if coltype == 0 or coltype == 13: # char, varchar
                coltype = ischema_names[coltype](collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5: # decimal
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = sqltypes.Numeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE

            column_info = dict(name=name, type=coltype, nullable=not not_nullable,
                               default=default, autoincrement=autoincrement,
                               primary_key=primary_key)
            columns.append(column_info)
        return columns
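
The divmod() call above decodes Informix's packed syscolumns attribute: the low byte carries the type code and the 0x100 bit the NOT NULL flag. A minimal sketch with a hypothetical value:

# A minimal sketch of the colattr decoding above.
colattr = 0x100 + 6  # hypothetical NOT NULL serial column
not_nullable, coltype = divmod(colattr, 256)
print(not_nullable, coltype)  # 1 6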

Example 93

Project: maraschino Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'
 
        c = connection.execute(sql.text(
                "SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
                "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
                "WHERE table_name = :table_name AND owner = :owner " 
                "ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
                               table_name=table_name, owner=schema)

        for row in c:
            (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6])

            if coltype == 'NUMBER':
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype: 
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement':default is None
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns

Example 94

Project: maraschino Source File: base.py
    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None,
                    resolve_synonyms=False, dblink='', **kw):


        info_cache = kw.get('info_cache')
        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        indexes = []
        q = sql.text("""
        SELECT a.index_name, a.column_name, b.uniqueness
        FROM ALL_IND_COLUMNS%(dblink)s a, 
        ALL_INDEXES%(dblink)s b 
        WHERE
            a.index_name = b.index_name
            AND a.table_owner = b.table_owner
            AND a.table_name = b.table_name

        AND a.table_name = :table_name
        AND a.table_owner = :schema
        ORDER BY a.index_name, a.column_position""" % {'dblink': dblink})
        rp = connection.execute(q, table_name=self.denormalize_name(table_name),
                                schema=self.denormalize_name(schema))
        indexes = []
        last_index_name = None
        pkeys = self.get_primary_keys(connection, table_name, schema,
                                      resolve_synonyms=resolve_synonyms,
                                      dblink=dblink,
                                      info_cache=kw.get('info_cache'))
        uniqueness = dict(NONUNIQUE=False, UNIQUE=True)

        oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)

        def upper_name_set(names):
            return set([i.upper() for i in names])

        pk_names = upper_name_set(pkeys)

        def remove_if_primary_key(index):
            # don't include the primary key index
            if index is not None and \
               upper_name_set(index['column_names']) == pk_names:
                indexes.pop()

        index = None
        for rset in rp:
            if rset.index_name != last_index_name:
                remove_if_primary_key(index)
                index = dict(name=self.normalize_name(rset.index_name), column_names=[])
                indexes.append(index)
            index['unique'] = uniqueness.get(rset.uniqueness, False)

            # filter out Oracle SYS_NC names.  could also do an outer join
            # to the all_tab_columns table and check for real col names there.
            if not oracle_sys_col.match(rset.column_name):
                index['column_names'].append(self.normalize_name(rset.column_name))
            last_index_name = rset.index_name
        remove_if_primary_key(index)
        return indexes

Example 95

Project: maraschino Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        requested_schema = schema # to check later on
        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)

        constraint_data = self._get_constraint_data(connection, table_name,
                                                schema, dblink,
                                                info_cache=kw.get('info_cache'))

        def fkey_rec():
            return {
                'name' : None,
                'constrained_columns' : [],
                'referred_schema' : None,
                'referred_table' : None,
                'referred_columns' : []
            }

        fkeys = util.defaultdict(fkey_rec)

        for row in constraint_data:
            (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
                    row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])

            if cons_type == 'R':
                if remote_table is None:
                    # ticket 363
                    util.warn(
                        ("Got 'None' querying 'table_name' from "
                         "all_cons_columns%(dblink)s - does the user have "
                         "proper rights to the table?") % {'dblink':dblink})
                    continue

                rec = fkeys[cons_name]
                rec['name'] = cons_name
                local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']

                if not rec['referred_table']:
                    if resolve_synonyms:
                        ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
                                self._resolve_synonym(
                                    connection, 
                                    desired_owner=self.denormalize_name(remote_owner), 
                                    desired_table=self.denormalize_name(remote_table)
                                )
                        if ref_synonym:
                            remote_table = self.normalize_name(ref_synonym)
                            remote_owner = self.normalize_name(ref_remote_owner)

                    rec['referred_table'] = remote_table

                    if requested_schema is not None or self.denormalize_name(remote_owner) != schema:
                        rec['referred_schema'] = remote_owner

                local_cols.append(local_column)
                remote_cols.append(remote_column)

        return fkeys.values()

Example 96

Project: maraschino Source File: base.py
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        c = _pragma_cursor(
                    connection.execute("%stable_info(%s)" % 
                    (pragma, qtable)))
        found_table = False
        columns = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type_, nullable, default, has_default, primary_key) = \
                (row[1], row[2].upper(), not row[3], 
                row[4], row[4] is not None, row[5])
            name = re.sub(r'^\"|\"$', '', name)
            match = re.match(r'(\w+)(\(.*?\))?', type_)
            if match:
                coltype = match.group(1)
                args = match.group(2)
            else:
                coltype = "VARCHAR"
                args = ''
            try:
                coltype = self.ischema_names[coltype]
                if args is not None:
                    args = re.findall(r'(\d+)', args)
                    coltype = coltype(*[int(a) for a in args])
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NullType()

            columns.append({
                'name' : name,
                'type' : coltype,
                'nullable' : nullable,
                'default' : default,
                'autoincrement':default is None,
                'primary_key': primary_key
            })
        return columns

Example 97

Project: maraschino Source File: base.py
Function: get_foreign_keys
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            pragma = "PRAGMA %s." % quote(schema)
        else:
            pragma = "PRAGMA "
        qtable = quote(table_name)
        c = _pragma_cursor(connection.execute("%sforeign_key_list(%s)" % (pragma, qtable)))
        fkeys = []
        fks = {}
        while True:
            row = c.fetchone()
            if row is None:
                break
            (constraint_name, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
            # sqlite won't return rcol if the table
            # was created with REFERENCES <tablename>, no col
            if rcol is None:
                rcol = lcol
            rtbl = re.sub(r'^\"|\"$', '', rtbl)
            lcol = re.sub(r'^\"|\"$', '', lcol)
            rcol = re.sub(r'^\"|\"$', '', rcol)
            try:
                fk = fks[constraint_name]
            except KeyError:
                fk = {
                    'name' : constraint_name,
                    'constrained_columns' : [],
                    'referred_schema' : None,
                    'referred_table' : rtbl,
                    'referred_columns' : []
                }
                fkeys.append(fk)
                fks[constraint_name] = fk

            # look up the table based on the given table's engine, not 'self',
            # since it could be a ProxyEngine
            if lcol not in fk['constrained_columns']:
                fk['constrained_columns'].append(lcol)
            if rcol not in fk['referred_columns']:
                fk['referred_columns'].append(rcol)
        return fkeys
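
PRAGMA foreign_key_list emits one row per column pair, repeating the constraint id for composite keys; the loop above folds those rows into one record each. A minimal sketch with hypothetical rows trimmed to (id, seq, referred_table, from_col, to_col):

# A minimal sketch of grouping PRAGMA foreign_key_list rows into one
# record per constraint id; the rows are hypothetical.
rows = [
    (0, 0, 'users', 'user_id', 'id'),
    (1, 0, 'orgs', 'org_id', 'id'),
    (1, 1, 'orgs', 'org_region', 'region'),
]
fks = {}
for cid, _seq, rtbl, lcol, rcol in rows:
    fk = fks.setdefault(cid, {'referred_table': rtbl,
                              'constrained_columns': [],
                              'referred_columns': []})
    fk['constrained_columns'].append(lcol)
    fk['referred_columns'].append(rcol)
print(sorted(fks))  # [0, 1]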