sqlalchemy.create_engine.connect

Here are examples of the Python API sqlalchemy.create_engine(...).connect(), taken from open source projects. Each example obtains a Connection directly from a freshly constructed Engine.
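
For context, create_engine(url) builds an Engine and .connect() checks a Connection out of its pool; the caller is responsible for closing it. A minimal sketch of the call pattern, assuming an in-memory SQLite URL:

from sqlalchemy import create_engine, text

# build an Engine from a database URL, then check out a Connection
conn = create_engine('sqlite://').connect()
result = conn.execute(text('SELECT 1'))
print(result.scalar())
conn.close()  # return the connection to the pool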

8 Examples

Source: case_harvester.py
with GNU General Public License v3.0
from dismantl

import os

from sqlalchemy import create_engine, text


def run_db_init_scripts(db_url):
    conn = create_engine(db_url).connect()
    scripts_path = os.path.join(os.path.dirname(__file__), '..', 'db', 'sql')
    for file in os.listdir(scripts_path):
        if file.endswith('.sql'):
            print(f'Running SQL initialization script {file}')
            with open(os.path.join(scripts_path, file), 'r') as script:
                commands = script.read()
            conn.execute(text(commands))
    conn.close()
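
If one of the scripts raises, the connection above is never closed; a variant using the Connection as a context manager releases it either way (a sketch, not from the original project):

import os
from sqlalchemy import create_engine, text

def run_db_init_scripts_safely(db_url, scripts_path):
    # the with-block closes the connection even if a script fails
    with create_engine(db_url).connect() as conn:
        for file in sorted(os.listdir(scripts_path)):
            if file.endswith('.sql'):
                with open(os.path.join(scripts_path, file)) as script:
                    conn.execute(text(script.read()))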

Source: test_mysql.py
with MIT License
from koxudaxi

from sqlalchemy import create_engine
from sqlalchemy.engine import Connection


def get_connection() -> Connection:
    return create_engine(
        # placeholder credentials and host; adjust for your environment
        'mysql+pymysql://root:password@localhost:13306/test?charset=utf8mb4'
    ).connect()
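
In the test module, this helper feeds a pytest fixture; a sketch of how such a fixture would typically hand out and then close the connection (assumed, not from the original file):

import pytest

@pytest.fixture(scope='module')
def connection():
    conn = get_connection()
    yield conn
    conn.close()  # release the MySQL connection once the module's tests finish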



Source: models.py
with MIT License
from limbov8

def get_db_session_instance():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine(get_postgre_connection()).connect()
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
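
Note that the variable named engine above actually holds a Connection, since .connect() is called before passing it to sessionmaker (which accepts either). A sketch of the more common pattern, binding the sessionmaker to the Engine itself so each Session manages its own pooled connection (the URL is hypothetical):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('postgresql://user:password@localhost/blog')  # hypothetical URL
Session = sessionmaker(bind=engine)

session = Session()
try:
    pass  # run queries here
finally:
    session.close()  # returns the connection to the pool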



Source: case_harvester.py
with GNU General Public License v3.0
from dismantl

def create_database_and_users(db_hostname, db_name, secrets, environment):
    master_username = secrets[environment]['DatabaseMasterUsername']
    master_password = secrets[environment]['DatabaseMasterPassword']
    username = secrets[environment]['DatabaseUsername']
    password = secrets[environment]['DatabasePassword']
    ro_username = secrets[environment]['DatabaseReadOnlyUsername']
    ro_password = secrets[environment]['DatabaseReadOnlyPassword']

    print("Creating database, roles, and users")
    postgres_url = f'postgresql://{master_username}:{master_password}@{db_hostname}/postgres'
    conn = create_engine(postgres_url).connect()
    # close the transaction SQLAlchemy opened implicitly, since PostgreSQL
    # refuses to run CREATE DATABASE inside a transaction block
    conn.execute(text("COMMIT"))
    conn.execute(text(f"CREATE DATABASE {db_name}"))
    conn.execute(text(f"""
        -- Create roles
        CREATE ROLE mjcs_admin NOLOGIN CREATEROLE;
        GRANT ALL PRIVILEGES ON DATABASE {db_name} TO mjcs_admin;
        GRANT rds_superuser TO mjcs_admin;
        CREATE ROLE mjcs_ro NOLOGIN;
        CREATE ROLE mjcs_ro_redacted NOLOGIN;

        -- Create users
        CREATE USER {username} LOGIN PASSWORD :pw;
        GRANT mjcs_admin TO {username};
        CREATE USER {ro_username} LOGIN PASSWORD :ro_pw;
        GRANT mjcs_ro_redacted TO {ro_username};
        COMMIT;
    """), pw=password, ro_pw=ro_password)
    conn.close()
        
    print('Setting basic permissions')
    db_url = f'postgresql://{username}:{password}@{db_hostname}/{db_name}'
    conn = create_engine(db_url).connect()
    conn.execute(text(f"""
        -- Database level permissions
        REVOKE ALL ON DATABASE mjcs FROM mjcs_ro, mjcs_ro_redacted;
        GRANT TEMPORARY, CONNECT ON DATABASE mjcs TO PUBLIC;

        -- Schema level permissions
        CREATE SCHEMA redacted AUTHORIZATION mjcs_admin;
        REVOKE ALL ON SCHEMA public, redacted FROM mjcs_ro, mjcs_ro_redacted;
        GRANT USAGE ON SCHEMA public, redacted TO PUBLIC;

        -- Table level permissions
        GRANT SELECT ON ALL TABLES IN SCHEMA public, redacted TO mjcs_admin, mjcs_ro;
        ALTER DEFAULT PRIVILEGES IN SCHEMA public
            GRANT SELECT ON TABLES TO mjcs_admin, mjcs_ro;
        ALTER DEFAULT PRIVILEGES IN SCHEMA redacted
            GRANT SELECT ON TABLES TO PUBLIC;
        COMMIT;
    """))
    conn.close()
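
The bare COMMIT issued before CREATE DATABASE works around PostgreSQL's refusal to create a database inside a transaction block. An engine-level alternative, as a sketch (URL and database name are placeholders):

from sqlalchemy import create_engine, text

# an AUTOCOMMIT engine runs each statement outside a transaction,
# so CREATE DATABASE needs no manual COMMIT first
engine = create_engine('postgresql://master:password@db-host/postgres',
                       isolation_level='AUTOCOMMIT')
with engine.connect() as conn:
    conn.execute(text('CREATE DATABASE example_db'))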

Source: cli.py
with MIT License
from OneGov

def import_reservations(dsn, map, start_date):
    """ Imports reservations from a legacy seantis.reservation system.

    WARNING: Existing reservations and all associated tickets/submissions
    are deleted during the migration.

    Pending reservations are ignored. The expectation is that the old system
    is stopped, then migrated, then never started again.

    Also note that search reindexing is disabled during import. A manual
    reindex is necessary afterwards.

    Example:

    onegov-org --select '/orgs/govikon' import-reservations\
        --dsn postgresql://localhost:5432/legacy\
        --map ressource-map.csv

    The resource map is a simple CSV file with the following columns:

    * Legacy URL (the URL pointing to the legacy resource/room)
    * OGC URL (the URL pointing to the associated resource in the OGC)
    * Type ('daypass' or 'room')

    The first row is expected to be the header row.

    """

    print("Connecting to remote")
    engine = create_engine(dsn).connect()

    @event.listens_for(engine, 'begin')
    def receive_begin(conn):
        conn.execute('SET TRANSACTION READ ONLY')

    remote = Session(bind=engine)

    # define the mapping between old/new resources
    class Mapping(object):

        def __init__(self, libres_context, old_url, new_url, type):
            self.resources = ResourceCollection(libres_context)
            self.old_url = old_url
            self.new_url = new_url
            self.type = type

        @property
        def name(self):
            return URL(self.new_url).path().rstrip('/').split('/')[-1]

        @cached_property
        def resource(self):
            return self.resources.by_name(name=self.name)

        @cached_property
        def old_uuid(self):
            return UUID(requests.get(f'{self.old_url}/@@uuid').text.strip())

        @property
        def new_uuid(self):
            return self.resource.id

    # run the given query returning the query and a count (if possible)
    def select_with_count(session, query, **params):
        query = query.strip(' \n')
        assert query.startswith("SELECT *")

        count = text(
            query
            .replace("SELECT *", "SELECT COUNT(*)", 1)
            .split('ORDER BY')[0]
        )
        count = session.execute(count, params).scalar()

        return count, session.execute(text(query), params)

    # takes a reservation data json and guesses if it was paid
    def was_paid(data):
        data = json.loads(data)

        for definition in data.values():
            for value in definition['values']:
                if is_payment_key(value['key']):
                    return value['value'] == True

        return False

    def is_payment_key(key):
        return key == 'bezahlt'

    print("Reading map")
    records = CSVFile(open(map, 'rb'), ('Old URL', 'New URL', 'Type')).lines
    records = tuple(records)

    # generate forms from the form-data found in the external system
    def get_form_class_and_data(data):

        if not data:
            return ReservationForm, {}

        def exclude(value):
            if as_internal_id(value['desc']) == 'email':
                return True

            if is_payment_key(value['key']):
                return True

        @lru_cache()
        def generate_form_class(formcode):
            return parse_form(formcode, ReservationForm)

        def separate_code_from_data(data):
            fields = []
            values = {}

            for form in sorted(data.values(), key=lambda f: f['desc']):
                fieldset = form['desc']
                fields.append(f"# {fieldset}")

                for v in sorted(form['values'], key=lambda r: r['sortkey']):
                    label = v['desc']
                    id = as_internal_id(f"{fieldset} {label}")

                    # defined on the reservation form
                    if id == 'email':
                        continue

                    if isinstance(v['value'], bool):
                        fields.append(f"{label} = ___")
                        values[id] = v['value'] and "Ja" or "Nein"
                    elif isinstance(v['value'], str):
                        fields.append(f"{label} = ___")
                        values[id] = v['value']
                    elif isinstance(v['value'], datetime):
                        fields.append(f"{label} = YYYY.MM.DD HH:MM")
                        values[id] = v['value']
                    elif isinstance(v['value'], date):
                        fields.append(f"{label} = YYYY.MM.DD")
                        values[id] = v['value']
                    elif isinstance(v['value'], int):
                        fields.append(f"{label} = ___")
                        values[id] = str(v['value'])
                    elif isinstance(v['value'], list):
                        fields.append(f"{label} = ___")
                        values[id] = ', '.join(v['value'])
                    else:
                        raise NotImplementedError((
                            f"No conversion for {v['value']} "
                            f" ({type(v['value'])}"
                        ))

            return '\n'.join(fields), values

        formcode, formdata = separate_code_from_data(data)
        return generate_form_class(formcode), formdata

    def handle_import(request, app):
        session = app.session()

        # disable search indexing during import
        if hasattr(app, 'es_orm_events'):
            app.es_orm_events.stopped = True

        # map the old UUIDs to the resources
        print("Mapping resources")
        mapping = {m.old_uuid: m for m in (
            Mapping(request.app.libres_context, r.old_url, r.new_url, r.type)
            for r in tqdm(records, unit=' resources')
        )}

        print("Clearing existing submissions")
        session.execute(text("""
            DELETE FROM submissions
             WHERE submissions.meta->>'origin' IN :resources
        """), {
            'resources': tuple(m.old_uuid.hex for m in mapping.values())
        })

        print("Clearing existing ticket messages")
        session.execute(text("""
            DELETE from messages
             WHERE channel_id IN (
                SELECT number
                  FROM tickets
                 WHERE tickets.handler_data->>'origin' IN :resources
             )
        """), {
            'resources': tuple(m.old_uuid.hex for m in mapping.values())
        })

        print("Clearing existing tickets")
        session.execute(text("""
            DELETE FROM tickets
             WHERE tickets.handler_data->>'origin' IN :resources
        """), {
            'resources': tuple(m.old_uuid.hex for m in mapping.values())
        })

        payment_ids = tuple(r[0] for r in session.execute(text("""
            SELECT payment_id
            FROM payments_for_reservations_payment
            WHERE reservations_id IN (
                SELECT id
                FROM reservations
                WHERE resource IN :resources
            )
        """), {
            'resources': tuple(m.resource.id for m in mapping.values())
        }))

        if payment_ids:
            print("Clearing existing payments")
            session.execute(text("""
                DELETE FROM payments_for_reservations_payment
                WHERE payment_id IN :payments
            """), {
                'payments': payment_ids
            })

            session.execute(text("""
                DELETE FROM payments
                WHERE id IN :payments
            """), {
                'payments': payment_ids
            })

        print("Clearing existing reservations")
        session.execute(text("""
            DELETE FROM reservations
            WHERE resource IN :resources
        """), {
            'resources': tuple(m.resource.id for m in mapping.values())
        })

        print("Clearing existing reserved slots")
        session.execute(text("""
            DELETE FROM reserved_slots
             WHERE allocation_id IN (
                SELECT id FROM allocations
                 WHERE allocations.mirror_of IN :resources
             )
        """), {
            'resources': tuple(m.resource.id for m in mapping.values())
        })

        print("Clearing existing allocations")
        session.execute(text("""
            DELETE FROM allocations
             WHERE allocations.mirror_of IN :resources
        """), {
            'resources': tuple(m.resource.id for m in mapping.values())
        })

        session.flush()

        print(f'Resources: {mapping.keys()}')

        print(f"Fetching remote allocations after {start_date}")
        count, rows = select_with_count(remote, f"""
            SELECT * FROM allocations
            WHERE mirror_of IN :resources
            AND _end >= '{start_date}'::date
            ORDER BY allocations.resource
        """, resources=tuple(mapping.keys()))

        # we use a separate id space, so we need to keep track
        allocation_ids = {}

        # the resource might be mapped, but it is not a given
        def row_resource(row):
            if row['resource'] not in mapping:
                return row['resource']

            return mapping[row['resource']].new_uuid

        # create the new allocations
        print("Writing allocations")
        for row in tqdm(rows, unit=' allocations', total=count):
            resource_type = mapping[row['mirror_of']].type

            if row['partly_available'] and resource_type != 'room':
                raise RuntimeError((
                    f"Cannot migrate partly_available allocation "
                    f"to a {resource_type} resource"
                ))

            if row['approve_manually']:
                raise RuntimeError((
                    f"Manually approved allocation found (id: {row['id']}), "
                    f"manually approved allocations are not supported"
                ))

            allocation = Allocation(
                resource=row_resource(row),
                mirror_of=mapping[row['mirror_of']].new_uuid,
                group=row['group'],
                quota=row['quota'],
                quota_limit=getattr(
                    row, 'quota_limit',
                    getattr(row, 'reservation_quota_limit')),
                partly_available=row['partly_available'],
                approve_manually=row['approve_manually'],

                # the timezone was ignored in seantis.reservation
                timezone='Europe/Zurich',
                _start=replace_timezone(row['_start'], 'Europe/Zurich'),
                _end=replace_timezone(row['_end'], 'Europe/Zurich'),

                data=json.loads(getattr(row, 'data', "{}")),
                _raster=row['_raster'],
                created=replace_timezone(row['created'], 'UTC'),
                modified=utcnow(),
                type='custom',
            )

            session.add(allocation)
            session.flush()

            allocation_ids[row['id']] = allocation.id

        # fetch the reserved slots that should be migrated
        count, rows = select_with_count(
            remote, """
            SELECT * FROM reserved_slots WHERE allocation_id IN (
                SELECT id FROM allocations WHERE mirror_of IN :resources
                AND id in :parsed
            )
            """,
            resources=tuple(mapping.keys()),
            parsed=tuple(allocation_ids.keys())
        )

        # create the reserved slots with the mapped values
        print("Writing reserved slots")
        known = set()
        for row in tqdm(rows, unit=" slots", total=count):
            r = row_resource(row)
            s = replace_timezone(row['start'], 'Europe/Zurich')
            e = replace_timezone(row['end'], 'Europe/Zurich')

            # it's possible for rows to become duplicated after replacing
            # the timezone, if the reserved slot passes over daylight
            # savings time changes
            if (r, s) in known:
                continue

            known.add((r, s))

            session.add(ReservedSlot(
                resource=r,
                start=s,
                end=e,
                allocation_id=allocation_ids[row['allocation_id']],
                reservation_token=row['reservation_token']
            ))

        session.flush()

        # fetch the reservations that should be migrated
        count, rows = select_with_count(
            remote, f"""
            SELECT * FROM reservations re
            WHERE resource IN :resources
              AND re.status = 'approved'
              AND re.end >= '{start_date}'::date
            ORDER BY re.resource
            """,
            resources=tuple(mapping.keys()),
        )

        def targeted_allocations(group):
            return session.query(Allocation).filter_by(group=group)

        # keep track of custom reservation data, for the creation of tickets
        reservation_data = {}
        payment_states = {}

        print(f"Writing reservations after {start_date}")
        for row in tqdm(rows, unit=' reservations', total=count):

            reservation_data[row['token']] = {
                'data': json.loads(row['data']),
                'email': row['email'],
                'origin': row['resource'],
                'origin_url': mapping[row['resource']].old_url,
                'created': replace_timezone(row['created'], 'UTC'),
                'modified': replace_timezone(row['modified'], 'UTC'),
            }

            if row['quota'] > 1:
                type_ = mapping[row.resource].type
                if type_ not in ('daypass', 'daily-item'):
                    raise RuntimeError(
                        "Reservations with multiple quotas for "
                        f"type {type_} cannot be migrated"
                    )

            # onegov.reservation does not support group targets, so we
            # translate those into normal allocations and create multiple
            # reservations with a shared token
            shared = dict(
                token=row['token'],
                target_type='allocation',
                resource=mapping[row['resource']].new_uuid,
                timezone='Europe/Zurich',
                status=row['status'],
                data={"accepted": True, "migrated": True},
                email=row['email'],
                quota=row['quota'],
                created=replace_timezone(row['created'], 'UTC'),
                modified=replace_timezone(row['modified'], 'UTC'),
                type='custom',
            )

            r = mapping[row['resource']].resource

            if r.pricing_method == 'per_item':
                payment = ManualPayment(
                    amount=r.price_per_item * row['quota'], currency='CHF')
            elif r.pricing_method == 'per_hour':
                raise NotImplementedError()
            else:
                payment = None

            if row['target_type'] == 'group':
                targets = tuple(targeted_allocations(group=row['target']))

                if not targets:
                    raise RuntimeError(f"No rows for target {row['target']}")

                for allocation in targets:
                    allocation.group = uuid4()

                    reservation = Reservation(
                        target=allocation.group,
                        start=allocation.start,
                        end=allocation.end,
                        **shared
                    )

                    if payment:
                        reservation.payment = payment

                    session.add(reservation)

            else:
                reservation = Reservation(
                    target=row['target'],
                    start=replace_timezone(row['start'], 'Europe/Zurich'),
                    end=replace_timezone(row['end'], 'Europe/Zurich'),
                    **shared
                )

                if payment:
                    reservation.payment = payment

                session.add(reservation)

            if reservation.payment:
                if was_paid(row['data']):
                    reservation.payment.state = 'paid'
                    payment_states[row['token']] = 'paid'
                else:
                    payment_states[row['token']] = 'unpaid'

        session.flush()

        # tie reservations to tickets/submissions
        tickets = TicketCollection(session)
        forms = FormCollection(session)

        # the responsible user is the first admin that was added
        user = session.query(User)\
            .filter_by(role='admin')\
            .order_by(User.created).first()

        print("Writing tickets")
        email_validator = Email('Invalid Email')
        for token, data in tqdm(reservation_data.items(), unit=" tickets"):
            form_class, form_data = get_form_class_and_data(data['data'])

            if form_data:

                # fix common form errors
                if data['email']:
                    if data['email'].endswith('.c'):
                        data['email'] = data['email'] + 'h'
                    try:
                        email_validator(None, Bunch(data=data['email']))
                    except Exception as e:
                        e.message = f'Email {data["email"]} not valid'
                        raise e

                form_data['email'] = data['email']
                form = form_class(data=form_data)

                # wtforms requires raw_data for some validators
                for key, value in form_data.items():
                    getattr(form, key).raw_data = [value]

                submission = forms.submissions.add_external(
                    form=form,
                    state='complete',
                    id=token
                )

                submission.meta['migrated'] = True
                submission.meta['origin'] = data['origin'].hex

            with session.no_autoflush:
                ticket = tickets.open_ticket(
                    handler_code='RSV', handler_id=token.hex)

                ticket.handler_data['migrated'] = True
                ticket.handler_data['origin'] = data['origin'].hex
                ticket.handler_data['origin_url'] = data['origin_url']
                ticket.muted = True
                ticket.state = 'closed'
                ticket.last_state_change = ticket.timestamp()
                ticket.reaction_time = 0
                ticket.user = user
                ticket.created = data['created']
                ticket.modified = data['modified']

                TicketNote.create(ticket, request, (
                    f"Migriert von {data['origin_url']}"
                    f"/reservations?token={token}",
                ), owner=user.username)

    return handle_import
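
The 'begin' event listener near the top of this example is a useful pattern on its own: it forces every transaction opened on the legacy engine to be read-only, so the migration cannot accidentally modify the source system. A standalone sketch (the DSN is hypothetical, and the statement is wrapped in text() for newer SQLAlchemy versions):

from sqlalchemy import create_engine, event, text

engine = create_engine('postgresql://localhost:5432/legacy')  # hypothetical DSN

@event.listens_for(engine, 'begin')
def receive_begin(conn):
    # runs at the start of every transaction on this engine
    conn.execute(text('SET TRANSACTION READ ONLY'))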



Source: test_users.py
with MIT License
from Sentimentron

    def try_drop_existing_db(self):
        """
        Drop the existing test database, if it exists.
        :return: True on success
        """
        conn = create_engine("postgresql+psycopg2://annotatron:annotatron@localhost:5432/postgres").connect()
        conn = conn.execution_options(autocommit=False)
        conn.execute("ROLLBACK")
        try:
            conn.execute("DROP DATABASE %s" % self.db_name)
        except exc.ProgrammingError as e:
            # Could not drop the database, probably does not exist
            conn.execute("ROLLBACK")
            logging.error("Could not drop the test database: %s", e)
        except exc.OperationalError as e:
            # Could not drop database because it's being accessed by other users (psql prompt open?)
            conn.execute("ROLLBACK")
            logging.fatal("Could not drop the test database, due to concurrent access.")
            raise e
        return True
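
Much of the ROLLBACK bookkeeping above can be avoided by issuing the drop on an AUTOCOMMIT connection with IF EXISTS; a sketch (the database name is hypothetical):

from sqlalchemy import create_engine, text

conn = create_engine(
    'postgresql+psycopg2://annotatron:annotatron@localhost:5432/postgres',
    isolation_level='AUTOCOMMIT',
).connect()
# IF EXISTS turns the missing-database case into a no-op instead of an error
conn.execute(text('DROP DATABASE IF EXISTS my_test_db'))
conn.close()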

Source: test_users.py
with MIT License
from Sentimentron

    def try_create_testing_db(self):
        """
        Copies the current Annotatron database schema to a blank new one.
        :return: An engine pointing at the new schema
        """

        gc.collect()
        # Create the testing database
        conn = create_engine("postgresql+psycopg2://annotatron:annotatron@localhost:5432/postgres",
                             isolation_level="AUTOCOMMIT").connect()

        self.db_name = self.generate_db_name()
        logging.info("Creating database %s...", self.db_name)

        conn.execute("CREATE DATABASE {}".format(self.db_name))
        conn.close()

        # Read the Annotatron SQL specification
        current_directory = os.path.dirname(os.path.realpath(__file__))
        docker_db_file = os.path.join(current_directory, "../docker/postgres/files/db.sql")
        with open(docker_db_file, "r") as fin:
            database_statements = fin.read()

        # Create the database tables with an up-to-date schema. Note that
        # `conn` here is an Engine, not a Connection: execute() is called on
        # it directly (SQLAlchemy 1.x implicit execution), and dispose()
        # releases its pool.
        conn = create_engine("postgresql+psycopg2://annotatron:annotatron@localhost:5432/{}".format(self.db_name),
                             isolation_level="AUTOCOMMIT")
        conn.execute(database_statements)
        conn.dispose()

        # Create an engine with the default isolation level
        conn = create_engine("postgresql+psycopg2://annotatron:annotatron@localhost:5432/{}".format(self.db_name))
        return conn

Source: cli.py
with Apache License 2.0
from simonw

def cli(
    connection,
    path,
    all,
    table,
    skip,
    redact,
    sql,
    output,
    pk,
    index_fks,
    progress,
    postgres_schema,
):
    """
    Load data from any database into SQLite.

    PATH is a path to the SQLite file to create, e.g. /tmp/my_database.db

    CONNECTION is a SQLAlchemy connection string, for example:

        postgresql://localhost/my_database
        postgresql://username:passwd@localhost/my_database

        mysql://root@localhost/my_database
        mysql://username:passwd@localhost/my_database

    More: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
    """
    if not all and not table and not sql:
        raise click.ClickException("--all OR --table OR --sql required")
    if skip and not all:
        raise click.ClickException("--skip can only be used with --all")
    redact_columns = {}
    for table_name, column_name in redact:
        redact_columns.setdefault(table_name, set()).add(column_name)
    db = Database(path)
    if postgres_schema:
        conn_args = {"options": "-csearch_path={}".format(postgres_schema)}
    else:
        conn_args = {}
    if connection.startswith("postgres://"):
        connection = connection.replace("postgres://", "postgresql://")
    db_conn = create_engine(connection, connect_args=conn_args).connect()
    inspector = inspect(db_conn)
    # Figure out which tables we are copying, if any
    tables = table
    if all:
        tables = inspector.get_table_names()
    if tables:
        foreign_keys_to_add = []
        for i, table in enumerate(tables):
            if progress:
                click.echo("{}/{}: {}".format(i + 1, len(tables), table), err=True)
            if table in skip:
                if progress:
                    click.echo("  ... skipping", err=True)
                continue
            pks = inspector.get_pk_constraint(table)["constrained_columns"]
            if len(pks) == 1:
                pks = pks[0]
            fks = inspector.get_foreign_keys(table)
            foreign_keys_to_add.extend(
                [
                    (
                        # table, column, other_table, other_column
                        table,
                        fk["constrained_columns"][0],
                        fk["referred_table"],
                        fk["referred_columns"][0],
                    )
                    for fk in fks
                ]
            )
            count = None
            table_quoted = db_conn.dialect.identifier_preparer.quote_identifier(table)
            if progress:
                count = db_conn.execute(
                    "select count(*) from {}".format(table_quoted)
                ).fetchone()[0]
            results = db_conn.execute("select * from {}".format(table_quoted))
            redact_these = redact_columns.get(table) or set()
            rows = (redacted_dict(r, redact_these) for r in results)
            # Make sure generator is not empty
            try:
                first = next(rows)
            except StopIteration:
                # This is an empty table - create an empty copy
                if not db[table].exists():
                    create_columns = {}
                    for column in inspector.get_columns(table):
                        try:
                            column_type = column["type"].python_type
                        except NotImplementedError:
                            column_type = str
                        create_columns[column["name"]] = column_type
                    db[table].create(create_columns)
            else:
                rows = itertools.chain([first], rows)
                if progress:
                    with click.progressbar(rows, length=count) as bar:
                        db[table].insert_all(bar, pk=pks, replace=True)
                else:
                    db[table].insert_all(rows, pk=pks, replace=True)
        foreign_keys_to_add_final = []
        for table, column, other_table, other_column in foreign_keys_to_add:
            # Make sure both tables exist and are not skipped - they may not
            # exist if they were empty and hence .insert_all() didn't have a
            # reason to create them.
            if (
                db[table].exists()
                and table not in skip
                and db[other_table].exists()
                and other_table not in skip
                # Also skip if this column is redacted
                and ((table, column) not in redact)
            ):
                foreign_keys_to_add_final.append(
                    (table, column, other_table, other_column)
                )
        if foreign_keys_to_add_final:
            # Add using .add_foreign_keys() to avoid running multiple VACUUMs
            if progress:
                click.echo(
                    "\nAdding {} foreign key{}\n{}".format(
                        len(foreign_keys_to_add_final),
                        "s" if len(foreign_keys_to_add_final) != 1 else "",
                        "\n".join(
                            "  {}.{} => {}.{}".format(*fk)
                            for fk in foreign_keys_to_add_final
                        ),
                    ),
                    err=True,
                )
            db.add_foreign_keys(foreign_keys_to_add_final)
    if sql:
        if not output:
            raise click.ClickException("--sql must be accompanied by --output")
        results = db_conn.execute(sql)
        rows = (dict(r) for r in results)
        db[output].insert_all(rows, pk=pk)
    if index_fks:
        db.index_foreign_keys()
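
The connect_args dictionary above is how driver-level options pass through create_engine; with psycopg2, the options keyword sets the PostgreSQL search_path for the connection. A minimal sketch (URL and schema name are hypothetical):

from sqlalchemy import create_engine

engine = create_engine(
    'postgresql://localhost/my_database',
    connect_args={'options': '-csearch_path=analytics'},  # hypothetical schema
)
db_conn = engine.connect()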

