Here are examples of the Python API `sqlalchemy.types.Boolean`, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
10 Examples
3
Example 1
Project: ironic Source File: test_migrations.py
def _check_3cb628139ea4(self, engine, data):
    """Verify migration 3cb628139ea4 added nodes.console_enabled."""
    nodes = db_utils.get_table(engine, 'nodes')
    node_columns = [col.name for col in nodes.c]
    self.assertIn('console_enabled', node_columns)
    # Some backends have no native boolean and model it as an integer.
    self.assertIsInstance(
        nodes.c.console_enabled.type,
        (sqlalchemy.types.Boolean, sqlalchemy.types.Integer))
3
Example 2
Project: ironic Source File: test_migrations.py
def _check_60cf717201bc(self, engine, data):
    """Verify migration 60cf717201bc added portgroups.standalone_ports_supported."""
    portgroups = db_utils.get_table(engine, 'portgroups')
    portgroup_columns = [col.name for col in portgroups.c]
    self.assertIn('standalone_ports_supported', portgroup_columns)
    # Some backends have no native boolean and model it as an integer.
    self.assertIsInstance(
        portgroups.c.standalone_ports_supported.type,
        (sqlalchemy.types.Boolean, sqlalchemy.types.Integer))
0
Example 3
Project: pokedex Source File: load.py
def load(session, tables=[], directory=None, drop_tables=False, verbose=False, safe=True, recursive=True, langs=None):
    """Load data from CSV files into the given database session.

    Tables are created automatically.

    `session`
        SQLAlchemy session to use.

    `tables`
        List of tables to load.  If omitted, all tables are loaded.
        (NOTE: the default list is never mutated, so the shared mutable
        default is harmless here.)

    `directory`
        Directory the CSV files reside in.  Defaults to the `pokedex` data
        directory.

    `drop_tables`
        If set to True, existing `pokedex`-related tables will be dropped.

    `verbose`
        If set to True, status messages will be printed to stdout.

    `safe`
        If set to False, load can be faster, but can corrupt the database if
        it crashes or is interrupted.

    `recursive`
        If set to True, load all dependent tables too.

    `langs`
        List of identifiers of extra language to load, or None to load them all
    """
    # First take care of verbosity
    print_start, print_status, print_done = _get_verbose_prints(verbose)

    if directory is None:
        directory = get_default_csv_dir()

    # XXX why isn't this done in command_load
    table_names = _get_table_names(metadata, tables)
    table_objs = [metadata.tables[name] for name in table_names]

    if recursive:
        table_objs.extend(find_dependent_tables(table_objs))

    # Topologically sort so referenced tables are created/loaded first.
    table_objs = sqlalchemy.sql.util.sort_tables(table_objs)

    engine = session.get_bind()

    # Limit table names to 30 characters for Oracle
    oracle = (engine.dialect.name == 'oracle')
    if oracle:
        rewrite_long_table_names()

    # SQLite speed tweaks
    if not safe and engine.dialect.name == 'sqlite':
        session.execute("PRAGMA synchronous=OFF")
        session.execute("PRAGMA journal_mode=OFF")

    # Drop all tables if requested
    if drop_tables:
        print_start('Dropping tables')
        # Reverse order so dependent tables are dropped before their targets.
        for n, table in enumerate(reversed(table_objs)):
            table.drop(bind=engine, checkfirst=True)

            # Drop columns' types if appropriate; needed for enums in
            # postgresql
            for column in table.c:
                try:
                    drop = column.type.drop
                except AttributeError:
                    # Most types have no drop(); nothing to do.
                    pass
                else:
                    drop(bind=engine, checkfirst=True)

            print_status('%s/%s' % (n, len(table_objs)))
        print_done()

    print_start('Creating tables')
    for n, table in enumerate(table_objs):
        table.create()
        print_status('%s/%s' % (n, len(table_objs)))
    print_done()

    # Okay, run through the tables and actually load the data now
    for table_obj in table_objs:
        if oracle:
            table_name = table_obj._original_name
        else:
            table_name = table_obj.name

        insert_stmt = table_obj.insert()

        print_start(table_name)

        try:
            csvpath = "%s/%s.csv" % (directory, table_name)
            csvfile = open(csvpath, 'r')
        except IOError:
            # File doesn't exist; don't load anything!
            print_done('missing?')
            continue

        # XXX This is wrong for files with multi-line fields, but Python 3
        # doesn't allow .tell() on a file that's currently being iterated
        # (because the result is completely bogus). Oh well.
        csvsize = sum(1 for line in csvfile)
        csvfile.seek(0)

        reader = csv.reader(csvfile, lineterminator='\n')
        column_names = [six.text_type(column) for column in next(reader)]

        if not safe and engine.dialect.name == 'postgresql':
            # Postgres' CSV dialect works with our data, if we mark the not-null
            # columns with FORCE NOT NULL.
            not_null_cols = [c for c in column_names if not table_obj.c[c].nullable]
            if not_null_cols:
                force_not_null = 'FORCE NOT NULL ' + ','.join('"%s"' % c for c in not_null_cols)
            else:
                force_not_null = ''

            # Grab the underlying psycopg2 cursor so we can use COPY FROM STDIN
            raw_conn = engine.raw_connection()
            command = "COPY %(table_name)s (%(columns)s) FROM STDIN CSV HEADER %(force_not_null)s"
            csvfile.seek(0)
            raw_conn.cursor().copy_expert(
                command % dict(
                    table_name=table_name,
                    columns=','.join('"%s"' % c for c in column_names),
                    force_not_null=force_not_null,
                ),
                csvfile,
            )
            raw_conn.commit()
            print_done()
            continue

        # Self-referential tables may contain rows with foreign keys of other
        # rows in the same table that do not yet exist. Pull these out and
        # insert them last
        # ASSUMPTION: Self-referential tables have a single PK called "id"
        deferred_rows = []  # ( row referring to id, [foreign ids we need] )
        seen_ids = set()    # primary keys we've seen

        # Fetch foreign key columns that point at this table, if any
        self_ref_columns = []
        for column in table_obj.c:
            if any(x.references(table_obj) for x in column.foreign_keys):
                self_ref_columns.append(column)

        new_rows = []

        def insert_and_commit():
            # Flush the buffered rows in one executemany, then report
            # progress based on how far through the CSV we are.
            if not new_rows:
                return
            session.execute(insert_stmt, new_rows)
            session.commit()
            new_rows[:] = []

            progress = "%d%%" % (100 * csvpos // csvsize)
            print_status(progress)

        csvpos = 0
        for csvs in reader:
            csvpos += 1

            row_data = {}

            for column_name, value in zip(column_names, csvs):
                column = table_obj.c[column_name]
                if column.nullable and value == '':
                    # Empty string in a nullable column really means NULL
                    value = None
                elif isinstance(column.type, sqlalchemy.types.Boolean):
                    # Boolean values are stored as string values 0/1, but both
                    # of those evaluate as true; SQLA wants True/False
                    if value == '0':
                        value = False
                    else:
                        value = True
                elif isinstance(value, bytes):
                    # Otherwise, unflatten from bytes
                    value = value.decode('utf-8')

                # nb: Dictionaries flattened with ** have to have string keys
                row_data[str(column_name)] = value

            # May need to stash this row and add it later if it refers to a
            # later row in this table
            if self_ref_columns:
                foreign_ids = set(row_data[x.name] for x in self_ref_columns)
                foreign_ids.discard(None)  # remove NULL ids

                if not foreign_ids:
                    # NULL key. Remember this row and add as usual.
                    seen_ids.add(row_data['id'])
                elif foreign_ids.issubset(seen_ids):
                    # Non-NULL key we've already seen. Remember it and commit
                    # so we know the old row exists when we add the new one
                    insert_and_commit()
                    seen_ids.add(row_data['id'])
                else:
                    # Non-NULL future id. Save this and insert it later!
                    deferred_rows.append((row_data, foreign_ids))
                    continue

            # Insert row!
            new_rows.append(row_data)

            # Remembering some zillion rows in the session consumes a lot of
            # RAM. Let's not do that. Commit every 1000 rows
            if len(new_rows) >= 1000:
                insert_and_commit()

        insert_and_commit()

        # Attempt to add any spare rows we've collected
        for row_data, foreign_ids in deferred_rows:
            if not foreign_ids.issubset(seen_ids):
                # Could happen if row A refers to B which refers to C.
                # This is ridiculous and doesn't happen in my data so far
                # BUG FIX: was str(row); `row` is undefined here, so the
                # intended ValueError would have been masked by a NameError.
                raise ValueError("Too many levels of self-reference! "
                                 "Row was: " + str(row_data))
            session.execute(
                insert_stmt.values(**row_data)
            )
            seen_ids.add(row_data['id'])
        session.commit()

        print_done()

    print_start('Translations')
    transl = translations.Translations(csv_directory=directory)

    new_row_count = 0
    for translation_class, rows in transl.get_load_data(langs):
        table_obj = translation_class.__table__
        if table_obj in table_objs:
            insert_stmt = table_obj.insert()
            session.execute(insert_stmt, rows)
            session.commit()
            # We don't have a total, but at least show some increasing number
            new_row_count += len(rows)
            print_status(str(new_row_count))

    # SQLite check
    if engine.dialect.name == 'sqlite':
        session.execute("PRAGMA integrity_check")

    print_done()
0
Example 4
Project: crate-python Source File: types.py
def __init__(self, left, right, operator=operators.eq):
    """Represent a binary comparison whose result type is Boolean.

    The comparison ``operator`` defaults to equality; ``right`` is stored
    as given, while ``left`` is coerced into a bind parameter.
    """
    self.operator = operator
    self.right = right
    # Coerce a plain Python value on the left into a literal bind parameter.
    self.left = expression._literal_as_binds(left)
    self.type = sqltypes.Boolean()
0
Example 5
def is_boolean(self, col_name):
    """Return True if column ``col_name`` has a SQLAlchemy Boolean type.

    Unknown column names, or columns without a usable ``type`` attribute,
    yield False instead of raising.
    """
    try:
        return isinstance(self.list_columns[col_name].type, sa.types.Boolean)
    except (KeyError, AttributeError):
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit and hid real programming errors.
        # Only a missing column (KeyError) or a column object without
        # ``.type`` (AttributeError) should mean "not a boolean".
        return False
0
Example 6
Project: mittn Source File: dbtools.py
def open_database(context):
    """Open the database named in the feature file, creating its tables
    when they do not already exist.

    :param context: The Behave context
    :return: A database handle, or None if no database in use
    """
    if not hasattr(context, 'dburl'):
        return None  # No false positives database is in use

    dbconn = None
    # Try to connect to the database
    try:
        db_engine = create_engine(context.dburl)
        dbconn = db_engine.connect()
    except (IOError, exc.OperationalError):
        assert False, "Cannot connect to database '%s'" % context.dburl

    # Set up the database table to store new findings and false positives.
    # We use LargeBinary to store the message, because it can potentially
    # be big.
    db_metadata = MetaData()
    db_metadata.bind = db_engine

    # The plain free-text fields all share one type; build them in bulk.
    text_fields = ('scenario_id', 'url', 'severity', 'issuetype',
                   'issuename', 'issuedetail', 'confidence', 'host',
                   'port', 'protocol')
    columns = [
        Column('new_issue', types.Boolean),
        # Implicit autoincrement
        Column('issue_no', types.Integer, primary_key=True, nullable=False),
        Column('timestamp', types.DateTime(timezone=True)),
        Column('test_runner_host', types.Text),
    ]
    columns.extend(Column(name, types.Text) for name in text_fields)
    columns.append(Column('messages', types.LargeBinary))

    context.headlessscanner_issues = Table('headlessscanner_issues',
                                           db_metadata, *columns)

    # Create the table if it doesn't exist; otherwise this has no effect.
    db_metadata.create_all(db_engine)
    return dbconn
0
Example 7
def test_add_false_positive(self):
    """Add a false positive to the database and check that all fields
    get populated and can be compared back to the originals."""
    issue = {'scenario_id': '1',
             'url': 'testurl',
             'severity': 'testseverity',
             'issuetype': 'testissuetype',
             'issuename': 'testissuename',
             'issuedetail': 'testissuedetail',
             'confidence': 'testconfidence',
             'host': 'testhost',
             'port': 'testport',
             'protocol': 'testprotocol',
             'messages': '{foo=bar}'}
    dbtools.add_false_positive(self.context, issue)

    # Connect directly to the database and check the data is there
    db_engine = sqlalchemy.create_engine(self.context.dburl)
    dbconn = db_engine.connect()
    db_metadata = sqlalchemy.MetaData()
    headlessscanner_issues = Table(
        'headlessscanner_issues',
        db_metadata,
        Column('new_issue', types.Boolean),
        Column('issue_no', types.Integer, primary_key=True, nullable=False),  # Implicit autoincrement
        Column('timestamp', types.DateTime(timezone=True)),
        Column('test_runner_host', types.Text),
        Column('scenario_id', types.Text),
        Column('url', types.Text),
        Column('severity', types.Text),
        Column('issuetype', types.Text),
        Column('issuename', types.Text),
        Column('issuedetail', types.Text),
        Column('confidence', types.Text),
        Column('host', types.Text),
        Column('port', types.Text),
        Column('protocol', types.Text),
        Column('messages', types.LargeBinary)
    )
    db_select = sqlalchemy.sql.select([headlessscanner_issues])
    db_result = dbconn.execute(db_select)
    result = db_result.fetchone()
    # BUG FIX: was issue.iteritems(), which only exists on Python 2 and
    # raises AttributeError on Python 3; .items() works on both.
    for key, value in issue.items():
        if key == 'messages':
            # Messages are stored JSON-serialised.
            self.assertEqual(result[key], json.dumps(value))
        else:
            self.assertEqual(result[key], value,
                             '%s not found in database after add' % key)
    self.assertEqual(result['test_runner_host'], socket.gethostbyname(socket.getfqdn()),
                     'Test runner host name not correct in database')
    self.assertLessEqual(result['timestamp'], datetime.datetime.utcnow(),
                         'Timestamp not correctly stored in database')
    dbconn.close()
0
Example 8
Project: mittn Source File: dbtools.py
def open_database(context):
    """Open the database named in the feature file, creating its tables
    when they do not already exist.

    :param context: The Behave context
    :return: A database handle, or None if no database in use
    """
    if not hasattr(context, 'dburl'):
        return None  # No false positives database is in use

    dbconn = None
    # Try to connect to the database
    try:
        db_engine = create_engine(context.dburl)
        dbconn = db_engine.connect()
    except (IOError, exc.OperationalError):
        assert False, "Cannot connect to database '%s'" % context.dburl

    # Set up the database table to store new findings and false positives.
    # We use LargeBinary to store those fields that could contain somehow
    # bad Unicode, just in case some component downstream tries to parse
    # a string provided as Unicode.
    db_metadata = MetaData()
    db_metadata.bind = db_engine

    # (name, type) pairs for the plain data columns, in table order.
    field_specs = (
        ('timestamp', types.DateTime(timezone=True)),
        ('test_runner_host', types.Text),
        ('scenario_id', types.Text),
        ('url', types.Text),
        ('server_protocol_error', types.Text),
        ('server_timeout', types.Boolean),
        ('server_error_text_detected', types.Boolean),
        ('server_error_text_matched', types.Text),
        ('req_method', types.Text),
        ('req_headers', types.LargeBinary),
        ('req_body', types.LargeBinary),
        ('resp_statuscode', types.Text),
        ('resp_headers', types.LargeBinary),
        ('resp_body', types.LargeBinary),
        ('resp_history', types.LargeBinary),
    )
    columns = [Column('new_issue', types.Boolean),
               # Implicit autoincrement
               Column('issue_no', types.Integer, primary_key=True,
                      nullable=False)]
    columns.extend(Column(name, coltype) for name, coltype in field_specs)

    context.httpfuzzer_issues = Table('httpfuzzer_issues', db_metadata,
                                      *columns)

    # Create the table if it doesn't exist; otherwise this has no effect.
    db_metadata.create_all(db_engine)
    return dbconn
Example 9
def test_add_false_positive(self):
    """Add a false positive to the database and check that all fields
    get populated and can be compared back to the originals."""
    response = {'scenario_id': '1',
                'req_headers': 'headers',
                'req_body': 'body',
                'url': 'url',
                'timestamp': datetime.datetime.utcnow(),
                'req_method': 'method',
                'server_protocol_error': None,
                'server_timeout': False,
                'server_error_text_detected': False,
                'server_error_text_matched': 'matched_text',
                'resp_statuscode': 'statuscode',
                'resp_headers': 'resp_headers',
                'resp_body': 'resp_body',
                'resp_history': 'resp_history'}
    dbtools.add_false_positive(self.context, response)

    # Connect directly to the database and check the data is there
    db_engine = sqlalchemy.create_engine(self.context.dburl)
    dbconn = db_engine.connect()
    db_metadata = sqlalchemy.MetaData()
    httpfuzzer_issues = Table('httpfuzzer_issues', db_metadata,
                              Column('new_issue', types.Boolean),
                              Column('issue_no', types.Integer, primary_key=True, nullable=False),
                              Column('timestamp', types.DateTime(timezone=True)),
                              Column('test_runner_host', types.Text),
                              Column('scenario_id', types.Text),
                              Column('url', types.Text),
                              Column('server_protocol_error', types.Text),
                              Column('server_timeout', types.Boolean),
                              Column('server_error_text_detected', types.Boolean),
                              Column('server_error_text_matched', types.Text),
                              Column('req_method', types.Text),
                              Column('req_headers', types.LargeBinary),
                              Column('req_body', types.LargeBinary),
                              Column('resp_statuscode', types.Text),
                              Column('resp_headers', types.LargeBinary),
                              Column('resp_body', types.LargeBinary),
                              Column('resp_history', types.LargeBinary))
    db_select = sqlalchemy.sql.select([httpfuzzer_issues])
    db_result = dbconn.execute(db_select)
    result = db_result.fetchone()
    # BUG FIX: was response.iteritems(), which only exists on Python 2 and
    # raises AttributeError on Python 3; .items() works on both.
    for key, value in response.items():
        self.assertEqual(result[key], value,
                         '%s not found in database after add' % key)
    self.assertEqual(result['test_runner_host'], socket.gethostbyname(socket.getfqdn()),
                     'Test runner host name not correct in database')
    self.assertLessEqual(result['timestamp'], datetime.datetime.utcnow(),
                         'Timestamp not correctly stored in database')
    dbconn.close()
0
Example 10
Project: ironic Source File: test_migrations.py
def _check_487deb87cc9d(self, engine, data):
    """Verify migration 487deb87cc9d: conductors.online exists and
    nodes.conductor_affinity can reference a conductor id."""
    conductors = db_utils.get_table(engine, 'conductors')
    self.assertIn('online', [col.name for col in conductors.c])
    # Some backends have no native boolean and model it as an integer.
    self.assertIsInstance(conductors.c.online.type,
                          (sqlalchemy.types.Boolean,
                           sqlalchemy.types.Integer))

    nodes = db_utils.get_table(engine, 'nodes')
    self.assertIn('conductor_affinity', [col.name for col in nodes.c])
    self.assertIsInstance(nodes.c.conductor_affinity.type,
                          sqlalchemy.types.Integer)

    # Exercise the new column: insert a conductor, point a node at it, and
    # read the affinity back.
    data_conductor = {'hostname': 'test_host'}
    conductors.insert().execute(data_conductor)
    conductor = conductors.select(
        conductors.c.hostname ==
        data_conductor['hostname']).execute().first()

    data_node = {'uuid': uuidutils.generate_uuid(),
                 'conductor_affinity': conductor['id']}
    nodes.insert().execute(data_node)
    node = nodes.select(
        nodes.c.uuid == data_node['uuid']).execute().first()
    self.assertEqual(conductor['id'], node['conductor_affinity'])