Here are examples of the Python API nose.tools.eq_, taken from open-source projects. By voting up you can indicate which examples are most useful and appropriate.
156 Examples
0
Example 51
Project: pyensembl Source File: test_transcript_objects.py
@test_ensembl_releases(75, 77)
def test_sequence_parts(genome):
    """Check that the 5' UTR, coding sequence, and 3' UTR of FOXP3-001
    concatenate back into the full transcript sequence, and that their
    combined length matches ``len(transcript)``.
    """
    # Ensure that the UTRs and coding sequence can be
    # combined to make the full transcript.
    transcript = genome.transcript_by_id(FOXP3_001_transcript_id)

    # The combined lengths of the upstream untranslated region,
    # coding sequence, and downstream untranslated region
    full_sequence = transcript.sequence
    assert_greater(len(full_sequence), 0)

    # Each individual part must be non-empty before comparing the whole.
    utr5 = transcript.five_prime_utr_sequence
    assert_greater(len(utr5), 0)

    cds = transcript.coding_sequence
    assert_greater(len(cds), 0)

    utr3 = transcript.three_prime_utr_sequence
    assert_greater(len(utr3), 0)

    # Concatenating the three parts should reproduce the full sequence.
    combined_string = utr5 + cds + utr3

    combined_sequence_length = len(combined_string)
    # make sure length property of transcript matches the sequence length
    eq_(
        combined_sequence_length,
        len(transcript),
        "Length 5' UTR(%dnt) + CDS(%dnt) + 3' UTR(%d) = %d, expected %d" % (
            len(utr5),
            len(cds),
            len(utr3),
            combined_sequence_length,
            len(transcript)))
    eq_(
        combined_string,
        full_sequence,
        "Expected FOXP3-001 sequence:\n%s\n\n5' UTR + CDS + 3' UTR:\n%s" % (
            full_sequence,
            combined_string))
0
Example 52
Project: jstestnet Source File: test_views.py
def test_get_job_result(self):
    """End-to-end check of the test-run result API: start a run, verify it
    is initially unfinished, submit worker results, then verify the
    aggregated per-module assertions in the finished result.
    """
    ts = create_ts()
    token = Token.create(ts)
    worker = create_worker()

    # Kick off a test run for this test suite.
    r = self.client.post(reverse('system.start_tests'),
                         data={'browsers': 'firefox', 'token': token,
                               'name': ts.slug})
    eq_(r.status_code, 200)
    data = json.loads(r.content)
    test_run_id = data['test_run_id']

    # Before any worker reports back, the run is unfinished and empty.
    r = self.client.get(reverse('system.test_result', args=[test_run_id]))
    eq_(r.status_code, 200)
    data = json.loads(r.content)
    eq_(data['finished'], False)
    eq_(data['results'], [])

    # Pull work for the worker, then submit its results.
    data = self.query(worker)
    queue_id = data['work_queue_id']
    results = {
        'failures': 0,
        'total': 1,
        'tests': [
            {'module': 'Bar', 'test': 'foo',
             'message': '1 equals 2', 'result': False},
            {'module': 'Bar', 'test': 'foo',
             'message': 'ok', 'result': True},
            {'module': 'Zebo', 'test': 'zee',
             'message': 'ok', 'result': True},
        ]
    }
    r = self.client.post(reverse('work.submit_results'),
                         dict(work_queue_id=queue_id,
                              results=json.dumps(results)))
    eq_(r.status_code, 200)

    # Now the run is finished and the results are grouped per module.
    r = self.client.get(reverse('system.test_result', args=[test_run_id]))
    eq_(r.status_code, 200)
    data = json.loads(r.content)
    eq_(data['finished'], True)

    tests = sorted(data['results'])
    eq_(tests[0]['module'], 'Bar')
    eq_(tests[0]['test'], 'foo')
    eq_(tests[0]['assertions'], [
        {'module': 'Bar', 'test': 'foo', 'worker_id': worker.id,
         'worker_user_agent': worker.user_agent,
         'browser': 'firefox/3.6.12, gecko/1.9.2.12',
         'message': '1 equals 2', 'result': False},
        {'module': 'Bar', 'test': 'foo', 'worker_id': worker.id,
         'worker_user_agent': worker.user_agent,
         'browser': 'firefox/3.6.12, gecko/1.9.2.12',
         'message': 'ok', 'result': True},
    ])
    eq_(tests[1]['module'], 'Zebo')
    eq_(tests[1]['test'], 'zee')
    eq_(tests[1]['assertions'], [
        {'module': 'Zebo', 'test': 'zee', 'worker_id': worker.id,
         'worker_user_agent': worker.user_agent,
         'browser': 'firefox/3.6.12, gecko/1.9.2.12',
         'message': 'ok', 'result': True},
    ])
0
Example 53
Project: flask_injector Source File: flask_injector_tests.py
def test_injections():
    """Verify FlaskInjector annotation-based injection across view
    functions, class-based views, and every request hook; the counter
    checks that each injected hook actually ran.
    """
    l = [1, 2, 3]
    counter = [0]

    def inc():
        counter[0] += 1

    def conf(binder):
        # Bindings injected by annotation: str -> "something", list -> l.
        binder.bind(str, to="something")
        binder.bind(list, to=l)

    app = Flask(__name__)

    @app.route('/view1')
    def view1(content: str):
        inc()
        return render_template_string(content)

    class View2(View):
        def __init__(self, *args, content: list, **kwargs):
            self.content = content
            super().__init__(*args, **kwargs)

        def dispatch_request(self):
            inc()
            return render_template_string('%s' % self.content)

    @app.before_request
    def br(c: list):
        inc()
        eq_(c, l)

    @app.after_request
    def ar(response_class, c: list):
        inc()
        eq_(c, l)
        return response_class

    @app.context_processor
    def cp(c: list):
        inc()
        eq_(c, l)
        return {}

    @app.teardown_request
    def tr(sender, exc=None, c: list = None):
        inc()
        eq_(c, l)

    app.add_url_rule('/view2', view_func=View2.as_view('view2'))
    FlaskInjector(app=app, modules=[conf], use_annotations=True)

    with app.test_client() as c:
        response = c.get('/view1')
        eq_(response.get_data(as_text=True), "something")

    with app.test_client() as c:
        response = c.get('/view2')
        eq_(response.get_data(as_text=True), '%s' % (l,))

    # 2 requests x (view + before + after + context/teardown hooks) = 10 calls.
    eq_(counter[0], 10)
0
Example 54
Project: flask-admin Source File: test_model.py
def test_export_csv():
    """Exercise flask-admin CSV export: disabled export redirects, basic
    export, explicit include/exclude column lists, utf-8 content, type and
    formatter handling, and macro formatters (which must 500 unless
    overridden or excluded).
    """
    app, admin = setup()
    client = app.test_client()

    # test redirect when csv export is disabled
    view = MockModelView(Model, column_list=['col1', 'col2'], endpoint="test")
    admin.add_view(view)

    rv = client.get('/admin/test/export/csv/')
    eq_(rv.status_code, 302)

    # basic test of csv export with a few records
    view_data = {
        1: Model(1, "col1_1", "col2_1"),
        2: Model(2, "col1_2", "col2_2"),
        3: Model(3, "col1_3", "col2_3"),
    }

    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'])
    admin.add_view(view)

    rv = client.get('/admin/model/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,col2_1\r\n"
        "col1_2,col2_2\r\n"
        "col1_3,col2_3\r\n" == data)

    # test explicit use of column_export_list
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'],
                         column_export_list=['id', 'col1', 'col2'],
                         endpoint='exportinclusion')
    admin.add_view(view)

    rv = client.get('/admin/exportinclusion/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Id,Col1,Col2\r\n"
        "1,col1_1,col2_1\r\n"
        "2,col1_2,col2_2\r\n"
        "3,col1_3,col2_3\r\n" == data)

    # test explicit use of column_export_exclude_list
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'],
                         column_export_exclude_list=['col2'],
                         endpoint='exportexclusion')
    admin.add_view(view)

    rv = client.get('/admin/exportexclusion/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Col1\r\n"
        "col1_1\r\n"
        "col1_2\r\n"
        "col1_3\r\n" == data)

    # test utf8 characters in csv export
    view_data[4] = Model(1, u'\u2013ut8_1\u2013', u'\u2013utf8_2\u2013')
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'], endpoint="utf8")
    admin.add_view(view)

    rv = client.get('/admin/utf8/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_(u'\u2013ut8_1\u2013,\u2013utf8_2\u2013\r\n' in data)

    # test None type, integer type, column_labels, and column_formatters
    view_data = {
        1: Model(1, "col1_1", 1),
        2: Model(2, "col1_2", 2),
        3: Model(3, None, 3),
    }

    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_labels={'col1': 'Str Field', 'col2': 'Int Field'},
        column_formatters=dict(col2=lambda v, c, m, p: m.col2 * 2),
        endpoint="types_and_formatters"
    )
    admin.add_view(view)

    rv = client.get('/admin/types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Str Field,Int Field\r\n"
        "col1_1,2\r\n"
        "col1_2,4\r\n"
        ",6\r\n" == data)

    # test column_formatters_export and column_formatters_export
    type_formatters = {type(None): lambda view, value: "null"}
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters_export=dict(col2=lambda v, c, m, p: m.col2 * 3),
        column_formatters=dict(col2=lambda v, c, m, p: m.col2 * 2),  # overridden
        column_type_formatters_export=type_formatters,
        endpoint="export_types_and_formatters"
    )
    admin.add_view(view)

    rv = client.get('/admin/export_types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,3\r\n"
        "col1_2,6\r\n"
        "null,9\r\n" == data)

    # Macros are not implemented for csv export yet and will throw an error
    view = MockModelView(
        Model, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        endpoint="macro_exception"
    )
    admin.add_view(view)

    rv = client.get('/admin/macro_exception/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)

    # We should be able to specify column_formatters_export
    # and not get an exception if a column_formatter is using a macro
    def export_formatter(v, c, m, p):
        return m.col1 if m else ''

    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        column_formatters_export=dict(col1=export_formatter),
        endpoint="macro_exception_formatter_override"
    )
    admin.add_view(view)

    rv = client.get('/admin/macro_exception_formatter_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,1\r\n"
        "col1_2,2\r\n"
        ",3\r\n" == data)

    # We should not get an exception if a column_formatter is
    # using a macro but it is on the column_export_exclude_list
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        column_export_exclude_list=['col1'],
        endpoint="macro_exception_exclude_override"
    )
    admin.add_view(view)

    rv = client.get('/admin/macro_exception_exclude_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col2\r\n"
        "1\r\n"
        "2\r\n"
        "3\r\n" == data)

    # When we use column_export_list to hide the macro field
    # we should not get an exception
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        column_export_list=['col2'],
        endpoint="macro_exception_list_override"
    )
    admin.add_view(view)

    rv = client.get('/admin/macro_exception_list_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col2\r\n"
        "1\r\n"
        "2\r\n"
        "3\r\n" == data)

    # If they define a macro on the column_formatters_export list
    # then raise an exception
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        endpoint="macro_exception_macro_override"
    )
    admin.add_view(view)

    rv = client.get('/admin/macro_exception_macro_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)
0
Example 55
Project: datanommer Source File: test_commands.py
@freezegun.freeze_time('2013-03-01')
def test_latest_timestamp_human(self):
    """With overall=False, timestamp=True, human=True, the LatestCommand
    should log the human-readable timestamps of the latest message per
    category (newest first).

    Fix: the original used leading-zero integer literals
    (``datetime(2013,02,14)``), which are a SyntaxError in Python 3.
    """
    with patch('datanommer.commands.LatestCommand.get_config') as gc:
        self.config['overall'] = False
        self.config['timestamp'] = True
        self.config['human'] = True
        gc.return_value = self.config

        time1 = datetime(2013, 2, 14)
        time2 = datetime(2013, 2, 15, 15, 15, 15, 15)
        time3 = datetime(2013, 2, 16, 16, 16, 16, 16)

        msg1 = m.Message(
            topic='org.fedoraproject.prod.git.branch.valgrind.master',
            timestamp=time1,
            i=1
        )
        msg2 = m.Message(
            topic='org.fedoraproject.stg.fas.user.create',
            timestamp=time2,
            i=1
        )
        msg3 = m.Message(
            topic='org.fedoraproject.prod.git.receive.valgrind.master',
            timestamp=time3,
            i=1
        )

        msg1.msg = 'Message 1'
        msg2.msg = 'Message 2'
        msg3.msg = 'Message 3'
        m.session.add_all([msg1, msg2, msg3])
        m.session.flush()

        # Capture what the command logs instead of letting it print.
        logged_info = []

        def info(data):
            logged_info.append(data)

        command = datanommer.commands.LatestCommand()
        command.log.info = info
        command.run()

        json_object = json.loads(logged_info[0])
        # Newest timestamps first; two categories expected.
        eq_(json_object[0], "2013-02-16 16:16:16.000016")
        eq_(json_object[1], "2013-02-15 15:15:15.000015")
        eq_(len(json_object), 2)
0
Example 56
def test_image_upload_field():
    """Exercise flask-admin's ImageUploadField: upload with thumbnail,
    replace (old files removed), delete, no-resize mode, and auto-resize
    (including tiff -> jpg conversion).
    """
    app = Flask(__name__)

    path = _create_temp()

    def _remove_testimages():
        # Best-effort cleanup of every file this test may create.
        safe_delete(path, 'test1.png')
        safe_delete(path, 'test1_thumb.jpg')
        safe_delete(path, 'test2.png')
        safe_delete(path, 'test2_thumb.jpg')
        safe_delete(path, 'test1.jpg')

    class TestForm(form.BaseForm):
        upload = form.ImageUploadField('Upload',
                                       base_path=path,
                                       thumbnail_size=(100, 100, True))

    class TestNoResizeForm(form.BaseForm):
        upload = form.ImageUploadField('Upload', base_path=path, endpoint='test')

    class TestAutoResizeForm(form.BaseForm):
        upload = form.ImageUploadField('Upload',
                                       base_path=path,
                                       max_size=(64, 64, True))

    class Dummy(object):
        pass

    my_form = TestForm()
    eq_(my_form.upload.base_path, path)
    eq_(my_form.upload.endpoint, 'static')

    _remove_testimages()

    dummy = Dummy()

    # Check upload
    filename = op.join(op.dirname(__file__), 'data', 'copyleft.png')

    with open(filename, 'rb') as fp:
        with app.test_request_context(method='POST', data={'upload': (fp, 'test1.png')}):
            my_form = TestForm(helpers.get_form_data())

            ok_(my_form.validate())

            my_form.populate_obj(dummy)

            eq_(dummy.upload, 'test1.png')
            ok_(op.exists(op.join(path, 'test1.png')))
            ok_(op.exists(op.join(path, 'test1_thumb.jpg')))

    # Check replace: the previous file and thumbnail must be removed.
    with open(filename, 'rb') as fp:
        with app.test_request_context(method='POST', data={'upload': (fp, 'test2.png')}):
            my_form = TestForm(helpers.get_form_data())

            ok_(my_form.validate())

            my_form.populate_obj(dummy)

            eq_(dummy.upload, 'test2.png')
            ok_(op.exists(op.join(path, 'test2.png')))
            ok_(op.exists(op.join(path, 'test2_thumb.jpg')))

            ok_(not op.exists(op.join(path, 'test1.png')))
            ok_(not op.exists(op.join(path, 'test1_thumb.jpg')))

    # Check delete
    with app.test_request_context(method='POST', data={'_upload-delete': 'checked'}):
        my_form = TestForm(helpers.get_form_data())

        ok_(my_form.validate())

        my_form.populate_obj(dummy)
        eq_(dummy.upload, None)

        ok_(not op.exists(op.join(path, 'test2.png')))
        ok_(not op.exists(op.join(path, 'test2_thumb.jpg')))

    # Check upload no-resize
    with open(filename, 'rb') as fp:
        with app.test_request_context(method='POST', data={'upload': (fp, 'test1.png')}):
            my_form = TestNoResizeForm(helpers.get_form_data())

            ok_(my_form.validate())

            my_form.populate_obj(dummy)

            eq_(dummy.upload, 'test1.png')
            ok_(op.exists(op.join(path, 'test1.png')))
            ok_(not op.exists(op.join(path, 'test1_thumb.jpg')))

    # Check upload, auto-resize
    filename = op.join(op.dirname(__file__), 'data', 'copyleft.png')

    with open(filename, 'rb') as fp:
        with app.test_request_context(method='POST', data={'upload': (fp, 'test1.png')}):
            my_form = TestAutoResizeForm(helpers.get_form_data())

            ok_(my_form.validate())

            my_form.populate_obj(dummy)

            eq_(dummy.upload, 'test1.png')
            ok_(op.exists(op.join(path, 'test1.png')))

    # Non-png input is converted: saved as jpg after auto-resize.
    filename = op.join(op.dirname(__file__), 'data', 'copyleft.tiff')

    with open(filename, 'rb') as fp:
        with app.test_request_context(method='POST', data={'upload': (fp, 'test1.tiff')}):
            my_form = TestAutoResizeForm(helpers.get_form_data())

            ok_(my_form.validate())

            my_form.populate_obj(dummy)

            eq_(dummy.upload, 'test1.jpg')
            ok_(op.exists(op.join(path, 'test1.jpg')))
Example 57
Project: diffscuss Source File: test_generate.py
def test_gen_diffscuss_basics():
    """Generate a diffscussion for HEAD~2..HEAD in a throwaway repo and
    verify it contains exactly the two included commits (in chronological
    order), their diffs, author metadata, and the expected line structure.
    """
    testy_mc = "Testy McTesterson <[email protected]>"
    with fleeting_repo() as repo:
        repo.do_git(["config", "user.name", "Testy McTesterson"])
        repo.do_git(["config", "user.email", "[email protected]"])
        repo.commit([('README.txt', 'dweezel')],
                    commit_msg="Initial commit")
        repo.commit([('test.txt', '\n'.join(['this is the first line',
                                             'this is the second line',
                                             '']))],
                    author=testy_mc,
                    commit_msg="First commit message.")
        repo.commit([('test.txt', '\n'.join(['this is the changed first line',
                                             'this is the second line',
                                             'this is the new third line',
                                             '']))],
                    author=testy_mc,
                    commit_msg="Second commit message.")
        diffscussion = _run_gen_diffscuss(cwd=repo.repo_root,
                                          revs="HEAD~2..HEAD")

        # do some cheesy tests to make sure strings we expect have /
        # haven't have made it into the diffscussion, before we do
        # line by line comparison, to make it easier to see what's
        # actually going wrong

        # we shouldn't have the initial commit msg
        ok_("Initial commit" not in diffscussion)
        # or the initial commit's diffs
        ok_('README.txt' not in diffscussion)
        ok_('dweezel' not in diffscussion)

        # both included commit logs msgs should be in there
        ok_("First commit message" in diffscussion)
        ok_("Second commit message" in diffscussion)
        # and they should be in chrono order
        ok_(diffscussion.find("First commit message") <
            diffscussion.find("Second commit message"))

        # make sure the diffs came through
        ok_("+this is the changed first line" in diffscussion)
        ok_("+this is the second line" in diffscussion)
        ok_("+this is the new third line" in diffscussion)

        # make sure the author was picked up
        ok_("author: Testy McTesterson" in diffscussion)
        # and the email
        ok_("email: [email protected]" in diffscussion)

        # and some cheesy line by line structure
        lines = diffscussion.split("\n")
        eq_("#* ", lines[0])
        eq_("#* author: Testy McTesterson", lines[1])
        eq_("#* email: [email protected]", lines[2])
        ok_(lines[3].startswith("#* date: "))
        eq_("#* ", lines[4])
        eq_("#- First commit message.", lines[5])
        eq_("#- ", lines[6])
        eq_("#- ", lines[7])
        eq_("#- Second commit message.", lines[8])
        eq_("#- ", lines[9])
        eq_("#- ", lines[10])
        ok_(lines[11].startswith("diff --git"))
        ok_(lines[12].startswith("new file mode"))
        ok_(lines[13].startswith("index"))
        ok_(lines[14].startswith("---"))
        ok_(lines[15].startswith("+++"))
        ok_(lines[16].startswith("@@"))
        eq_("+this is the changed first line", lines[17])
        eq_("+this is the second line", lines[18])
        eq_("+this is the new third line", lines[19])
Example 58
Project: editxt Source File: test_textcommand.py
def test_CommandBar_on_key_press():
    """Generator test for CommandBar key handling: each yielded case sets
    up a CommandView with the given text/completions/selection, sends one
    key, and checks the resulting text, completions, selection and output.
    """
    @command(arg_parser=CommandParser(
        TitleChoice(*"test_1 test_2 test_3 text".split(), title="tests"),
        Choice(('forward', False), ('reverse xyz', True), name='reverse'),
        Regex('sort_regex', True),
    ))
    def cmd(editor, args):
        pass

    @command(arg_parser=CommandParser(Int('number')))
    def count(editor, args):
        raise NotImplementedError("should not get here")

    @command(arg_parser=CommandParser(IllBehaved("bang")))
    def ill(editor, args):
        raise NotImplementedError("should not get here")

    # Sentinel: distinguishes "not passed" from an explicit None.
    NA = object()

    @gentest
    def test(text, command_key, *,
             new_text=NA,
             complete=None,
             default_complete=None,
             new_complete=None,
             new_default_complete=None,
             completions_select_range=None,
             completions_title=None,
             sel=None,
             new_sel=None,
             expect=True,
             has_command=True,
             output="",
             new_output=NA):
        view = CommandView()
        bar = CommandTester(cmd, count, ill, textview=object, command_view=view)
        bar.activate(text)
        if complete:
            view.completions.items = complete
        if default_complete:
            print("selected completion:", complete.index(default_complete))
            view.completions.select(complete.index(default_complete))
        view.command_text = text
        if sel:
            view.command_text_selected_range = sel
        if completions_select_range:
            view.completions.select_range = completions_select_range
        if output:
            view.message(output)

        result = bar.on_key_command(command_key, view)

        eq_(result, expect)
        eq_(view.completions.items,
            (complete or []) if new_complete is None else new_complete)
        eq_(view.completions.selected_item, new_default_complete)
        eq_(view.completions.title, completions_title)
        eq_(view.command_text, text if new_text is NA else new_text)
        eq_(view.output_text, output if new_output is NA else new_output)
        if sel is not None or new_sel is not None:
            eq_(view.command_text_selected_range, sel if new_sel is None else new_sel)
            # NOTE(review): nesting of this branch was lost in the flattened
            # source; reconstructed as part of the selection check — confirm
            # against upstream editxt before relying on it.
            if complete == new_complete:
                eq_(view.completions.select_range, sel if new_sel is None else new_sel)
            else:
                eq_(view.completions.select_range, None)
        eq_(view.command, bar.bar if has_command else None)

    SEL = CommandView.KEYS.SELECTION_CHANGED
    ESC = CommandView.KEYS.ESC
    TAB = CommandView.KEYS.TAB
    BACK_TAB = CommandView.KEYS.BACK_TAB
    UP = CommandView.KEYS.UP
    DOWN = CommandView.KEYS.DOWN
    ENTER = CommandView.KEYS.ENTER

    yield test("c", SEL)
    yield test("c", SEL, complete=["cmd", "count", "ill"], new_complete=["cmd", "count"])
    yield test("c", SEL, completions_select_range=(5, 4), new_sel=(1, 0))
    yield test("", TAB, new_complete=["cmd", "count", "ill"])
    yield test("", TAB, complete=["cmd", "count", "ill"])
    yield test("c", TAB, new_complete=["cmd", "count"])
    yield test("c", TAB, complete=["cmd", "count"], new_complete=["cmd", "count"])
    yield test("cm", TAB, new_text="cmd ")
    yield test("cm", TAB, complete=["cmd"], new_text="cmd ", new_complete=[])
    yield test("cmd t", TAB,
               complete=[CompleteWord(w, start=4) for w in ["test_1", "test_2"]],
               new_text="cmd test_",
               new_complete=["test_1", "test_2"])
    yield test("cmd ", TAB,
               new_text="cmd te",
               new_complete=["test_1", "test_2", 'test_3', 'text'],
               completions_title="tests")
    yield test("cmd test_1", TAB,
               complete=[CompleteWord("test_1", start=4)],
               new_text="cmd test_1 ",
               new_complete=[])
    #yield test("", DOWN)
    yield test("", DOWN, new_text="cmd ",
               complete=["cmd", "count", "ill"], new_default_complete="cmd")
    yield test("cmd ", DOWN, new_text="count ",
               complete=["cmd", "count", "ill"], completions_select_range=(0, 4),
               default_complete="cmd", new_default_complete="count")
    #yield test("", UP)
    yield test("", UP, new_text="ill ",
               complete=["cmd", "count", "ill"], new_default_complete="ill")
    yield test("ill ", UP, new_text="count ",
               complete=["cmd", "count", "ill"], completions_select_range=(0, 4),
               default_complete="ill", new_default_complete="count")
    yield test("cmd ", ENTER, new_text=None, has_command=False)
    yield test("c", ESC, new_text=None, has_command=False)
    yield test("c", ESC, output="abc", new_output="")
    yield test("c", ESC,
               completions_select_range=(0, 1), sel=(0, 1), new_sel=(0, 1),
               complete=["cmd", "count", "ill"], new_complete=[])
    yield test("c", BACK_TAB)
Example 59
Project: simples3 Source File: test_bucket.py
def test_listdir(self):
    """Feed a canned ListBucketResult XML response to the bucket and check
    that listdir() yields (key, mtime, etag, size) tuples.

    Fix: the original used Python-2-only ``iter(reftups).next``; iterators
    expose ``__next__`` in Python 3, so use the ``next()`` builtin instead.
    """
    xml = """
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <Name>bucket</Name>
    <Prefix></Prefix>
    <Marker></Marker>
    <MaxKeys>1000</MaxKeys>
    <IsTruncated>false</IsTruncated>
    <Contents>
        <Key>my-image.jpg</Key>
        <LastModified>2009-10-12T17:50:30.000Z</LastModified>
        <ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>
        <Size>434234</Size>
        <StorageClass>STANDARD</StorageClass>
        <Owner>
            <ID>0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef</ID>
            <DisplayName>johndoe</DisplayName>
        </Owner>
    </Contents>
    <Contents>
        <Key>my-third-image.jpg</Key>
        <LastModified>2009-10-12T17:50:30.000Z</LastModified>
        <ETag>&quot;1b2cf535f27731c974343645a3985328&quot;</ETag>
        <Size>64994</Size>
        <StorageClass>STANDARD</StorageClass>
        <Owner>
            <ID>0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef</ID>
            <DisplayName>johndoe</DisplayName>
        </Owner>
    </Contents>
</ListBucketResult>
""".lstrip()
    g.bucket.add_resp("/", g.H("application/xml"), xml)
    # Expected (key, last_modified, etag, size) tuples, in listing order.
    reftups = (
        ('my-image.jpg', datetime.datetime(2009, 10, 12, 17, 50, 30),
         '"fba9dede5f27731c9771645a39863328"', 434234),
        ('my-third-image.jpg', datetime.datetime(2009, 10, 12, 17, 50, 30),
         '"1b2cf535f27731c974343645a3985328"', 64994))
    reftup_iter = iter(reftups)
    for tup in g.bucket.listdir():
        eq_(len(tup), 4)
        eq_(tup, next(reftup_iter))
        key, mtype, etag, size = tup
Example 60
Project: skll Source File: test_output.py
def check_summary_score(use_feature_hashing=False):
    """Validate that per-learner scores in the summary TSV match the scores
    in each learner's results JSON, and that each .results report contains
    the expected accuracy line.

    Parameters
    ----------
    use_feature_hashing : bool
        When True, use the feature-hashing config (Naive Bayes is skipped
        because it does not work with feature hashing).
    """
    # Test to validate summary file scores
    make_summary_data()

    cfgfile = ('test_summary_feature_hasher.template.cfg' if
               use_feature_hashing else 'test_summary.template.cfg')
    config_template_path = join(_my_dir, 'configs', cfgfile)
    config_path = fill_in_config_paths(config_template_path)

    run_configuration(config_path, quiet=True)

    outprefix = ('test_summary_feature_hasher_test_summary' if
                 use_feature_hashing else 'test_summary_test_summary')
    summprefix = ('test_summary_feature_hasher' if use_feature_hashing else
                  'test_summary')

    with open(join(_my_dir, 'output', ('{}_LogisticRegression.results.'
                                       'json'.format(outprefix)))) as f:
        outd = json.loads(f.read())
        logistic_result_score = outd[0]['score']

    with open(join(_my_dir, 'output',
                   '{}_SVC.results.json'.format(outprefix))) as f:
        outd = json.loads(f.read())
        svm_result_score = outd[0]['score']

    # note that Naive Bayes doesn't work with feature hashing
    if not use_feature_hashing:
        with open(join(_my_dir, 'output', ('{}_MultinomialNB.results.'
                                           'json'.format(outprefix)))) as f:
            outd = json.loads(f.read())
            naivebayes_result_score = outd[0]['score']

    with open(join(_my_dir, 'output', '{}_summary.tsv'.format(summprefix)),
              'r') as f:
        reader = csv.DictReader(f, dialect='excel-tab')

        for row in reader:
            # the learner results dictionaries should have 29 rows,
            # and all of these except results_table
            # should be printed (though some columns will be blank).
            eq_(len(row), 29)
            assert row['model_params']
            assert row['grid_score']
            assert row['score']

            if row['learner_name'] == 'LogisticRegression':
                logistic_summary_score = float(row['score'])
            elif row['learner_name'] == 'MultinomialNB':
                naivebayes_summary_score = float(row['score'])
            elif row['learner_name'] == 'SVC':
                svm_summary_score = float(row['score'])

    test_tuples = [(logistic_result_score,
                    logistic_summary_score,
                    'LogisticRegression'),
                   (svm_result_score,
                    svm_summary_score,
                    'SVC')]

    if not use_feature_hashing:
        test_tuples.append((naivebayes_result_score,
                            naivebayes_summary_score,
                            'MultinomialNB'))

    for result_score, summary_score, learner_name in test_tuples:
        assert_almost_equal(result_score, summary_score,
                            err_msg=('mismatched scores for {} '
                                     '(result:{}, summary:'
                                     '{})').format(learner_name, result_score,
                                                   summary_score))

    # We iterate over each model with an expected
    # accuracy score. Test proves that the report
    # written out at least as a correct format for
    # this line. See _print_fancy_output
    for report_name, val in (("LogisticRegression", .5),
                             ("MultinomialNB", .5),
                             ("SVC", .7)):
        filename = "test_summary_test_summary_{}.results".format(report_name)
        results_path = join(_my_dir, 'output', filename)
        with open(results_path) as results_file:
            report = results_file.read()
            expected_string = "Accuracy = {:.1f}".format(val)
            eq_(expected_string in report,  # approximate
                True,
                msg="{} is not in {}".format(expected_string,
                                             report))
Example 61
Project: flask-admin Source File: test_basic.py
def test_column_filters():
app, db, admin = setup()
Model1, Model2 = create_models(db)
fill_db(Model1, Model2)
# Test string filter
view = CustomModelView(Model1, column_filters=['test1'])
admin.add_view(view)
eq_(len(view._filters), 7)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
[
(0, 'contains'),
(1, 'not contains'),
(2, 'equals'),
(3, 'not equal'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# Make some test clients
client = app.test_client()
# string - equals
rv = client.get('/admin/model1/?flt0_0=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not equal
rv = client.get('/admin/model1/?flt0_1=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - contains
rv = client.get('/admin/model1/?flt0_2=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not contains
rv = client.get('/admin/model1/?flt0_3=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - empty
rv = client.get('/admin/model1/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' in data)
ok_('test1_val_1' not in data)
ok_('test1_val_2' not in data)
# string - not empty
rv = client.get('/admin/model1/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' not in data)
ok_('test1_val_1' in data)
ok_('test1_val_2' in data)
# string - in list
rv = client.get('/admin/model1/?flt0_5=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test1_val_3' not in data)
ok_('test1_val_4' not in data)
# string - not in list
rv = client.get('/admin/model1/?flt0_6=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test1_val_3' in data)
ok_('test1_val_4' in data)
# Test int filter
view = CustomModelView(Model2, column_filters=['int_field'])
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# integer - equals
rv = client.get('/admin/model2/?flt0_0=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# integer - equals (huge number)
rv = client.get('/admin/model2/?flt0_0=6169453081680413441')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_5' in data)
ok_('char_field_val_4' not in data)
# integer - equals - test validation
rv = client.get('/admin/model2/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not equal
rv = client.get('/admin/model2/?flt0_1=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# integer - greater
rv = client.get('/admin/model2/?flt0_2=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# integer - smaller
rv = client.get('/admin/model2/?flt0_3=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# integer - empty
rv = client.get('/admin/model2/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# integer - not empty
rv = client.get('/admin/model2/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# integer - in list
rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# integer - in list (huge number)
rv = client.get('/admin/model2/?flt0_5=6169453081680413441')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_5' in data)
# integer - in list - test validation
rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not in list
rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# Test boolean filter
view = CustomModelView(Model2, column_filters=['bool_field'],
endpoint="_bools")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Bool Field']],
[
(0, 'equals'),
(1, 'not equal'),
])
# boolean - equals - Yes
rv = client.get('/admin/_bools/?flt0_0=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' not in data)
# boolean - equals - No
rv = client.get('/admin/_bools/?flt0_0=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' in data)
# boolean - not equals - Yes
rv = client.get('/admin/_bools/?flt0_1=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' in data)
# boolean - not equals - No
rv = client.get('/admin/_bools/?flt0_1=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' not in data)
# Test float filter
view = CustomModelView(Model2, column_filters=['float_field'],
endpoint="_float")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# float - equals
rv = client.get('/admin/_float/?flt0_0=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# float - equals - test validation
rv = client.get('/admin/_float/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not equal
rv = client.get('/admin/_float/?flt0_1=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# float - greater
rv = client.get('/admin/_float/?flt0_2=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# float - smaller
rv = client.get('/admin/_float/?flt0_3=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# float - empty
rv = client.get('/admin/_float/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# float - not empty
rv = client.get('/admin/_float/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# float - in list
rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# float - in list - test validation
rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not in list
rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# Test date, time, and datetime filters
view = CustomModelView(Model1,
column_filters=['date_field', 'datetime_field', 'timeonly_field'],
endpoint="_datetime")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Date Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'between'),
(5, 'not between'),
(6, 'empty'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']],
[
(7, 'equals'),
(8, 'not equal'),
(9, 'greater than'),
(10, 'smaller than'),
(11, 'between'),
(12, 'not between'),
(13, 'empty'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Timeonly Field']],
[
(14, 'equals'),
(15, 'not equal'),
(16, 'greater than'),
(17, 'smaller than'),
(18, 'between'),
(19, 'not between'),
(20, 'empty'),
])
# date - equals
rv = client.get('/admin/_datetime/?flt0_0=2014-11-17')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - not equal
rv = client.get('/admin/_datetime/?flt0_1=2014-11-17')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - greater
rv = client.get('/admin/_datetime/?flt0_2=2014-11-16')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - smaller
rv = client.get('/admin/_datetime/?flt0_3=2014-11-16')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - between
rv = client.get('/admin/_datetime/?flt0_4=2014-11-13+to+2014-11-20')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - not between
rv = client.get('/admin/_datetime/?flt0_5=2014-11-13+to+2014-11-20')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - empty
rv = client.get('/admin/_datetime/?flt0_6=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('date_obj1' not in data)
ok_('date_obj2' not in data)
# date - empty
rv = client.get('/admin/_datetime/?flt0_6=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('date_obj1' in data)
ok_('date_obj2' in data)
# datetime - equals
rv = client.get('/admin/_datetime/?flt0_7=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not equal
rv = client.get('/admin/_datetime/?flt0_8=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - greater
rv = client.get('/admin/_datetime/?flt0_9=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - smaller
rv = client.get('/admin/_datetime/?flt0_10=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - between
rv = client.get('/admin/_datetime/?flt0_11=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not between
rv = client.get('/admin/_datetime/?flt0_12=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - empty
rv = client.get('/admin/_datetime/?flt0_13=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' not in data)
# datetime - not empty
rv = client.get('/admin/_datetime/?flt0_13=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('datetime_obj1' in data)
ok_('datetime_obj2' in data)
# time - equals
rv = client.get('/admin/_datetime/?flt0_14=11%3A10%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - not equal
rv = client.get('/admin/_datetime/?flt0_15=11%3A10%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - greater
rv = client.get('/admin/_datetime/?flt0_16=11%3A09%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - smaller
rv = client.get('/admin/_datetime/?flt0_17=11%3A09%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - between
rv = client.get('/admin/_datetime/?flt0_18=10%3A40%3A00+to+11%3A50%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - not between
rv = client.get('/admin/_datetime/?flt0_19=10%3A40%3A00+to+11%3A50%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - empty
rv = client.get('/admin/_datetime/?flt0_20=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' not in data)
# time - not empty
rv = client.get('/admin/_datetime/?flt0_20=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' in data)
0
Example 62
def test_real(self):
    """Flatten a real captured fedmsg message and compare to the expected map.

    Feeds an actual askbot ``post.edit`` message (captured from the bus)
    through ``construct_substitutions()`` and verifies that every nested key
    is exposed both under its dotted path (e.g. ``msg.post.text``) and as the
    intact sub-dictionary (e.g. ``msg.post``).
    """
    # Input: a verbatim message body as it arrived on the bus.
    msg = {
        "i": 2,
        "msg": {
            "thread": {
                "tagnames": [
                    "town"
                ],
                "pk": 2,
                "title": "alskdjflaksjdf lakjsf a"
            },
            "created": False,
            "timestamp": 1359947640.0,
            "topmost_post_id": 2,
            "agent": "ralph",
            "newly_mentioned_users": [],
            "diff": "<p>alskdfj... the diff is actually here",
            "post": {
                "vote_up_count": 0,
                "text": "alskdfjalskdjf alkjasdalskdjf ...",
                "summary": "alskdfjalskdjf alkjasdalskdjf ...",
                "comment_count": 0,
                "vote_down_count": 0,
                "pk": 2,
                "post_type": "question"
            }
        },
        "topic": "org.fedoraproject.dev.askbot.post.edit",
        "username": "threebean",
        "timestamp": 1359947640.986208
    }
    # Expected output: one entry per dotted leaf path, PLUS each intermediate
    # dict kept whole under its own dotted key ('msg', 'msg.post', ...).
    target = {
        'username': 'threebean',
        'msg.post.text': 'alskdfjalskdjf alkjasdalskdjf ...',
        'msg.thread.title': 'alskdjflaksjdf lakjsf a',
        'msg.post.vote_down_count': 0,
        'msg.post.post_type': 'question',
        'msg.thread.pk': 2,
        'msg.newly_mentioned_users': [],
        'msg.diff': '<p>alskdfj... the diff is actually here',
        'topic': 'org.fedoraproject.dev.askbot.post.edit',
        'msg.agent': 'ralph',
        'msg.post.comment_count': 0,
        'msg.post': {
            'vote_up_count': 0,
            'text': 'alskdfjalskdjf alkjasdalskdjf ...',
            'summary': 'alskdfjalskdjf alkjasdalskdjf ...',
            'comment_count': 0,
            'vote_down_count': 0,
            'pk': 2,
            'post_type': 'question'},
        'msg.timestamp': 1359947640.0,
        'timestamp': 1359947640.986208,
        'msg.topmost_post_id': 2,
        'i': 2,
        'msg.post.pk': 2,
        'msg.post.vote_up_count': 0,
        'msg.post.summary': 'alskdfjalskdjf alkjasdalskdjf ...',
        'msg.thread.tagnames': ['town'],
        'msg.thread': {'tagnames': ['town'],
                       'pk': 2,
                       'title': 'alskdjflaksjdf lakjsf a'},
        'msg': {'newly_mentioned_users': [],
                'thread': {'tagnames': ['town'],
                           'pk': 2,
                           'title': 'alskdjflaksjdf lakjsf a'},
                'created': False,
                'topmost_post_id': 2,
                'timestamp': 1359947640.0,
                'post': {'vote_up_count': 0,
                         'text': 'alskdfjalskdjf alkjasdalskdjf ...',
                         'summary': 'alskdfjalskdjf alkjasdalskdjf ...',
                         'comment_count': 0,
                         'vote_down_count': 0,
                         'pk': 2,
                         'post_type': 'question'},
                'diff': '<p>alskdfj... the diff is actually here',
                'agent': 'ralph'},
        'msg.created': False,
    }
    actual = construct_substitutions(msg)
    # Dict equality is order-insensitive, so only the key/value content
    # of the flattening is asserted here.
    eq_(actual, target)
0
Example 63
def test_column_filters():
    """Exercise flask-admin column filters on SQLAlchemy-backed model views.

    Covers: the generated ``_filter_dict`` for plain columns, relationship
    ("property") columns and dotted ``model1.bool_field`` references, then
    runs actual filtered list queries (``fltN_M=value`` query args) and
    checks which rows show up in the rendered page.
    """
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    view = CustomModelView(
        Model1, db.session,
        column_filters=['test1']
    )
    admin.add_view(view)
    # A string column yields 4 filter operations.
    eq_(len(view._filters), 4)
    eq_(view._filter_dict, {
        u'Test1': [
            (0, u'equals'),
            (1, u'not equal'),
            (2, u'contains'),
            (3, u'not contains')
        ]})
    # Test filter that references property: filtering on the 'model1'
    # relationship expands to a filter group per related column.
    view = CustomModelView(Model2, db.session,
                           column_filters=['model1'])
    eq_(view._filter_dict, {
        u'Model1 / Test1': [
            (0, u'equals'),
            (1, u'not equal'),
            (2, u'contains'),
            (3, u'not contains')
        ],
        'Model1 / Test2': [
            (4, 'equals'),
            (5, 'not equal'),
            (6, 'contains'),
            (7, 'not contains')
        ],
        u'Model1 / Test3': [
            (8, u'equals'),
            (9, u'not equal'),
            (10, u'contains'),
            (11, u'not contains')
        ],
        u'Model1 / Test4': [
            (12, u'equals'),
            (13, u'not equal'),
            (14, u'contains'),
            (15, u'not contains')
        ],
        u'Model1 / Bool Field': [
            (16, u'equals'),
            (17, u'not equal'),
        ],
        u'Model1 / Enum Field': [
            (18, u'equals'),
            (19, u'not equal'),
        ]})
    # Test filter with a dot: naming the related column directly produces
    # just that column's (boolean) filter group.
    view = CustomModelView(Model2, db.session,
                           column_filters=['model1.bool_field'])
    eq_(view._filter_dict, {
        'Model1 / Bool Field': [
            (0, 'equals'),
            (1, 'not equal'),
        ]})
    # Fill DB with fixture rows; only obj1/obj2 of Model2 point at a Model1.
    model1_obj1 = Model1('model1_obj1', bool_field=True)
    model1_obj2 = Model1('model1_obj2')
    model1_obj3 = Model1('model1_obj3')
    model1_obj4 = Model1('model1_obj4')
    model2_obj1 = Model2('model2_obj1', model1=model1_obj1)
    model2_obj2 = Model2('model2_obj2', model1=model1_obj1)
    model2_obj3 = Model2('model2_obj3')
    model2_obj4 = Model2('model2_obj4')
    db.session.add_all([
        model1_obj1, model1_obj2, model1_obj3, model1_obj4,
        model2_obj1, model2_obj2, model2_obj3, model2_obj4,
    ])
    db.session.commit()
    client = app.test_client()
    # flt0_0 = Test1 equals -> exact match only.
    rv = client.get('/admin/model1view/?flt0_0=model1_obj1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('model1_obj1' in data)
    ok_('model1_obj2' not in data)
    # flt0_5 = Test2 not equal -> everything but the excluded value's Test2.
    rv = client.get('/admin/model1view/?flt0_5=model1_obj1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('model1_obj1' in data)
    ok_('model1_obj2' in data)
    # Test different filter types: integer columns get comparison ops only.
    view = CustomModelView(Model2, db.session,
                           column_filters=['int_field'])
    admin.add_view(view)
    eq_(view._filter_dict, {'Int Field': [(0, 'equals'), (1, 'not equal'),
                                          (2, 'greater than'), (3, 'smaller than')]})
    # Test filters to joined table field: filter on the related row's
    # bool_field should select exactly the Model2 rows joined to obj1.
    view = CustomModelView(
        Model2, db.session,
        endpoint='_model2',
        column_filters=['model1.bool_field'],
        column_list=[
            'string_field',
            'model1.id',
            'model1.bool_field',
        ]
    )
    admin.add_view(view)
    rv = client.get('/admin/_model2/?flt1_0=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('model2_obj1' in data)
    ok_('model2_obj2' in data)
    ok_('model2_obj3' not in data)
    ok_('model2_obj4' not in data)
0
Example 64
Project: vcfnp Source File: test_array.py
def test_variants_transformers_ann():
    """Check the SnpEff ANN transformer output for the first three records.

    Instead of one ``eq_`` per field per record, the expected values are
    expressed as per-record override dicts on top of the "absent" defaults
    (``b'.'`` for string sub-fields, ``-1`` for numeric ones), and a loop
    performs exactly the same field-by-field comparisons.
    """
    string_fields = ('Allele', 'Annotation', 'Annotation_Impact', 'Gene_Name',
                     'Gene_ID', 'Feature_Type', 'Feature_ID',
                     'Transcript_BioType', 'HGVS_c', 'HGVS_p')
    int_fields = ('Rank', 'cDNA_pos', 'cDNA_length', 'CDS_pos', 'CDS_length',
                  'AA_pos', 'AA_length', 'Distance')
    # Only values differing from the defaults are listed per record.
    expectations = [
        # record 0: intergenic annotation
        {'Allele': b'T', 'Annotation': b'intergenic_region',
         'Annotation_Impact': b'MODIFIER', 'Gene_Name': b'AGAP004677',
         'Gene_ID': b'AGAP004677', 'Feature_Type': b'intergenic_region',
         'Feature_ID': b'AGAP004677'},
        # record 1: no annotation at all -> everything takes the defaults
        {},
        # record 2: missense variant on a transcript
        {'Allele': b'T', 'Annotation': b'missense_variant',
         'Annotation_Impact': b'MODERATE', 'Gene_Name': b'AGAP005273',
         'Gene_ID': b'AGAP005273', 'Feature_Type': b'transcript',
         'Feature_ID': b'AGAP005273-RA', 'Transcript_BioType': b'VectorBase',
         'Rank': 1, 'HGVS_c': b'n.17A>T', 'HGVS_p': b'p.Asp6Val',
         'cDNA_pos': 17, 'cDNA_length': 4788, 'CDS_pos': 17, 'AA_pos': 6},
    ]

    def _test(v):
        for idx, overrides in enumerate(expectations):
            for name in string_fields:
                eq_(overrides.get(name, b'.'), v['ANN'][name][idx])
            for name in int_fields:
                eq_(overrides.get(name, -1), v['ANN'][name][idx])

    # Explicit transformer configuration.
    varr = variants('fixture/test_ann.vcf',
                    dtypes={'ANN': vcfnp.eff.ANN_DEFAULT_DTYPE},
                    arities={'ANN': 1},
                    transformers={'ANN': vcfnp.eff.ann_default_transformer()})
    _test(varr)
    # ANN handling should also be included in the defaults.
    varr = variants('fixture/test_ann.vcf')
    _test(varr)
0
Example 65
Project: editxt Source File: test_parser.py
def test_Regex():
    """Generator test for the Regex command-argument field.

    Yields (checker, input, start, expected[, flags]) tuples for nose to run:
    first for a plain search field, then for ``replace=True`` fields whose
    parsed value is a (pattern, replacement) pair.  The raw-string cases
    deliberately stress delimiter/escape handling, so every literal here is
    significant byte-for-byte.
    """
    field = Regex('regex')
    eq_(str(field), 'regex')
    eq_(repr(field), "Regex('regex')")
    def regex_test(text, start, expect, flags=0):
        # Checker: consume `text` from `start` and compare the parse result.
        # When `expect` is an Exception, the same error must be raised.
        if isinstance(expect, Exception):
            def check(err):
                eq_(err, expect)
            with assert_raises(type(expect), msg=check):
                field.consume(text, start)
            return
        value = field.consume(text, start)
        if expect[0] in [None, (None, None)]:
            eq_(value, expect)
            return
        expr, index = value
        if field.replace:
            # replace-mode fields return ((pattern, replacement), index)
            (expr, replace) = expr
            got = ((expr, replace), index)
        else:
            got = (expr, index)
        eq_(got, expect)
        # UNICODE and MULTILINE are always added to the user-given flags.
        eq_(expr.flags, flags | re.UNICODE | re.MULTILINE)
    test = regex_test
    yield test, '', 0, (None, 1)
    yield test, '/abc/', 0, ('abc', 6)
    yield test, '/abc/ def', 0, ('abc', 6)
    yield test, '/abc/ def', 0, ('abc', 6)
    yield test, '/abc/i def', 0, ('abc', 7), re.I
    yield test, '/abc/is def', 0, ('abc', 8), re.I | re.S
    yield test, '/abc/is def', 0, ('abc', 8), re.I | re.S
    yield test, 'abc', 0, ('abc', 4)
    yield test, '^abc$', 0, ('^abc$', 6)
    yield test, '^abc$ def', 0, ('^abc$', 6)
    yield test, '/abc/X def', 0, \
        ParseError('unknown flag: X', field, 5, 5)
    test = make_placeholder_checker(field)
    yield test, "", 0, "regex"
    yield test, "/", 0, "/"
    yield test, "//", 0, ""
    #yield test, "// ", 0, None
    test = make_placeholder_checker(Regex('regex', default="1 2"))
    yield test, "", 0, "/1 2/"
    # arg-string round-trips: the delimiter is chosen to avoid escaping
    # where possible ('/', then ':', then quoting).
    test = make_arg_string_checker(field)
    yield test, RegexPattern("str"), "/str/"
    yield test, RegexPattern("str", re.I), "/str/i"
    yield test, RegexPattern("/usr/bin"), ":/usr/bin:"
    yield test, RegexPattern("/usr/bin:"), '"/usr/bin:"'
    yield test, RegexPattern(r'''//''\:""'''), r'''://''\:"":'''
    yield test, RegexPattern(r'''//''\\:""'''), r'''://''\\\:"":''', False
    yield test, RegexPattern(r'''\://''""'''), r''':\://''"":'''
    yield test, RegexPattern(r'''\\://''""'''), r''':\\\://''"":''', False
    # pedantic cases with three or more of all except ':'
    yield test, RegexPattern(r'''///'"'::"'"'''), r''':///'"'\:\:"'":''', False
    yield test, RegexPattern(r'''///'"':\\:"'"'''), r''':///'"'\:\\\:"'":''', False
    yield test, "str", Error("invalid value: regex='str'")
    # --- replace=True: value is a (find, replace) pair ---
    field = Regex('regex', replace=True)
    eq_(repr(field), "Regex('regex', replace=True)")
    test = regex_test
    yield test, '', 0, ((None, None), 1)
    yield test, '/abc', 0, (('abc', None), 5)
    yield test, '/abc ', 0, (('abc ', None), 6)
    yield test, '/\\\\', 0, (('\\\\', None), 4)
    yield test, '/\\/', 0, (('\\/', None), 4)
    yield test, '"abc', 0, (('abc', None), 5)
    yield test, '"abc"', 0, (('abc', ''), 6)
    yield test, '"abc""', 0, (('abc', ''), 7)
    yield test, '/abc def', 0, (('abc def', None), 9)
    yield test, '/abc/def', 0, (('abc', 'def'), 9)
    yield test, '/abc/def/', 0, (('abc', 'def'), 10)
    yield test, '/abc/def/ def', 0, (('abc', 'def'), 10)
    yield test, '/abc/def/ def', 0, (('abc', 'def'), 10)
    yield test, '/abc/def/i def', 0, (('abc', 'def'), 11), re.I
    yield test, '/abc/def/is def', 0, (('abc', 'def'), 12), re.I | re.S
    yield test, '/(', 0, (("(", None), 3)
    yield test, 'abc', 0, \
        ParseError("invalid search pattern: 'abc'", field, 0, 0)
    yield test, 'abc def', 0, \
        ParseError("invalid search pattern: 'abc def'", field, 0, 0)
    yield test, '/abc/def/y def', 0, \
        ParseError('unknown flag: y', field, 9, 9)
    msg = 'invalid regular expression: unbalanced parenthesis'
    test = make_placeholder_checker(field)
    yield test, "", 0, "regex"
    yield test, "/", 0, "//"
    yield test, "/x/", 0, "/"
    yield test, "/\\//", 0, "/"
    yield test, "/x//", 0, ""
    field = Regex('regex', replace=True, default=("", ""))
    test = make_placeholder_checker(field)
    yield test, "", 0, "regex"
    yield test, "/", 0, "//"
    yield test, "/x/", 0, "/"
    yield test, "/\\//", 0, "/"
    yield test, "/x//", 0, ""
    test = make_arg_string_checker(field)
    yield test, (RegexPattern("str"), 'abc'), "/str/abc/"
    yield test, (RegexPattern("str", re.I), 'abc'), "/str/abc/i"
    yield test, (RegexPattern("/usr/bin"), "abc"), ":/usr/bin:abc:"
    yield test, (RegexPattern("/usr/bin:"), ":"), '"/usr/bin:":"'
    yield test, (RegexPattern(r'''//''\:""'''), r'''/"'\:'''), r'''://''\:"":/"'\::'''
    yield test, (RegexPattern(r'''//''\:""'''), r'''/"'\\:'''), r'''://''\:"":/"'\\\::''', False
    yield test, ("str", "abc"), Error("invalid value: regex=('str', 'abc')")
    yield test, ("str", 42), Error("invalid value: regex=('str', 42)")
0
Example 66
Project: varlens Source File: test_read_evidence.py
def test_read_evidence_gatk_mini_bundle_extract():
    """Pileup/allele-summary checks against the GATK mini-bundle BAM fixture.

    Builds a PileupCollection over a fixed set of loci and asserts the
    allele summaries under various filters (duplicate removal, base- and
    mapping-quality thresholds, improper-mate-pair removal).  All expected
    counts are hand-verified against the fixture BAM.
    """
    loci = [
        Locus.from_inclusive_coordinates("20", 9999996, 9999996),   # 0
        Locus.from_inclusive_coordinates("20", 10260442),           # 1
        Locus.from_inclusive_coordinates("20", 10006823),           # 2
        Locus.from_inclusive_coordinates("20", 10006819, 10006823), # 3
        Locus.from_inclusive_coordinates("20", 10006819, 10006825), # 4
        Locus.from_inclusive_coordinates("20", 10006822, 10006827), # 5
        Locus.from_inclusive_coordinates("20", 10007175),           # 6
        Locus.from_inclusive_coordinates("20", 10007174, 10007176), # 7
        Locus.from_inclusive_coordinates("20", 1, 3),               # 8
        Locus.from_inclusive_coordinates("20", 10008796),           # 9
        Locus.from_inclusive_coordinates("20", 10008921),           # 10
    ]
    handle = Samfile(data_path("gatk_mini_bundle_extract.bam"))
    evidence = PileupCollection.from_bam(handle, loci)
    eq_(evidence.allele_summary(loci[0]), [("ACT", 9)])
    # dropping duplicates removes exactly one supporting read here
    eq_(evidence.filter(drop_duplicates=True).allele_summary(loci[0]),
        [("ACT", 8)])
    eq_(evidence.allele_summary(loci[1]), [("T", 7)])
    eq_(evidence.filter().allele_summary(loci[2]), [("", 6), ("C", 2)])
    # a 50 base-quality cutoff removes all evidence at locus 2
    eq_(evidence.filter(
        drop_duplicates=True, min_base_quality=50).allele_summary(loci[2]),
        [])
    eq_(evidence.filter(drop_duplicates=True).allele_summary(loci[2]),
        [("", 5), ("C", 1)])
    eq_(evidence.filter(
        drop_duplicates=True, min_mapping_quality=60).allele_summary(
            loci[2]),
        [("", 5), ("C", 1)])
    # raising the mapping-quality threshold by 1 changes the summary
    eq_(evidence.filter(drop_duplicates=True,
        min_mapping_quality=61).allele_summary(loci[2]), [("", 2)])
    eq_(evidence.filter(drop_duplicates=True,
        min_mapping_quality=61).allele_summary(loci[3]), [("A", 2)])
    eq_(evidence.filter(drop_duplicates=True,
        min_mapping_quality=61).allele_summary(loci[4]), [("AAA", 2)])
    eq_(evidence.filter(drop_duplicates=True,
        min_mapping_quality=61).allele_summary(loci[5]), [("AAAC", 2)])
    eq_(evidence.filter().allele_summary(loci[6]), [("T", 5), ("C", 3)])
    eq_(evidence.filter(min_base_quality=30).allele_summary(loci[6]),
        [("T", 4), ("C", 3)])
    eq_(evidence.filter().allele_summary(loci[7]),
        [("CTT", 5), ("CCT", 3)])
    eq_(evidence.filter(min_base_quality=30).allele_summary(loci[7]),
        [("CTT", 3), ("CCT", 2)])
    eq_(evidence.filter(min_base_quality=32).allele_summary(loci[2]),
        [("", 6), ("C", 1)])
    eq_(filtered_read_names(evidence.at(loci[2]).filter(min_base_quality=32)),
        {'20GAVAAXX100126:4:3:18352:43857'})
    # locus 8 has no coverage at all
    eq_(evidence.allele_summary(loci[8]), [])
    eq_(evidence.filter(drop_duplicates=True).allele_summary(loci[8]), [])
    # querying a locus that was never loaded must raise KeyError
    assert_raises(KeyError,
                  evidence.allele_summary,
                  Locus.from_inclusive_coordinates("20", 10009174, 10009176))
    eq_(filtered_read_names(
        evidence.at(loci[9]).filter(drop_improper_mate_pairs=True)),
        {'20FUKAAXX100202:8:68:1530:49310'})
    eq_(len(evidence.at(loci[8]).read_attribute('mapping_quality')), 0)
    # read_attribute() and read_attributes() must agree on the same column
    eq_(list(evidence.at(loci[9]).read_attribute('mapping_quality')),
        list(evidence.at(loci[9]).read_attributes().mapping_quality))
    eq_(evidence.filter(drop_duplicates=True).allele_summary(loci[10]),
        [('C', 2), ('CA', 1), ('CAA', 1)])
    # an empty interbase interval at the same position yields the inserted
    # bases only
    eq_(evidence.filter(drop_duplicates=True).allele_summary(
        Locus.from_interbase_coordinates(
            loci[10].contig, loci[10].start, loci[10].start)),
        [('', 2), ('A', 1), ('AA', 1)])
0
Example 67
Project: catsnap Source File: test_tag_batch.py
@patch('catsnap.batch.tag_batch.BatchWriteList')
@patch('catsnap.batch.tag_batch.Client')
@patch('catsnap.batch.tag_batch.get_item_batch')
def test_add_image_to_tags(self, get_item_batch, Client, BatchWriteList):
    """add_image_to_tags() must update existing tag items, create missing
    ones, and retry unprocessed items returned by DynamoDB batch writes.

    NOTE: patch decorators are applied bottom-up, so the argument order
    (get_item_batch, Client, BatchWriteList) mirrors the reversed decorator
    order — do not reorder either side.
    """
    # One tag ('bleep') already exists and holds one filename.
    existing_tag_item = MagicMock()
    def existing_getitem(key):
        if key == 'filenames':
            return '["facade"]'
        elif key == HASH_KEY:
            return 'bleep'
        else:
            raise ValueError(key)
    existing_tag_item.__getitem__.side_effect = existing_getitem
    get_item_batch.return_value = [ existing_tag_item ]
    # The other tag ('bloop') does not exist yet and will be created.
    new_tag_item = MagicMock()
    new_tag_item.__getitem__.return_value = 'bloop'
    table = Mock()
    table.new_item.return_value = new_tag_item
    table.name = 'thetablename'
    client = Mock()
    client.table.return_value = table
    dynamo = Mock()
    client.get_dynamodb.return_value = dynamo
    Client.return_value = client
    write_list = Mock()
    # First submit reports the new item as unprocessed, forcing a retry;
    # the second submit succeeds.
    first_response = {
        'UnprocessedItems': { 'thetablename': [
            {'PutRequest': {
                'Item': {
                    'tag': 'bloop',
                    'filenames': '["beefcafe"]'}}}]},
        'Responses': {'thetablename': {'ConsumedCapacityUnits': 5.0}}}
    second_response = {'Responses': {'thetablename':
        {'ConsumedCapacityUnits': 5.0}}}
    write_list.submit.side_effect = [first_response, second_response]
    BatchWriteList.return_value = write_list
    add_image_to_tags('beefcafe', ['bleep', 'bloop'])
    # The existing tag gained the new filename in-place.
    existing_tag_item.__setitem__.assert_called_with('filenames',
        '["facade", "beefcafe"]')
    get_item_batch.assert_called_with(
        ['bleep', 'bloop'], 'tag', ['filenames'])
    # The missing tag was created holding just the new filename.
    table.new_item.assert_called_with(hash_key='bloop',
        attrs={'filenames':'["beefcafe"]'})
    BatchWriteList.assert_called_with(dynamo)
    # Second batch contains only the previously-unprocessed item.
    write_list.add_batch.assert_has_calls([
        call(table, puts=[existing_tag_item, new_tag_item]),
        call(table, puts=[new_tag_item])])
    eq_(write_list.submit.call_count, 2)
0
Example 68
Project: jingo-minify Source File: tests.py
@patch('jingo_minify.helpers.time.time')
@patch('jingo_minify.helpers.os.path.getmtime')
def test_css_helper(getmtime, time):
    """
    The css() template helper must emit one <link> per bundle asset when
    debug=True (cache-busted with ?build=<mtime>), and a single minified
    <link> (cache-busted with the global BUILD_ID_CSS) when debug=False.
    Bundles may mix local paths, absolute URLs and protocol-less URLs.
    """
    # Pin both mtime and time so ?build=1 is deterministic.
    getmtime.return_value = 1
    time.return_value = 1
    env = jingo.env
    # Local-file bundle, debug mode: one tag per source file.
    t = env.from_string("{{ css('common', debug=True) }}")
    s = t.render()
    expected = "\n".join(
        ['<link rel="stylesheet" media="screen,projection,tv" '
         'href="%s?build=1" />' % (settings.STATIC_URL + j)
         for j in settings.MINIFY_BUNDLES['css']['common']])
    eq_(s, expected)
    # Same bundle, production mode: single minified tag.
    t = env.from_string("{{ css('common', debug=False) }}")
    s = t.render()
    eq_(s,
        '<link rel="stylesheet" media="screen,projection,tv" '
        'href="%scss/common-min.css?build=%s" />'
        % (settings.STATIC_URL, BUILD_ID_CSS))
    # Absolute-URL asset is passed through untouched in debug mode.
    t = env.from_string("{{ css('common_url', debug=True) }}")
    s = t.render()
    eq_(s, '<link rel="stylesheet" media="screen,projection,tv" '
        'href="http://example.com/test.css?build=1" />')
    t = env.from_string("{{ css('common_url', debug=False) }}")
    s = t.render()
    eq_(s,
        '<link rel="stylesheet" media="screen,projection,tv" '
        'href="%scss/common_url-min.css?build=%s" />'
        % (settings.STATIC_URL, BUILD_ID_CSS))
    # Protocol-less URL (//host/...) is also passed through in debug mode.
    t = env.from_string("{{ css('common_protocol_less_url', debug=True) }}")
    s = t.render()
    eq_(s, '<link rel="stylesheet" media="screen,projection,tv" '
        'href="//example.com/test.css?build=1" />')
    t = env.from_string("{{ css('common_protocol_less_url', debug=False) }}")
    s = t.render()
    eq_(s,
        '<link rel="stylesheet" media="screen,projection,tv" '
        'href="%scss/common_protocol_less_url-min.css?build=%s" />'
        % (settings.STATIC_URL, BUILD_ID_CSS))
    # Mixed bundle in debug mode emits each asset kind in order.
    t = env.from_string("{{ css('common_bundle', debug=True) }}")
    s = t.render()
    eq_(s, '<link rel="stylesheet" media="screen,projection,tv" '
        'href="css/test.css?build=1" />\n'
        '<link rel="stylesheet" media="screen,projection,tv" '
        'href="http://example.com/test.css?build=1" />\n'
        '<link rel="stylesheet" media="screen,projection,tv" '
        'href="//example.com/test.css?build=1" />\n'
        '<link rel="stylesheet" media="screen,projection,tv" '
        'href="https://example.com/test.css?build=1" />')
    # Mixed bundle in production mode still collapses to one minified tag.
    t = env.from_string("{{ css('common_bundle', debug=False) }}")
    s = t.render()
    eq_(s, '<link rel="stylesheet" media="screen,projection,tv" '
        'href="%scss/common_bundle-min.css?build=%s" />' %
        (settings.STATIC_URL, BUILD_ID_CSS))
0
Example 69
Project: spinoff Source File: actor_test.py
def test_fully_qualified_uri():
    """Parsing fully-qualified 'host:port[/path]' URIs exposes all derived
    attributes (name, path, url, node, root, parent, steps, local).

    The three original assertion sections are expressed as one table of
    (raw string, expected name, expected path, expected steps, parent);
    the assertions performed per entry are identical to the originals.
    """
    cases = [
        ('localhost:123', '', '', [''], None),
        ('localhost:123/foo', 'foo', '/foo', ['', 'foo'], 'localhost:123'),
        ('localhost:123/foo/bar', 'bar', '/foo/bar', ['', 'foo', 'bar'],
         'localhost:123/foo'),
    ]
    for raw, name, path, steps, parent in cases:
        uri = Uri.parse(raw)
        eq_(str(uri), raw)
        eq_(uri.name, name)
        eq_(uri.path, path)
        eq_(uri.url, 'tcp://' + raw)
        eq_(uri, Uri.parse(raw))
        if parent is None:
            # the node root: equal to its own string, no parent, own root
            eq_(uri, raw)
            ok_(not uri.parent)
            eq_(uri.root, uri)
        else:
            eq_(uri, raw, "Uri.__eq__ supports str")
            eq_(uri.parent, parent)
            eq_(uri.root, 'localhost:123')
        eq_(uri.node, 'localhost:123')
        eq_(list(uri.steps), steps)
        # `local` is the path relative to the node, which equals `path`
        eq_(uri.local, path)
0
Example 70
Project: PyExcelerate Source File: test_Workbook.py
def test_number_precision():
    """Round-trip floats of increasing precision through an .xlsx file.

    Writes 1..15 significant-digit values with PyExcelerate, reads them back
    with xlrd, and asserts exact equality.  Skips when xlrd is unavailable.

    Fixes over the original: the temporary output file is now removed in a
    ``finally`` block, so it no longer leaks when ``eq_`` raises; the two
    index-based loops use ``enumerate`` instead of ``range(len(...))``.
    """
    try:
        import xlrd
    except ImportError:
        raise nose.SkipTest('xlrd not installed')
    filename = get_output_path('precision.xlsx')
    sheetname = 'Sheet1'
    nums = [
        1,
        1.2,
        1.23,
        1.234,
        1.2345,
        1.23456,
        1.234567,
        1.2345678,
        1.23456789,
        1.234567890,
        1.2345678901,
        1.23456789012,
        1.234567890123,
        1.2345678901234,
        1.23456789012345,
    ]
    write_workbook = Workbook()
    write_worksheet = write_workbook.new_sheet(sheetname)
    # PyExcelerate cells are 1-based: row index starts at 1, column 1.
    for row, value in enumerate(nums, start=1):
        write_worksheet[row][1].value = value
    write_workbook.save(filename)
    try:
        read_workbook = xlrd.open_workbook(filename)
        read_worksheet = read_workbook.sheet_by_name(sheetname)
        # xlrd cells are 0-based, hence cell(row_num, 0).
        for row_num, expected in enumerate(nums):
            got = read_worksheet.cell(row_num, 0).value
            eq_(got, expected)
    finally:
        # Clean up even when an assertion above fails.
        if os.path.exists(filename):
            os.remove(filename)
0
Example 71
Project: bodhi Source File: test_models.py
def test_update_bugs(self):
    """Exercise Update.update_bugs(): add, remove, de-duplicate, and
    preservation of Bug rows that still have BugKarma attached.

    Fix over the original: the bare ``assert`` statements are replaced with
    ``eq_`` for consistency with the rest of the test — bare asserts are
    stripped under ``python -O`` and give poorer failure messages.
    """
    update = self.obj
    eq_(len(update.bugs), 2)
    session = self.db
    # try just adding bugs
    bugs = ['1234']
    update.update_bugs(bugs, session)
    eq_(len(update.bugs), 1)
    eq_(update.bugs[0].bug_id, 1234)
    # try just removing
    bugs = []
    update.update_bugs(bugs, session)
    eq_(len(update.bugs), 0)
    # the orphaned Bug row should be gone from the database as well
    eq_(self.db.query(model.Bug)
        .filter_by(bug_id=1234).first(), None)
    # Test new duplicate bugs: the same id twice must attach only once
    bugs = ['1234', '1234']
    update.update_bugs(bugs, session)
    eq_(len(update.bugs), 1)
    # Try adding a new bug, and removing the rest
    bugs = ['4321']
    update.update_bugs(bugs, session)
    eq_(len(update.bugs), 1)
    eq_(update.bugs[0].bug_id, 4321)
    eq_(self.db.query(model.Bug)
        .filter_by(bug_id=1234).first(), None)
    # Try removing a bug when it already has BugKarma: the Bug row must
    # survive deletion because the karma record still references it
    karma = BugKarma(bug_id=4321, karma=1)
    self.db.add(karma)
    self.db.flush()
    bugs = ['5678']
    update.update_bugs(bugs, session)
    eq_(len(update.bugs), 1)
    eq_(update.bugs[0].bug_id, 5678)
    eq_(self.db.query(model.Bug)
        .filter_by(bug_id=4321).count(), 1)
0
Example 72
Project: datanommer Source File: test_commands.py
@freezegun.freeze_time('2013-03-01')
def test_dump_timespan(self):
    """The dump command must honor the 'before'/'since' config window.

    Three messages are stored at 2013-02-14, -15 and -16T08:00; with
    since=2013-02-14T08:00 and before=2013-02-16 only the middle message
    falls inside the window, so the dump must contain exactly that one.

    Fix over the original: ``datetime(2013,02,14)``-style leading-zero
    integer literals are a SyntaxError on Python 3; plain decimal
    month/day values denote the same dates.
    """
    with patch('datanommer.commands.DumpCommand.get_config') as gc:
        self.config['before'] = '2013-02-16'
        self.config['since'] = '2013-02-14T08:00:00'
        gc.return_value = self.config
        time1 = datetime(2013, 2, 14)
        time2 = datetime(2013, 2, 15)
        time3 = datetime(2013, 2, 16, 8)
        msg1 = m.Message(
            topic='org.fedoraproject.prod.git.branch.valgrind.master',
            timestamp=time1,
            i=4
        )
        msg2 = m.Message(
            topic='org.fedoraproject.prod.git.receive.valgrind.master',
            timestamp=time2,
            i=3
        )
        msg3 = m.Message(
            topic='org.fedoraproject.prod.log.receive.valgrind.master',
            timestamp=time3,
            i=2
        )
        msg1.msg = 'Message 1'
        msg2.msg = 'Message 2'
        msg3.msg = 'Message 3'
        m.session.add_all([msg1, msg2, msg3])
        m.session.flush()
        # Capture what the command logs instead of letting it print.
        logged_info = []
        def info(data):
            logged_info.append(data)
        command = datanommer.commands.DumpCommand()
        command.log.info = info
        command.run()
        json_object = json.loads(logged_info[0])
        # Only msg2 (2013-02-15) is inside the [since, before) window.
        eq_(json_object[0]['topic'],
            'org.fedoraproject.prod.git.receive.valgrind.master')
        eq_(len(json_object), 1)
0
Example 73
Project: django-wsgi Source File: test_embedded_wsgi.py
def test_cookies_sent(self):
"""Verify that call_wsgi_app() forwards every ``Set-Cookie`` header from
the wrapped WSGI app into the Django response, preserving the cookie
value and each attribute (expires, path, domain, max-age).
"""
environ = complete_environ(SCRIPT_NAME="/dev", PATH_INFO="/trac/wiki")
request = _make_request(**environ)
# One Set-Cookie header per attribute combination under test:
headers = [
("Set-Cookie", "arg1=val1"),
("Set-Cookie",
"arg2=val2; expires=Fri,%2031-Dec-2010%2023:59:59%20GMT"),
("Set-Cookie", "arg3=val3; path=/"),
("Set-Cookie", "arg4=val4; path=/wiki"),
("Set-Cookie", "arg5=val5; domain=.example.org"),
("Set-Cookie", "arg6=val6; max-age=3600"),
(
"Set-Cookie",
"arg7=val7; expires=Fri,%2031-Dec-2010%2023:59:59%20GMT; "
"max-age=3600; domain=.example.org; path=/wiki",
),
# Now let's try an Unicode cookie:
("Set-Cookie", u"arg8=val8; max-age=3600"),
# TODO: The "secure" cookie *attribute* is broken in SimpleCookie.
# See: http://bugs.python.org/issue1028088
#("Set-Cookie", "arg9=val9; secure"),
]
# Expected parse of the headers above, keyed by cookie name; the special
# 'value' key is popped before comparing the remaining attributes.
expected_cookies = {
'arg1': {'value': "val1"},
'arg2': {
'value': "val2",
'expires': "Fri,%2031-Dec-2010%2023:59:59%20GMT",
},
'arg3': {'value': "val3", 'path': "/"},
'arg4': {'value': "val4", 'path': "/wiki"},
'arg5': {'value': "val5", 'domain': ".example.org"},
'arg6': {'value': "val6", 'max-age': 3600},
'arg7': {
'value': "val7",
'expires': "Fri,%2031-Dec-2010%2023:59:59%20GMT",
'path': "/wiki",
'domain': ".example.org",
'max-age': 3600,
},
'arg8': {'value': "val8", 'max-age': 3600},
# Why the next item as disabled? Check the `headers` variable above
#'arg9': {'value': "val9", 'secure': True},
}
# Running the app:
app = MockApp("200 OK", headers)
django_response = call_wsgi_app(app, request, "/wiki")
# Checking the cookies:
eq_(len(expected_cookies), len(django_response.cookies))
# Finally, let's check each cookie:
for (cookie_set_name, cookie_set) in django_response.cookies.items():
expected_cookie = expected_cookies[cookie_set_name]
expected_cookie_value = expected_cookie.pop("value")
eq_(expected_cookie_value, cookie_set.value,
'Cookie "%s" has a wrong value ("%s")' %
(cookie_set_name, cookie_set.value))
for (attr_key, attr_val) in expected_cookie.items():
eq_(
cookie_set[attr_key],
attr_val,
'Attribute "%s" in cookie %r is wrong (%r)' %
(attr_key, cookie_set_name, cookie_set[attr_key]),
)
0
Example 74
Project: fedbadges Source File: test_complicated_trigger.py
@patch('datanommer.models.Message.grep')
@patch('tahrir_api.dbapi.TahrirDatabase.get_person')
@patch('tahrir_api.dbapi.TahrirDatabase.assertion_exists')
def test_complicated_trigger_against_full_match(self,
assertion_exists,
get_person,
grep,
):
"""A fedoratagger tag.create message that fully satisfies the rule must
match and award the badge to user 'ralph'.  All external services
(datanommer grep, tahrir person/assertion lookups, FAS existence check)
are mocked to report a full match.
"""
# Sample fedmsg payload for a tag-creation event by user 'ralph'.
msg = {
'i': 2,
'msg': {
'tag': {
'dislike': 0,
'like': 1,
'package': 'mattd',
'tag': 'awesome',
'total': 1,
'votes': 1},
'user': {
'anonymous': False,
'rank': -1,
'username': 'ralph',
'votes': 4},
'vote': {
'like': True,
'tag': {
'dislike': 0,
'like': 1,
'package': 'mattd',
'tag': 'awesome',
'total': 1,
'votes': 1},
'user': {
'anonymous': False,
'rank': -1,
'username': 'ralph',
'votes': 4}}},
'timestamp': 1365444411.924043,
'topic': 'org.fedoraproject.prod.fedoratagger.tag.create',
'username': 'threebean'}
# Set up some mock stuff
class MockQuery(object):
def count(self):
return float("inf") # Master tagger
class MockPerson(object):
opt_out = False
grep.return_value = float("inf"), 1, MockQuery()
get_person.return_value = MockPerson()
assertion_exists.return_value = False
with patch("fedbadges.rules.user_exists_in_fas") as g:
g.return_value = True
eq_(self.rule.matches(msg), set(['ralph']))
0
Example 75
Project: skll Source File: test_utilities.py
def check_filter_features_no_arff_argparse(extension, filter_type,
label_col='y', id_col='id',
inverse=False, quiet=False):
"""
A utility function to check that we are setting up argument parsing
correctly for filter_features for ALL file types except ARFF.
We are not checking whether the results are correct because we
have separate tests for that.

Parameters
----------
extension : str
    File extension (e.g. '.csv') used to pick the reader/writer classes.
filter_type : str
    One of 'feature', 'id' or 'label'; selects which filtering flag
    ('-f', '-I' or '-L') is exercised.
label_col : str
    Value passed via '-l' and expected in the reader keyword arguments.
id_col : str
    Value passed via '--id_col' and expected in the reader keyword arguments.
inverse : bool
    If True, pass '-i' and expect ``inverse=True`` in the filter call.
quiet : bool
    If True, pass '-q' and expect ``quiet=True`` for reader and writer.
"""
# replace the run_configuration function that's called
# by the main() in filter_feature with a mocked up version
reader_class = EXT_TO_READER[extension]
writer_class = EXT_TO_WRITER[extension]
# create some dummy input and output filenames
infile = 'foo{}'.format(extension)
outfile = 'bar{}'.format(extension)
# create a simple featureset with actual ids, labels and features
fs, _ = make_classification_data(num_labels=3, train_test_ratio=1.0)
ff_cmd_args = [infile, outfile]
if filter_type == 'feature':
if inverse:
features_to_keep = ['f01', 'f04', 'f07', 'f10']
else:
features_to_keep = ['f02', 'f03', 'f05', 'f06', 'f08', 'f09']
ff_cmd_args.append('-f')
for f in features_to_keep:
ff_cmd_args.append(f)
elif filter_type == 'id':
if inverse:
ids_to_keep = ['EXAMPLE_{}'.format(x) for x in range(1, 100, 2)]
else:
ids_to_keep = ['EXAMPLE_{}'.format(x) for x in range(2, 102, 2)]
ff_cmd_args.append('-I')
for idee in ids_to_keep:
ff_cmd_args.append(idee)
elif filter_type == 'label':
if inverse:
labels_to_keep = ['0', '1']
else:
labels_to_keep = ['2']
ff_cmd_args.append('-L')
for lbl in labels_to_keep:
ff_cmd_args.append(lbl)
ff_cmd_args.extend(['-l', label_col])
ff_cmd_args.extend(['--id_col', id_col])
if inverse:
ff_cmd_args.append('-i')
if quiet:
ff_cmd_args.append('-q')
# substitute mock methods for the three main methods that get called by
# filter_features: the __init__() method of the appropriate reader,
# FeatureSet.filter() and the __init__() method of the appropriate writer.
# We also need to mock the read() and write() methods to prevent actual
# reading and writing.
with patch.object(reader_class, '__init__', autospec=True,
return_value=None) as read_init_mock, \
patch.object(reader_class, 'read', autospec=True, return_value=fs),\
patch.object(FeatureSet, 'filter', autospec=True) as filter_mock, \
patch.object(writer_class, '__init__', autospec=True,
return_value=None) as write_init_mock, \
patch.object(writer_class, 'write', autospec=True):
ff.main(argv=ff_cmd_args)
# get the various arguments from the three mocked up methods
read_pos_arguments, read_kw_arguments = read_init_mock.call_args
filter_pos_arguments, filter_kw_arguments = filter_mock.call_args
write_pos_arguments, write_kw_arguments = write_init_mock.call_args
# make sure that the arguments they got were the ones we specified
eq_(read_pos_arguments[1], infile)
eq_(read_kw_arguments['quiet'], quiet)
eq_(read_kw_arguments['label_col'], label_col)
eq_(read_kw_arguments['id_col'], id_col)
eq_(write_pos_arguments[1], outfile)
eq_(write_kw_arguments['quiet'], quiet)
# Note that we cannot test the label_col column for the writer.
# The reason is that it is set conditionally and those conditions
# do not execute with mocking.
eq_(filter_pos_arguments[0], fs)
eq_(filter_kw_arguments['inverse'], inverse)
if filter_type == 'feature':
eq_(filter_kw_arguments['features'], features_to_keep)
elif filter_type == 'id':
eq_(filter_kw_arguments['ids'], ids_to_keep)
elif filter_type == 'label':
eq_(filter_kw_arguments['labels'], labels_to_keep)
0
Example 76
def test_column_filters():
"""End-to-end test of flask-admin column filters for string, integer,
boolean, float and datetime columns: each filter operation is exercised
through a GET with ``flt0_<index>=<value>`` and the rendered page is
checked for the rows that must (not) appear.
"""
app, db, admin = setup()
Model1, Model2 = create_models(db)
# fill DB with values
fill_db(Model1, Model2)
# Test string filter
view = CustomModelView(Model1, column_filters=['test1'])
admin.add_view(view)
eq_(len(view._filters), 7)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
[
(0, 'contains'),
(1, 'not contains'),
(2, 'equals'),
(3, 'not equal'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# NOTE(review): the "# string - ..." comments below appear out of sync with
# the (index, operation) list asserted above (index 0 is 'contains' and
# index 2 is 'equals') — verify which mapping the flt0_N URLs actually use.
# Make some test clients
client = app.test_client()
# string - equals
rv = client.get('/admin/model1/?flt0_0=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not equal
rv = client.get('/admin/model1/?flt0_1=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - contains
rv = client.get('/admin/model1/?flt0_2=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not contains
rv = client.get('/admin/model1/?flt0_3=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - empty
rv = client.get('/admin/model1/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' in data)
ok_('test1_val_1' not in data)
ok_('test1_val_2' not in data)
# string - not empty
rv = client.get('/admin/model1/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' not in data)
ok_('test1_val_1' in data)
ok_('test1_val_2' in data)
# string - in list
rv = client.get('/admin/model1/?flt0_5=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test1_val_3' not in data)
ok_('test1_val_4' not in data)
# string - not in list
rv = client.get('/admin/model1/?flt0_6=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test1_val_3' in data)
ok_('test1_val_4' in data)
# Test numeric filter
view = CustomModelView(Model2, column_filters=['int_field'])
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# integer - equals
rv = client.get('/admin/model2/?flt0_0=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# integer - equals (huge number)
rv = client.get('/admin/model2/?flt0_0=6169453081680413441')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_5' in data)
ok_('string_field_val_4' not in data)
# integer - equals - test validation
rv = client.get('/admin/model2/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not equal
rv = client.get('/admin/model2/?flt0_1=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# integer - greater
rv = client.get('/admin/model2/?flt0_2=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# integer - smaller
rv = client.get('/admin/model2/?flt0_3=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# integer - empty
rv = client.get('/admin/model2/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# integer - not empty
rv = client.get('/admin/model2/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# integer - in list
rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# integer - in list (huge number)
rv = client.get('/admin/model2/?flt0_5=6169453081680413441')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_5' in data)
# integer - in list - test validation
rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not in list
rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# Test boolean filter
view = CustomModelView(Model2, column_filters=['bool_field'],
endpoint="_bools")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Bool Field']],
[
(0, 'equals'),
(1, 'not equal'),
])
# boolean - equals - Yes
rv = client.get('/admin/_bools/?flt0_0=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' not in data)
#ok_('string_field_val_3' not in data)
# boolean - equals - No
rv = client.get('/admin/_bools/?flt0_0=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' in data)
#ok_('string_field_val_3' in data)
# boolean - not equals - Yes
rv = client.get('/admin/_bools/?flt0_1=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' in data)
#ok_('string_field_val_3' in data)
# boolean - not equals - No
rv = client.get('/admin/_bools/?flt0_1=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' not in data)
#ok_('string_field_val_3' not in data)
# Test float filter
view = CustomModelView(Model2, column_filters=['float_field'],
endpoint="_float")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# float - equals
rv = client.get('/admin/_float/?flt0_0=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# float - equals - test validation
rv = client.get('/admin/_float/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not equal
rv = client.get('/admin/_float/?flt0_1=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# float - greater
rv = client.get('/admin/_float/?flt0_2=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# float - smaller
rv = client.get('/admin/_float/?flt0_3=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# float - empty
rv = client.get('/admin/_float/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# float - not empty
rv = client.get('/admin/_float/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# float - in list
rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# float - in list - test validation
rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not in list
rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# Test datetime filter
view = CustomModelView(Model1,
column_filters=['datetime_field'],
endpoint="_datetime")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'between'),
(5, 'not between'),
(6, 'empty'),
])
# datetime - equals
rv = client.get('/admin/_datetime/?flt0_0=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not equal
rv = client.get('/admin/_datetime/?flt0_1=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - greater
rv = client.get('/admin/_datetime/?flt0_2=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - smaller
rv = client.get('/admin/_datetime/?flt0_3=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - between
rv = client.get('/admin/_datetime/?flt0_4=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not between
rv = client.get('/admin/_datetime/?flt0_5=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - empty
rv = client.get('/admin/_datetime/?flt0_6=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' not in data)
# datetime - not empty
rv = client.get('/admin/_datetime/?flt0_6=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('datetime_obj1' in data)
ok_('datetime_obj2' in data)
0
Example 77
Project: flask-admin Source File: test_fileadmin.py
def test_file_admin():
"""Exercise the FileAdmin CRUD cycle against a temporary directory:
index listing, file edit, rename, upload, delete, and directory
creation/rename/delete.  302 responses indicate successful redirects
after POST actions.
"""
app, admin, view = create_view()
client = app.test_client()
# index
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
# edit
rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/edit/?path=dummy.txt', data=dict(
content='new_string'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
ok_('new_string' in rv.data.decode('utf-8'))
# rename
rv = client.get('/admin/myfileadmin/rename/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/rename/?path=dummy.txt', data=dict(
name='dummy_renamed.txt',
path='dummy.txt'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))
ok_('path=dummy.txt' not in rv.data.decode('utf-8'))
# upload
rv = client.get('/admin/myfileadmin/upload/')
eq_(rv.status_code, 200)
rv = client.post('/admin/myfileadmin/upload/', data=dict(
upload=(StringIO(""), 'dummy.txt'),
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))
# delete
rv = client.post('/admin/myfileadmin/delete/', data=dict(
path='dummy_renamed.txt'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed.txt' not in rv.data.decode('utf-8'))
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
# mkdir
rv = client.get('/admin/myfileadmin/mkdir/')
eq_(rv.status_code, 200)
rv = client.post('/admin/myfileadmin/mkdir/', data=dict(
name='dummy_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
ok_('path=dummy_dir' in rv.data.decode('utf-8'))
# rename - directory
rv = client.get('/admin/myfileadmin/rename/?path=dummy_dir')
eq_(rv.status_code, 200)
ok_('dummy_dir' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/rename/?path=dummy_dir', data=dict(
name='dummy_renamed_dir',
path='dummy_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed_dir' in rv.data.decode('utf-8'))
ok_('path=dummy_dir' not in rv.data.decode('utf-8'))
# delete - directory
rv = client.post('/admin/myfileadmin/delete/', data=dict(
path='dummy_renamed_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed_dir' not in rv.data.decode('utf-8'))
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
0
Example 78
def test_model():
"""Basic ModelView smoke test: view registration metadata (name,
endpoint, primary key, sortable columns, generated form classes),
then the create/list/edit/delete HTTP round trip for Model1.
"""
app, db, admin = setup()
Model1, Model2 = create_models(db)
db.create_all()
view = CustomModelView(Model1, db.session)
admin.add_view(view)
eq_(view.model, Model1)
eq_(view.name, 'Model1')
eq_(view.endpoint, 'model1view')
eq_(view._primary_key, 'id')
ok_('test1' in view._sortable_columns)
ok_('test2' in view._sortable_columns)
ok_('test3' in view._sortable_columns)
ok_('test4' in view._sortable_columns)
ok_(view._create_form_class is not None)
ok_(view._edit_form_class is not None)
eq_(view._search_supported, False)
eq_(view._filters, None)
# Verify form
eq_(view._create_form_class.test1.field_class, fields.TextField)
eq_(view._create_form_class.test2.field_class, fields.TextField)
eq_(view._create_form_class.test3.field_class, fields.TextAreaField)
eq_(view._create_form_class.test4.field_class, fields.TextAreaField)
# Make some test clients
client = app.test_client()
rv = client.get('/admin/model1view/')
eq_(rv.status_code, 200)
rv = client.get('/admin/model1view/new/')
eq_(rv.status_code, 200)
rv = client.post('/admin/model1view/new/',
data=dict(test1='test1large', test2='test2'))
eq_(rv.status_code, 302)
model = db.session.query(Model1).first()
eq_(model.test1, u'test1large')
eq_(model.test2, u'test2')
eq_(model.test3, u'')
eq_(model.test4, u'')
rv = client.get('/admin/model1view/')
eq_(rv.status_code, 200)
ok_(u'test1large' in rv.data.decode('utf-8'))
url = '/admin/model1view/edit/?id=%s' % model.id
rv = client.get(url)
eq_(rv.status_code, 200)
rv = client.post(url,
data=dict(test1='test1small', test2='test2large'))
eq_(rv.status_code, 302)
model = db.session.query(Model1).first()
eq_(model.test1, 'test1small')
eq_(model.test2, 'test2large')
eq_(model.test3, '')
eq_(model.test4, '')
url = '/admin/model1view/delete/?id=%s' % model.id
rv = client.post(url)
eq_(rv.status_code, 302)
eq_(db.session.query(Model1).count(), 0)
0
Example 79
def test_image_upload_field():
"""ImageUploadField behaviour: upload with thumbnail generation,
replacement of an existing image, deletion via the delete checkbox,
no-resize and auto-resize variants, allowed extensions, and
case-insensitive extension matching.
"""
app = Flask(__name__)
path = _create_temp()
def _remove_testimages():
# Best-effort cleanup of every file a previous run may have left behind.
safe_delete(path, 'test1.png')
safe_delete(path, 'test1_thumb.jpg')
safe_delete(path, 'test2.png')
safe_delete(path, 'test2_thumb.jpg')
safe_delete(path, 'test1.jpg')
safe_delete(path, 'test1.jpeg')
safe_delete(path, 'test1.gif')
safe_delete(path, 'test1.png')
safe_delete(path, 'test1.tiff')
class TestForm(form.BaseForm):
upload = form.ImageUploadField('Upload',
base_path=path,
thumbnail_size=(100, 100, True))
class TestNoResizeForm(form.BaseForm):
upload = form.ImageUploadField('Upload', base_path=path, endpoint='test')
class TestAutoResizeForm(form.BaseForm):
upload = form.ImageUploadField('Upload',
base_path=path,
max_size=(64, 64, True))
class Dummy(object):
pass
my_form = TestForm()
eq_(my_form.upload.base_path, path)
eq_(my_form.upload.endpoint, 'static')
_remove_testimages()
dummy = Dummy()
# Check upload
filename = op.join(op.dirname(__file__), 'data', 'copyleft.png')
with open(filename, 'rb') as fp:
with app.test_request_context(method='POST', data={'upload': (fp, 'test1.png')}):
my_form = TestForm(helpers.get_form_data())
ok_(my_form.validate())
my_form.populate_obj(dummy)
eq_(dummy.upload, 'test1.png')
ok_(op.exists(op.join(path, 'test1.png')))
# NOTE(review): cleanup above deletes 'test1_thumb.jpg' while this asserts
# 'test1_thumb.png' — verify the thumbnail extension the field produces.
ok_(op.exists(op.join(path, 'test1_thumb.png')))
# Check replace
with open(filename, 'rb') as fp:
with app.test_request_context(method='POST', data={'upload': (fp, 'test2.png')}):
my_form = TestForm(helpers.get_form_data())
ok_(my_form.validate())
my_form.populate_obj(dummy)
eq_(dummy.upload, 'test2.png')
ok_(op.exists(op.join(path, 'test2.png')))
ok_(op.exists(op.join(path, 'test2_thumb.png')))
ok_(not op.exists(op.join(path, 'test1.png')))
ok_(not op.exists(op.join(path, 'test1_thumb.jpg')))
# Check delete
with app.test_request_context(method='POST', data={'_upload-delete': 'checked'}):
my_form = TestForm(helpers.get_form_data())
ok_(my_form.validate())
my_form.populate_obj(dummy)
eq_(dummy.upload, None)
ok_(not op.exists(op.join(path, 'test2.png')))
ok_(not op.exists(op.join(path, 'test2_thumb.png')))
# Check upload no-resize
with open(filename, 'rb') as fp:
with app.test_request_context(method='POST', data={'upload': (fp, 'test1.png')}):
my_form = TestNoResizeForm(helpers.get_form_data())
ok_(my_form.validate())
my_form.populate_obj(dummy)
eq_(dummy.upload, 'test1.png')
ok_(op.exists(op.join(path, 'test1.png')))
ok_(not op.exists(op.join(path, 'test1_thumb.png')))
# Check upload, auto-resize
filename = op.join(op.dirname(__file__), 'data', 'copyleft.png')
with open(filename, 'rb') as fp:
with app.test_request_context(method='POST', data={'upload': (fp, 'test1.png')}):
my_form = TestAutoResizeForm(helpers.get_form_data())
ok_(my_form.validate())
my_form.populate_obj(dummy)
eq_(dummy.upload, 'test1.png')
ok_(op.exists(op.join(path, 'test1.png')))
filename = op.join(op.dirname(__file__), 'data', 'copyleft.tiff')
with open(filename, 'rb') as fp:
with app.test_request_context(method='POST', data={'upload': (fp, 'test1.tiff')}):
my_form = TestAutoResizeForm(helpers.get_form_data())
ok_(my_form.validate())
my_form.populate_obj(dummy)
eq_(dummy.upload, 'test1.jpg')
ok_(op.exists(op.join(path, 'test1.jpg')))
# check allowed extensions
for extension in ('gif', 'jpg', 'jpeg', 'png', 'tiff'):
filename = 'copyleft.' + extension
filepath = op.join(op.dirname(__file__), 'data', filename)
with open(filepath, 'rb') as fp:
with app.test_request_context(method='POST', data={'upload': (fp, filename)}):
my_form = TestNoResizeForm(helpers.get_form_data())
ok_(my_form.validate())
my_form.populate_obj(dummy)
eq_(dummy.upload, my_form.upload.data.filename)
# check case-sensitivity for extensions
filename = op.join(op.dirname(__file__), 'data', 'copyleft.jpg')
with open(filename, 'rb') as fp:
with app.test_request_context(method='POST', data={'upload': (fp, 'copyleft.JPG')}):
my_form = TestNoResizeForm(helpers.get_form_data())
ok_(my_form.validate())
0
Example 80
Project: editxt Source File: test_parser.py
def test_File():
"""Nose-style generator test for the File command-line field: each
``yield checker, args...`` line is collected and run as a separate test
case.  Covers path consumption, tab-completion (with and without
delimiter escaping), '...'-relative paths, '~' expansion, and the
``directory=True`` and ``default=`` variants.
"""
from editxt.test.util import test_app
field = File('path')
eq_(str(field), 'path')
eq_(repr(field), "File('path')")
with test_app("project(/dir) editor") as app:
# Build a small fixture tree under the test app's tmp directory.
tmp = test_app(app).tmp
os.mkdir(join(tmp, "dir"))
os.mkdir(join(tmp, "space dir"))
for path in [
"dir/a.txt",
"dir/b.txt",
"dir/B file",
".hidden",
"file.txt",
"file.doc",
"space dir/file",
#"x y",
]:
assert not isabs(path), path
with open(join(tmp, path), "w") as fh:
pass
test = make_consume_checker(field)
yield test, "relative.txt", 0, Error("cannot make absolute path (no context): relative.txt")
test = make_completions_checker(field)
yield test, "", []
editor = app.windows[0].projects[0].editors[0]
field = field.with_context(editor)
test = make_completions_checker(field)
yield test, ".../", ["a.txt", "B file", "b.txt"], 4
with replattr(editor.project, "path", editor.project.path + "/"):
yield test, ".../", ["a.txt", "B file", "b.txt"], 4
yield test, "...//", ["a.txt", "B file", "b.txt"], 5
test = make_arg_string_checker(field)
yield test, "/str", "/str"
yield test, "/a b", '"/a b"', 6
yield test, os.path.expanduser("~/a b"), '"~/a b"', 7
yield test, join(tmp, "dir/file"), "file"
yield test, join(tmp, "dir/a b"), '"a b"', 5
yield test, join(tmp, "file"), join(tmp, "file")
yield test, "arg/", Error("not a file: path='arg/'")
test = make_consume_checker(field)
yield test, '', 0, (None, 0)
yield test, 'a', 0, (join(tmp, 'dir/a'), 2)
yield test, 'abc', 0, (join(tmp, 'dir/abc'), 4)
yield test, 'abc ', 0, (join(tmp, 'dir/abc'), 4)
yield test, 'file.txt', 0, (join(tmp, 'dir/file.txt'), 9)
yield test, '../file.txt', 0, (join(tmp, 'dir/../file.txt'), 12)
yield test, '/file.txt', 0, ('/file.txt', 10)
yield test, '~/file.txt', 0, (os.path.expanduser('~/file.txt'), 11)
yield test, '...', 0, (join(tmp, 'dir'), 4)
yield test, '.../file.txt', 0, (join(tmp, 'dir/file.txt'), 13)
yield test, '"ab c"', 0, (join(tmp, 'dir/ab c'), 6)
yield test, "'ab c'", 0, (join(tmp, 'dir/ab c'), 6)
yield test, "'ab c/'", 0, (join(tmp, 'dir/ab c/'), 7)
# completions
# Redirect '~' to the fixture tmp dir so completion tests are hermetic.
def expanduser(path):
if path.startswith("~"):
if len(path) == 1:
return tmp
assert path.startswith("~/"), path
return tmp + path[1:]
return path
def test(input, output):
if input.startswith("/"):
input = tmp + "/"
with replattr(os.path, "expanduser", expanduser):
arg = mod.Arg(field, input, 0, None)
eq_(field.get_completions(arg), output)
yield test, "", ["a.txt", "B file", "b.txt"]
yield test, "a", ["a.txt"]
yield test, "a.txt", ["a.txt"]
yield test, "b", ["B file", "b.txt"]
yield test, "B", ["B file"]
yield test, "..", ["../"]
yield test, "../", ["dir", "file.doc", "file.txt", "space dir"]
yield test, "../.", [".hidden"]
yield test, "...", [".../"]
yield test, ".../", ["a.txt", "B file", "b.txt"]
yield test, "../dir", ["dir/"]
yield test, "../dir/", ["a.txt", "B file", "b.txt"]
yield test, "../sp", ["space dir"]
yield test, "../space\\ d", ["space dir"]
yield test, "../space\\ dir", ["space dir/"]
yield test, "../space\\ dir/", ["file"]
yield test, "val", []
yield test, "/", ["dir", "file.doc", "file.txt", "space dir"]
yield test, "~", ["~/"]
yield test, "~/", ["dir", "file.doc", "file.txt", "space dir"]
# delimiter completion
def test(input, output, start=0):
arg = mod.Arg(field, input, 0, None)
words = field.get_completions(arg)
assert all(isinstance(w, CompleteWord) for w in words), \
repr([w for w in words if not isinstance(w, CompleteWord)])
eq_([w.complete() for w in words], output)
eq_([w.start for w in words], [start] * len(words), words)
yield test, "", ["a.txt ", "B\\ file ", "b.txt "]
yield test, "x", []
yield test, "..", ["../"]
yield test, "../", ["dir/", "file.doc ", "file.txt ", "space\\ dir/"], 3
yield test, "../dir", ["dir/"], 3
yield test, "../di", ["dir/"], 3
yield test, "../sp", ["space\\ dir/"], 3
yield test, "../space\\ d", ["space\\ dir/"], 3
yield test, "../space\\ dir", ["space\\ dir/"], 3
yield test, ".../", ["a.txt ", "B\\ file ", "b.txt "], 4
yield test, "../space\\ dir/", ["file "], 14
yield test, "~", ["~/"], None
field = File('dir', directory=True)
eq_(str(field), 'dir')
eq_(repr(field), "File('dir', directory=True)")
field = field.with_context(editor)
test = make_consume_checker(field)
yield test, '', 0, (None, 0)
yield test, 'a', 0, (join(tmp, 'dir/a'), 2)
yield test, 'abc', 0, (join(tmp, 'dir/abc'), 4)
yield test, 'abc ', 0, (join(tmp, 'dir/abc'), 4)
yield test, 'abc/', 0, (join(tmp, 'dir/abc/'), 5)
yield test, '...', 0, (join(tmp, 'dir'), 4)
yield test, '.../abc/', 0, (join(tmp, 'dir/abc/'), 9)
test = make_completions_checker(field)
yield test, "", [], 0
yield test, "a", [], 0
yield test, "..", ["../"], 0
yield test, "../", ["dir", "space dir"], 3
field = File('dir', default="~/dir")
check = make_completions_checker(field)
def test(input, output, *args):
if input.startswith("/"):
input = tmp + "/"
with replattr(os.path, "expanduser", expanduser):
check(input, output, *args)
yield test, "", [], 0
project = app.windows[0].projects[0]
check = make_completions_checker(field.with_context(project))
yield test, "", ["a.txt", "B file", "b.txt"], 0
yield test, "./", ["a.txt", "B file", "b.txt"], 2
with replattr(project, "path", project.path + "/"):
check = make_completions_checker(field.with_context(project))
yield test, "", ["a.txt", "B file", "b.txt"], 0
yield test, "./", ["a.txt", "B file", "b.txt"], 2
0
Example 81
Project: bang Source File: test_util.py
def test_deep_merge_dicts():
    """Verify ``U.deep_merge_dicts`` merges ``b`` into ``a`` in place.

    Expected semantics (per the ``exp`` fixture below): nested dicts are
    merged recursively; scalar and list values from ``b`` replace the
    corresponding values in ``a``; keys unique to either side are kept.
    """
    # Base mapping that the merge mutates.
    a = {
        'a': 1,
        'b': 2,
        'c': 3,
        'd': {
            'd1': 'one',
            'd2': 'two',
            'd3': 'three',
        },
        'e': {
            'eA': 'ay',
            'eB': {
                'eB1': 'uno',
                'eB2': 'dos',
                'eB3': {
                    'eB3a': 'ah',
                    'eB3b': 'bay',
                    'eB3f': 'same',
                },
            },
        }
    }
    # Overlay: overrides scalars, replaces a scalar with a list, and adds
    # new keys at several nesting depths.
    b = {
        'c': 42,
        'd': {
            'd1': 'new',
            'd3': ['replace', 'the', 'scalar'],
            'd4': 4,
        },
        'e': {
            'eB': {
                'eB3': {
                    'eB3a': 'alpha',
                    'eB3e': 'echo',
                    'eB3f': 'same',
                },
            },
        },
        'z': 26,
    }
    # The expected deep merge of ``b`` into ``a``.
    exp = {
        'a': 1,
        'b': 2,
        'c': 42,
        'd': {
            'd1': 'new',
            'd2': 'two',
            'd3': ['replace', 'the', 'scalar'],
            'd4': 4,
        },
        'e': {
            'eA': 'ay',
            'eB': {
                'eB1': 'uno',
                'eB2': 'dos',
                'eB3': {
                    'eB3a': 'alpha',
                    'eB3b': 'bay',
                    'eB3e': 'echo',
                    'eB3f': 'same',
                },
            },
        },
        'z': 26,
    }
    U.deep_merge_dicts(a, b)  # mutates ``a`` in place
    T.eq_(exp, a)
0
Example 82
def test_inline_form():
    """End-to-end test of flask-admin inline model forms.

    Builds a ``User`` model with a one-to-many ``UserInfo`` relation,
    registers a ``ModelView`` with ``inline_models``, then drives the
    create/edit/delete views, checking status codes and row counts.
    """
    app, db, admin = setup()
    client = app.test_client()

    # Set up models and database
    class User(db.Model):
        __tablename__ = 'users'
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String, unique=True)

        def __init__(self, name=None):
            self.name = name

    class UserInfo(db.Model):
        __tablename__ = 'user_info'
        id = db.Column(db.Integer, primary_key=True)
        key = db.Column(db.String, nullable=False)
        val = db.Column(db.String)
        user_id = db.Column(db.Integer, db.ForeignKey(User.id))
        user = db.relationship(User, backref=db.backref(
            'info', cascade="all, delete-orphan", single_parent=True))

    db.create_all()

    # Set up Admin
    class UserModelView(ModelView):
        inline_models = (UserInfo,)

    view = UserModelView(User, db.session)
    admin.add_view(view)

    # Basic tests
    ok_(view._create_form_class is not None)
    ok_(view._edit_form_class is not None)
    eq_(view.endpoint, 'userview')

    # Verify form
    eq_(view._create_form_class.name.field_class, fields.TextField)
    eq_(view._create_form_class.info.field_class, InlineModelFormList)

    rv = client.get('/admin/userview/')
    eq_(rv.status_code, 200)
    rv = client.get('/admin/userview/new/')
    eq_(rv.status_code, 200)

    # Create: a user without inline info rows, then one with a single row.
    rv = client.post('/admin/userview/new/', data=dict(name=u'äõüxyz'))
    eq_(rv.status_code, 302)
    eq_(User.query.count(), 1)
    eq_(UserInfo.query.count(), 0)
    rv = client.post('/admin/userview/new/',
                     data={'name': u'fbar',
                           'info-0-key': 'foo', 'info-0-val': 'bar'})
    eq_(rv.status_code, 302)
    eq_(User.query.count(), 2)
    eq_(UserInfo.query.count(), 1)

    # Edit
    rv = client.get('/admin/userview/edit/?id=2')
    eq_(rv.status_code, 200)

    # Edit - update the existing inline row in place.
    rv = client.post('/admin/userview/edit/?id=2',
                     data={'name': u'barfoo',
                           'info-0-id': 1, 'info-0-key': u'xxx',
                           'info-0-val': u'yyy'})
    eq_(UserInfo.query.count(), 1)
    eq_(UserInfo.query.one().key, u'xxx')

    # Edit - delete row 0 and add a new row in the same request.
    rv = client.post('/admin/userview/edit/?id=2',
                     data={'name': u'barf',
                           'del-info-0': 'on', 'info-0-id': '1',
                           'info-0-key': 'yyy', 'info-0-val': 'xxx',
                           'info-1-id': None, 'info-1-key': u'bar',
                           'info-1-val': u'foo'})
    eq_(rv.status_code, 302)
    eq_(User.query.count(), 2)
    eq_(User.query.get(2).name, u'barf')
    eq_(UserInfo.query.count(), 1)
    eq_(UserInfo.query.one().key, u'bar')

    # Delete: cascade removes the orphaned UserInfo rows.
    rv = client.post('/admin/userview/delete/?id=2')
    eq_(rv.status_code, 302)
    eq_(User.query.count(), 1)
    rv = client.post('/admin/userview/delete/?id=1')
    eq_(rv.status_code, 302)
    eq_(User.query.count(), 0)
    eq_(UserInfo.query.count(), 0)
0
Example 83
Project: vcfnp Source File: test_table.py
def test_tabulate_variants_flatten_ann():
    """Check ANN field handling in ``VariantsTable``: raw (no flattening),
    explicit flattening with a fill value, and default flattening."""
    vcf_fn = 'fixture/test_ann.vcf'

    # test without flattening: ANN stays a single pipe-delimited string.
    fields = ('CHROM', 'POS', 'REF', 'ALT', 'ANN')
    tbl = list(VariantsTable(vcf_fn, fields=fields, flatteners={'ANN': None}))
    debug(tbl)
    eq_(fields, tbl[0])
    eq_(('2L', 103, 'C', 'T',
         'T|intergenic_region|MODIFIER|AGAP004677|AGAP004677|intergenic_region'
         '|AGAP004677|||||||||'),
        tbl[1])
    eq_(('2L', 192, 'G', 'A', '.'), tbl[2])
    eq_(('2L', 13513722, 'A', 'T',
         'T|missense_variant|MODERATE|AGAP005273|AGAP005273|transcript|AGAP005'
         '273-RA|VectorBase|1/4|n.17A>T|p.Asp6Val|17/4788|17/-1|6/-1||'),
        tbl[3])

    # test with explicit flattening: ANN expands to the named sub-fields,
    # missing values filled with the string 'NA'.
    fields = ('CHROM', 'POS', 'REF', 'ALT', 'ANN')
    tbl = list(
        VariantsTable(
            vcf_fn,
            fields=fields,
            flatteners={'ANN': (vcfnp.eff.ANN_FIELDS,
                                vcfnp.eff.flatten_ann('NA'))}
        )
    )
    debug(tbl)
    eq_(fields[:4] + ('Allele', 'Annotation', 'Annotation_Impact', 'Gene_Name',
                      'Gene_ID', 'Feature_Type', 'Feature_ID',
                      'Transcript_BioType', 'Rank', 'HGVS_c', 'HGVS_p',
                      'cDNA_pos', 'cDNA_length', 'CDS_pos', 'CDS_length',
                      'AA_pos', 'AA_length', 'Distance'),
        tbl[0])
    eq_(('2L', 103, 'C', 'T', 'T', 'intergenic_region', 'MODIFIER',
         'AGAP004677', 'AGAP004677', 'intergenic_region', 'AGAP004677', 'NA',
         'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA'),
        tbl[1])
    eq_(('2L', 192, 'G', 'A', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA',
         'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA'), tbl[2])
    eq_(('2L', 13513722, 'A', 'T', 'T', 'missense_variant', 'MODERATE',
         'AGAP005273', 'AGAP005273', 'transcript', 'AGAP005273-RA',
         'VectorBase', '1/4', 'n.17A>T', 'p.Asp6Val', '17', '4788', '17', '-1',
         '6', '-1', 'NA'), tbl[3])

    # test with default flattening: same expansion, but missing values are
    # filled with None (fill=None).
    fields = ('CHROM', 'POS', 'REF', 'ALT', 'ANN')
    tbl = list(VariantsTable(vcf_fn, fields=fields, fill=None))
    debug(tbl)
    eq_(fields[:4] + ('Allele', 'Annotation', 'Annotation_Impact', 'Gene_Name',
                      'Gene_ID', 'Feature_Type', 'Feature_ID',
                      'Transcript_BioType', 'Rank', 'HGVS_c', 'HGVS_p',
                      'cDNA_pos', 'cDNA_length', 'CDS_pos', 'CDS_length',
                      'AA_pos', 'AA_length', 'Distance'),
        tbl[0])
    eq_(('2L', 103, 'C', 'T', 'T', 'intergenic_region', 'MODIFIER',
         'AGAP004677', 'AGAP004677', 'intergenic_region', 'AGAP004677', None,
         None, None, None, None, None, None, None, None, None, None),
        tbl[1])
    eq_(('2L', 192, 'G', 'A', None, None, None, None, None, None, None, None,
         None, None, None, None, None, None, None, None, None, None), tbl[2])
    eq_(('2L', 13513722, 'A', 'T', 'T', 'missense_variant', 'MODERATE',
         'AGAP005273', 'AGAP005273', 'transcript', 'AGAP005273-RA',
         'VectorBase', '1/4', 'n.17A>T', 'p.Asp6Val', '17', '4788', '17', '-1',
         '6', '-1', None), tbl[3])
0
Example 84
Project: spinoff Source File: actor_test.py
def test_absolute_uri():
    """Exercise ``Uri.parse`` on the root (empty) path, a one-segment path,
    and a two-segment path, checking name/path/parent/root/steps/local."""
    # root (empty) uri
    uri = Uri.parse('')
    eq_(str(uri), '')
    eq_(uri.name, '')
    eq_(uri.path, '')
    ok_(not uri.url)
    eq_(uri, Uri.parse(''))
    eq_(uri, '')
    ok_(not uri.parent)
    ok_(not uri.node)
    eq_(uri.root, uri)  # root of the empty uri is itself
    eq_(list(uri.steps), [''])
    ok_(uri.local is uri)

    # single-segment path
    uri = Uri.parse('/foo')
    eq_(str(uri), '/foo')
    eq_(uri.name, 'foo')
    eq_(uri.path, '/foo')
    ok_(not uri.url)
    eq_(uri, Uri.parse('/foo'))
    eq_(uri, '/foo', "Uri.__eq__ supports str")
    ok_(uri.parent == '')
    ok_(not uri.node)
    eq_(uri.root, '')
    eq_(list(uri.steps), ['', 'foo'])
    ok_(uri.local is uri)

    # two-segment path
    uri = Uri.parse('/foo/bar')
    eq_(uri.name, 'bar')
    eq_(uri, '/foo/bar', "Uri.__eq__ supports str")
    eq_(uri.path, '/foo/bar')
    ok_(not uri.url)
    eq_(uri, Uri.parse('/foo/bar'))
    eq_(uri, '/foo/bar', "Uri.__eq__ supports str")
    eq_(uri.parent, '/foo')
    ok_(not uri.node)
    eq_(uri.root, '')
    eq_(list(uri.steps), ['', 'foo', 'bar'])
    ok_(uri.local is uri)
0
Example 85
Project: varcode Source File: test_variant_collection.py
def test_variant_collection_serialization():
    """Round-trip a ``VariantCollection`` through pickle and JSON, checking
    that variants, ordering, and per-variant metadata survive."""
    variant_list = [
        Variant(
            1, start=10, ref="AA", alt="AAT", ensembl=77),
        Variant(10, start=15, ref="A", alt="G"),
        Variant(20, start=150, ref="", alt="G"),
    ]
    original = VariantCollection(
        variant_list,
        source_to_metadata_dict={
            "test_data":
                {variant: {"a": "b", "bar": 2} for variant in variant_list}})

    # This causes the variants' ensembl objects to make a SQL connection,
    # which makes the ensembl object non-serializable. By calling this
    # method, we are checking that we don't attempt to directly serialize
    # the ensembl object.
    original.effects()

    original_first_variant = original[0]
    original_metadata = original.metadata

    # Test pickling
    reconstructed = pickle.loads(pickle.dumps(original))
    eq_(original, reconstructed)
    eq_(reconstructed[0], original_first_variant)
    eq_(reconstructed.metadata[original_first_variant],
        original_metadata[original_first_variant])

    # Intersections should also survive a pickle round trip.
    merged = original.intersection(original)
    merged_reconstructed = pickle.loads(pickle.dumps(merged))
    eq_(merged, merged_reconstructed)

    # Test JSON serialization
    variants_from_json = VariantCollection.from_json(original.to_json())
    eq_(original, variants_from_json)
    eq_(variants_from_json[0], original_first_variant)
    # pylint: disable=no-member
    eq_(variants_from_json.metadata[original_first_variant],
        original_metadata[original_first_variant])
0
Example 86
def test_port(self):
    """Test peep port: converting peep-style sha256 comments into pip's
    ``--hash`` requirement syntax."""
    # We can't get the package name from URL-based requirements before pip
    # 1.0. Tolerate it so we can at least test everything else:
    try:
        activate('pip>=6.1.0')
    except RuntimeError:
        try:
            activate('pip>=1.0.1')
        except RuntimeError:
            schema_package_name = 'None'
        else:
            schema_package_name = 'schema'
    else:
        schema_package_name = 'https://github.com/erikrose/schema/archive/99dc4130f0f05fd3c2d4bc6663a2419851f3c90f.zip#egg=schema'
    # NOTE(review): leading whitespace inside this literal may have been
    # lost in transcription — verify against the original peep test suite.
    reqs = """
# sha256: Jo-gDCfedW1xZj3WH3OkqNhydWm7G0dLLOYCBVOCaHI
# sha256: mXhebPcVzc3lne4FpnbpnwSDWnHnztIByjF0AcMiupY
certifi==2015.04.28
# Those were 2 hashes!
# A comment above hash
# sha256: mrHTE_mbIJ-PcaYp82gzAwyNfHIoLPd1aDS69WfcpmI
# A comment between hash and package
click==4.0
# No hash:
configobj==5.0.6
# sha256: mEFMy7mQkCOXKZaP5CJaMXPj8OwbteW9dmq9drwZqNk
https://github.com/erikrose/schema/archive/99dc4130f0f05fd3c2d4bc6663a2419851f3c90f.zip#egg=schema
"""
    with requirements(reqs) as reqs_path:
        result = run('{python} {peep} port {reqs}',
                     python=python_path(),
                     peep=peep_path(),
                     reqs=reqs_path).decode('ascii')
        expected = (
            '\n# from {reqs_path}\n\n'
            'certifi==2015.04.28 \\\n'
            ' --hash=sha256:268fa00c27de756d71663dd61f73a4a8d8727569bb1b474b2ce6020553826872 \\\n'
            ' --hash=sha256:99785e6cf715cdcde59dee05a676e99f04835a71e7ced201ca317401c322ba96\n'
            'click==4.0 \\\n'
            ' --hash=sha256:9ab1d313f99b209f8f71a629f36833030c8d7c72282cf7756834baf567dca662\n'
            'configobj==5.0.6\n'
            '{schema} \\\n'
            ' --hash=sha256:98414ccbb99090239729968fe4225a3173e3f0ec1bb5e5bd766abd76bc19a8d9\n'
            ''.format(schema=schema_package_name, reqs_path=reqs_path))
        eq_(result, expected)
0
Example 87
Project: varlens Source File: test_variants.py
def test_read_evidence():
    """Run the varlens variants CLI with ``--include-read-evidence`` and
    check ref/alt/depth counts, including the chunked-CSV and
    single-variant (with ``--is-reverse``) code paths."""
    result = run([
        data_path("CELSR1/vcfs/vcf_1.vcf"),
        "--include-read-evidence",
        "--reads", data_path("CELSR1/bams/bam_0.bam"),
        "--genome", "b37",
    ])
    allele_groups = ["num_ref", "num_alt", "total_depth"]
    for allele_group in allele_groups:
        result[allele_group] = result[allele_group].astype(int)
    eq_(cols_concat(
        result,
        ["contig", "interbase_start"] + allele_groups),
        {
            '22-50636217-0-0-0',
            '22-50875932-0-0-0',
            '22-21829554-0-0-0',
            "22-46931059-50-0-50",
            "22-46931061-51-0-51",
        })

    # Same thing but with chunk rows = 1
    with temp_file(".csv") as out_csv:
        run([
            data_path("CELSR1/vcfs/vcf_1.vcf"),
            "--include-read-evidence",
            "--reads", data_path("CELSR1/bams/bam_0.bam"),
            "--genome", "b37",
            "--chunk-rows", "1",
            "--out", out_csv,
        ])
        result = pandas.read_csv(out_csv)
        allele_groups = ["num_ref", "num_alt", "total_depth"]
        for allele_group in allele_groups:
            result[allele_group] = result[allele_group].astype(int)
        eq_(cols_concat(
            result,
            ["contig", "interbase_start"] + allele_groups),
            {
                '22-50636217-0-0-0',
                '22-50875932-0-0-0',
                '22-21829554-0-0-0',
                "22-46931059-50-0-50",
                "22-46931061-51-0-51",
            })

    # Single-variant query against a different BAM.
    result = run([
        "--include-read-evidence",
        "--reads", data_path("gatk_mini_bundle_extract.bam"),
        "--read-source-name", "foo",
        "--single-variant", "chr20:10008951", "C", "A",
        "--genome", "b37",
    ])
    for allele_group in allele_groups:
        result[allele_group] = result[allele_group].astype(int)
    eq_(cols_concat(result, expected_cols + allele_groups),
        {"GRCh37-20-10008950-10008951-C-A-4-1-5"})

    # Same query restricted to reverse-strand reads.
    result = run([
        "--include-read-evidence",
        "--reads", data_path("gatk_mini_bundle_extract.bam"),
        "--read-source-name", "foo",
        "--single-variant", "chr20:10008951", "C", "A",
        "--genome", "b37",
        "--is-reverse",
    ])
    for allele_group in allele_groups:
        result[allele_group] = result[allele_group].astype(int)
    eq_(cols_concat(result, expected_cols + allele_groups),
        {"GRCh37-20-10008950-10008951-C-A-1-0-1"})
0
Example 88
Project: editxt Source File: test_config.py
def test_Config_schema():
    """Generator test for Config schema validation: each yielded case checks
    a lookup result (or expected exception) plus the errors it logs."""
    eq_(configify({"indent.mode": "xyz"}), {"indent": {"mode": "xyz"}})

    def test(data, key, value, errors={}, stop=[1]):
        # Build a Config over an intentionally missing path so only the
        # in-memory ``data`` is consulted.
        config = mod.Config("/tmp/missing.3216546841325465132546514321654")
        config.data = configify(data)
        config.transform_deprecations()
        with CaptureLog(mod) as log:
            if isinstance(value, Exception):
                with assert_raises(type(value), msg=str(value)):
                    config[key]
            else:
                eq_(config[key], value)
            eq_(dict(log.data), errors)

    yield test, {}, "unknown", KeyError("unknown")
    yield test, {}, "unknown.sub", KeyError("unknown.sub")
    yield test, {}, "theme.highlight_selected_text.enabled", True
    yield test, {"theme.highlight_selected_text": {}}, \
        "theme.highlight_selected_text.enabled", True
    yield test, {"theme.highlight_selected_text": {"enabled": True}}, \
        "theme.highlight_selected_text.enabled", True
    yield test, {"theme.highlight_selected_text": []}, \
        "theme.highlight_selected_text.enabled", True, \
        {"error": ["theme.highlight_selected_text: expected dict, got []"]}
    yield test, {"theme.highlight_selected_text": {"enabled": "treu"}}, \
        "theme.highlight_selected_text.enabled", True, \
        {"error": ["theme.highlight_selected_text.enabled: expected boolean, got 'treu'"]}
    yield test, {"theme.highlight_selected_text": True}, \
        "theme.highlight_selected_text.enabled", True, \
        {"error": ["theme.highlight_selected_text: expected dict, got True"]}
    yield test, {}, "theme.highlight_selected_text.enabled.x", \
        ValueError("theme.highlight_selected_text.enabled.x: "
                   "theme.highlight_selected_text.enabled is boolean, not a dict")
    # deprecated settings should still work
    yield test, {"highlight_selected_text": {"enabled": False}}, \
        "theme.highlight_selected_text.enabled", False
    yield test, {"highlight_selected_text": {"color": get_color("FFEEFF")}}, \
        "theme.highlight_selected_text.color", get_color("FFEEFF")
    yield test, {}, "indent.mode", const.INDENT_MODE_SPACE
    yield test, {"indent": {"mode": "xyz"}}, \
        "indent.mode", const.INDENT_MODE_SPACE, \
        {"error": ["indent.mode: expected one of (space|tab), got 'xyz'"]}
    yield test, {}, "indent.size", 4
    yield test, {"indent.size": "two"}, "indent.size", 4, \
        {"error": ["indent.size: expected integer, got 'two'"]}
    yield test, {"indent.size": 0}, "indent.size", 4, \
        {"error": ["indent.size: 0 is less than the minimum value (1)"]}
    yield test, {}, "newline_mode", const.NEWLINE_MODE_UNIX
    yield test, {"newline_mode": "xyz"}, \
        "newline_mode", const.NEWLINE_MODE_UNIX, \
        {"error": ["newline_mode: expected one of (LF|CR|CRLF|UNICODE), got 'xyz'"]}
    yield test, {}, "theme.right_margin.position", const.DEFAULT_RIGHT_MARGIN
    yield test, {"theme.right_margin": {"position": 42}}, "theme.right_margin.position", 42
    yield test, {"theme.right_margin": {"position": "xyz"}}, \
        "theme.right_margin.position", const.DEFAULT_RIGHT_MARGIN, \
        {"error": ["theme.right_margin.position: expected integer, got 'xyz'"]}
    # deprecated key should still work
    yield test, {"right_margin": {"position": 42}}, "theme.right_margin.position", 42
    yield test, {}, "theme.right_margin.line_color", get_color("E6E6E6")
    yield test, {}, "theme.right_margin.margin_color", get_color("F7F7F7")
    # deprecated key should still work
    yield test, {"right_margin.line_color": get_color("eeeeee")}, \
        "theme.right_margin.line_color", get_color("eeeeee")
    yield test, {"right_margin.margin_color": get_color("eeeeee")}, \
        "theme.right_margin.margin_color", get_color("eeeeee")
    yield test, {}, "soft_wrap", const.WRAP_NONE
    yield test, {"soft_wrap": "xyz"}, \
        "soft_wrap", const.WRAP_NONE, \
        {"error": ["soft_wrap: expected one of (none|word), got 'xyz'"]}
    yield test, {}, "updates_path_on_file_move", True
    yield test, {}, "diff_program", "opendiff"
    yield test, {"diff_program": "gdiff -u"}, "diff_program", "gdiff -u"
0
Example 89
Project: jingo-minify Source File: tests.py
@patch('jingo_minify.helpers.time.time')
@patch('jingo_minify.helpers.os.path.getmtime')
def test_js_helper(getmtime, time):
    """
    Given the js() tag if we return the assets that make up that bundle
    as defined in settings.MINIFY_BUNDLES.
    If we're not in debug mode, we just return a minified url
    """
    # Pin mtime/time so the ?build= cache-buster is deterministic.
    getmtime.return_value = 1
    time.return_value = 1
    env = jingo.env

    # debug=True: one <script> tag per source file in the bundle.
    t = env.from_string("{{ js('common', debug=True) }}")
    s = t.render()
    expected = "\n".join(['<script src="%s?build=1"></script>'
                          % (settings.STATIC_URL + j) for j in
                          settings.MINIFY_BUNDLES['js']['common']])
    eq_(s, expected)

    # debug=False: a single minified bundle url.
    t = env.from_string("{{ js('common', debug=False) }}")
    s = t.render()
    eq_(s, '<script src="%sjs/common-min.js?build=%s"></script>' %
        (settings.STATIC_URL, BUILD_ID_JS))

    # Absolute-url bundle entries are passed through in debug mode.
    t = env.from_string("{{ js('common_url', debug=True) }}")
    s = t.render()
    eq_(s, '<script src="%s"></script>' %
        "http://example.com/test.js?build=1")
    t = env.from_string("{{ js('common_url', debug=False) }}")
    s = t.render()
    eq_(s, '<script src="%sjs/common_url-min.js?build=%s"></script>' %
        (settings.STATIC_URL, BUILD_ID_JS))

    # Protocol-relative urls behave the same way.
    t = env.from_string("{{ js('common_protocol_less_url', debug=True) }}")
    s = t.render()
    eq_(s, '<script src="%s"></script>' %
        "//example.com/test.js?build=1")
    t = env.from_string("{{ js('common_protocol_less_url', debug=False) }}")
    s = t.render()
    eq_(s, '<script src="%sjs/common_protocol_less_url-min.js?build=%s">'
        '</script>' % (settings.STATIC_URL, BUILD_ID_JS))

    # Mixed bundle: relative, absolute, protocol-relative, and https urls.
    t = env.from_string("{{ js('common_bundle', debug=True) }}")
    s = t.render()
    eq_(s, '<script src="js/test.js?build=1"></script>\n'
        '<script src="http://example.com/test.js?build=1"></script>\n'
        '<script src="//example.com/test.js?build=1"></script>\n'
        '<script src="https://example.com/test.js?build=1"></script>')
    t = env.from_string("{{ js('common_bundle', debug=False) }}")
    s = t.render()
    eq_(s, '<script src="%sjs/common_bundle-min.js?build=%s"></script>' %
        (settings.STATIC_URL, BUILD_ID_JS))
0
Example 90
@logged_in
@with_settings(aws={'bucket': 'humptydump'})
def test_edit_tags(self):
    """Browser-driven test of the image tag editor: list, remove, and add
    tags, checking both the UI state and the persisted tag list."""
    session = Client().session()
    pic = Image(filename="silly", title="Silly Picture")
    session.add(pic)
    session.flush()
    pic.add_tags(['goofy', 'silly'])
    session.flush()
    self.visit_url('/image/{0}'.format(pic.image_id))

    # Open the tag listing.
    tag_button = self.browser.find_by_id('tag-button')
    assert tag_button, "Couldn't find a button for listing tags!"
    tag_button.click()
    tags = self.browser.find_by_css('li.tag')
    assert all([t.visible for t in tags]), "Tag listing was not visible!"
    eq_([t.text for t in tags], ['goofy', 'silly'])

    # Enter edit mode: the plain listing hides, remove/add controls show.
    self.browser.click_link_by_text('Edit')
    assert all([not t.visible for t in tags]), "Tag listing didn't disappear!"
    tag_removes = self.browser.find_by_css('a.remove-tag')
    eq_([t.text for t in tag_removes], ['goofy', 'silly'])
    assert all([t.visible for t in tag_removes]), "Remove tag controls weren't visible!"
    add_tag = self.browser.find_by_css('a.add-tag')
    eq_(add_tag.text, 'Add tag')
    assert add_tag.visible, "Add tag control wasn't visible!"

    # Remove the first tag and confirm it is gone from the model.
    tag_removes[0].click()
    eq_(list(pic.get_tags()), ['silly'])
    tag_removes = self.browser.find_by_css('a.remove-tag')
    eq_([t.text for t in tag_removes], ['silly'])
    self.browser.click_link_by_text('Stop Editing')
    tag_button.click()
    tags = self.browser.find_by_css('li.tag')
    assert all([t.visible for t in tags]), "Tag listing was not visible!"
    eq_([t.text for t in tags], ['silly'])

    # Add a new tag via the (auto-focused) input.
    self.browser.click_link_by_text('Edit')
    add_tag.click()
    focused_input = self.browser.find_by_css('input:focus').first
    tag_input = self.browser.find_by_id('tag').first
    eq_(focused_input['id'], 'tag', "Add-tag input wasn't automatically focused!")
    tag_input.fill('funny\n')
    tag_removes = self.browser.find_by_css('a.remove-tag')
    eq_([t.text for t in tag_removes], ['silly', 'funny'])
    eq_(list(pic.get_tags()), ['silly', 'funny'])
    self.browser.click_link_by_text('Stop Editing')
    tag_button.click()
    tags = self.browser.find_by_css('li.tag')
    assert all([t.visible for t in tags]), "Tag listing was not visible!"
    eq_([t.text for t in tags], ['silly', 'funny'])

    # Remove the tag we just added.
    self.browser.click_link_by_text('Edit')
    tag_removes[1].click()
    eq_(list(pic.get_tags()), ['silly'])
0
Example 91
Project: python-ipmi Source File: test_sdr.py
def test_sdrfullsensorrecord_linearization():
    """Check ``SdrFullSensorRecord.l`` for every IPMI linearization code
    (0=linear through 11=cube root)."""
    sdr = SdrFullSensorRecord(None)
    # linear
    sdr.linearization = 0
    eq_(sdr.l(1), 1)
    eq_(sdr.l(10), 10)
    # ln
    sdr.linearization = 1
    eq_(sdr.l(1), 0)
    # log10
    sdr.linearization = 2
    eq_(sdr.l(10), 1)
    eq_(sdr.l(100), 2)
    # log2
    sdr.linearization = 3
    eq_(sdr.l(8), 3)
    eq_(sdr.l(16), 4)
    # e
    sdr.linearization = 4
    eq_(sdr.l(1), 2.718281828459045)
    # exp10
    sdr.linearization = 5
    eq_(sdr.l(1), 10)
    eq_(sdr.l(2), 100)
    # exp2
    sdr.linearization = 6
    eq_(sdr.l(3), 8)
    eq_(sdr.l(4), 16)
    # 1/x
    sdr.linearization = 7
    eq_(sdr.l(2), 0.5)
    eq_(sdr.l(4), 0.25)
    # sqr
    sdr.linearization = 8
    eq_(sdr.l(2), 4)
    # cube
    sdr.linearization = 9
    eq_(sdr.l(2), 8)
    eq_(sdr.l(3), 27)
    # sqrt
    sdr.linearization = 10
    eq_(sdr.l(16), 4)
    # cubert
    sdr.linearization = 11
    eq_(sdr.l(8), 2)
    eq_(sdr.l(27), 3)
Example 92
Project: drest Source File: resource_tests.py
def test_tastypie_patch_list(self):
api = drest.api.TastyPieAPI(MOCKAPI)
api.auth(user='john.doe', api_key='JOHNDOE_API_KEY')
# Test Creating:
new_project1 = dict(
update_date='2013-02-27T21:07:26.403343',
create_date='2013-02-27T21:07:26.403323',
label='NewProject1'
)
new_project2 = dict(
update_date='2013-02-27T21:07:27.403343',
create_date='2013-02-27T21:07:27.403323',
label='NewProject2'
)
response = api.projects.patch_list([new_project1, new_project2])
eq_(response.status, 202)
projects = api.projects.get().data['objects']
labels = [p['label'] for p in projects]
res = new_project1['label'] in labels
ok_(res)
res = new_project2['label'] in labels
ok_(res)
new_labels = ['NewProject1', 'NewProject2']
new_uris = [p['resource_uri'] for p in projects \
if p['label'] in new_labels]
# Test Deleting:
response = api.projects.patch_list([], new_uris)
eq_(response.status, 202)
projects = api.projects.get().data['objects']
labels = [p['label'] for p in projects]
res = 'NewProject1' not in labels
ok_(res)
res = 'NewProject2' not in labels
ok_(res)
0
Example 93
Project: jstestnet Source File: tests.py
def test_work(self):
    """Full worker lifecycle: poll with no work, start a test run, fetch
    the run_test command, submit results, and confirm the queue drains."""
    user_agent = ('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; '
                  'en-US; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12')
    worker = Worker()
    worker.save()
    ts = self.suite()
    token = Token.create(ts)

    # No work to fetch.
    r = self.client.post(reverse('work.query'),
                         dict(worker_id=worker.id, user_agent=user_agent))
    eq_(r.status_code, 200)
    data = json.loads(r.content)
    eq_(data['desc'], 'No commands from server.')

    # Simulate Hudson requesting a job:
    r = self.client.post(reverse('system.start_tests'),
                         data={'browsers': 'firefox', 'token': token,
                               'name': ts.slug})
    eq_(r.status_code, 200)

    # Do work: the worker should now receive a run_test command.
    r = self.client.post(reverse('work.query'),
                         dict(worker_id=worker.id, user_agent=user_agent))
    eq_(r.status_code, 200)
    data = json.loads(r.content)
    eq_(data['cmd'], 'run_test')
    eq_(data['args'][0]['url'], ts.default_url)
    eq_(data['args'][0]['name'], ts.name)
    work_queue_id = data['args'][0]['work_queue_id']
    queue = WorkQueue.objects.get(pk=work_queue_id)
    eq_(queue.worker.id, worker.id)
    eq_(queue.finished, False)
    eq_(queue.results, None)
    eq_(queue.results_received, False)
    # Heartbeat recorded today (compare year/month/day only).
    eq_(queue.worker.last_heartbeat.timetuple()[0:3],
        datetime.now().timetuple()[0:3])
    eq_(queue.worker.user_agent, user_agent)
    eq_(sorted([(e.engine, e.version) for e in
                queue.worker.engines.all()]),
        sorted(parse_useragent(user_agent)))

    # Submit results for the queued job.
    results = {
        'failures': 0,
        'total': 1,
        'tests': [{'test': 'foo',
                   'message': '1 equals 2',
                   'module': 'some module',
                   'result': True}]
    }
    r = self.client.post(reverse('work.submit_results'),
                         dict(work_queue_id=queue.id,
                              results=json.dumps(results)))
    eq_(r.status_code, 200)
    data = json.loads(r.content)
    eq_(data['desc'], 'Test result received')

    # Refresh from db...
    queue = WorkQueue.objects.get(pk=queue.id)
    eq_(queue.finished, True)
    eq_(queue.results, json.dumps(results))
    eq_(queue.results_received, False)

    # Cannot fetch more work.
    r = self.client.post(reverse('work.query'),
                         dict(worker_id=worker.id, user_agent=user_agent))
    eq_(r.status_code, 200)
    data = json.loads(r.content)
    eq_(data['desc'], 'No commands from server.')
0
Example 94
Project: flask_injector Source File: flask_injector_tests.py
def test_memory_leak():
    """Regression test: RequestScope must not keep GreenThread objects alive."""
    # The RequestScope holds references to GreenThread objects which would
    # cause memory leak
    # More explanation below
    #
    # In Werkzeug locals are indexed using values returned by ``get_ident`` function:
    #
    #     try:
    #         from greenlet import getcurrent as get_ident
    #     except ImportError:
    #         try:
    #             from thread import get_ident
    #         except ImportError:
    #             from _thread import get_ident
    #
    # This is what LocalManager.cleanup runs indirectly (__ident_func__
    # points to get_ident unless it's overridden):
    #
    #     self.__storage__.pop(self.__ident_func__(), None)
    #
    # If something's assigned in local storage *after* the cleanup is done an entry
    # in internal storage under "the return value of get_ident()" key is recreated
    # and a reference to the key will be kept forever.
    #
    # This is not strictly related to Eventlet/GreenThreads but that's how
    # the issue manifested itself so the test reflects that.
    app = Flask(__name__)
    FlaskInjector(app)

    @app.route('/')
    def index():
        return 'test'

    def get_request():
        with app.test_client() as c:
            c.get('/')

    green_thread = greenthread.spawn(get_request)
    green_thread.wait()
    # Delete green_thread so the GreenThread object is dereferenced
    del green_thread
    # Force run garbage collect to make sure GreenThread object is collected if
    # there is no memory leak
    gc.collect()
    greenthread_count = len([
        obj for obj in gc.get_objects()
        if type(obj) is greenthread.GreenThread])
    eq_(greenthread_count, 0)
0
Example 95
Project: skll Source File: test_featureset.py
def test_equality():
    """
    Test featureset equality
    """
    # create a featureset
    fs1, _ = make_classification_data(num_examples=100,
                                      num_features=4,
                                      num_labels=3,
                                      train_test_ratio=1.0)
    # create a featureset with a different set but same number
    # of features and everything else the same
    fs2, _ = make_classification_data(num_examples=100,
                                      num_features=4,
                                      num_labels=3,
                                      train_test_ratio=1.0)
    fs2.features *= 2
    # create a featureset with different feature names
    # and everything else the same
    fs3, _ = make_classification_data(num_examples=100,
                                      num_features=4,
                                      num_labels=3,
                                      feature_prefix='g',
                                      train_test_ratio=1.0)
    # create a featureset with a different set of labels
    # and everything else the same
    fs4, _ = make_classification_data(num_examples=100,
                                      num_features=4,
                                      num_labels=2,
                                      train_test_ratio=1.0)
    # create a featureset with a different set but same number
    # of IDs and everything else the same
    fs5, _ = make_classification_data(num_examples=100,
                                      num_features=4,
                                      num_labels=3,
                                      train_test_ratio=1.0)
    fs5.ids = np.array(['A' + i for i in fs2.ids])
    # create a featureset with a different vectorizer
    # and everything else the same
    fs6, _ = make_classification_data(num_examples=100,
                                      num_features=4,
                                      num_labels=3,
                                      train_test_ratio=1.0,
                                      use_feature_hashing=True,
                                      feature_bins=2)
    # create a featureset with a different number of features
    # and everything else the same
    fs7, _ = make_classification_data(num_examples=100,
                                      num_features=5,
                                      num_labels=3,
                                      train_test_ratio=1.0)
    # create a featureset with a different number of examples
    # and everything else the same
    fs8, _ = make_classification_data(num_examples=200,
                                      num_features=4,
                                      num_labels=3,
                                      train_test_ratio=1.0)
    # create a featureset with a different vectorizer instance
    # and everything else the same
    fs9, _ = make_classification_data(num_examples=100,
                                      num_features=4,
                                      num_labels=3,
                                      train_test_ratio=1.0)
    # now check for the expected equalities: any differing component makes
    # the featuresets unequal, but a distinct-yet-equal vectorizer instance
    # (fs9) still compares equal to fs1.
    assert_not_equal(fs1, fs2)
    assert_not_equal(fs1, fs3)
    assert_not_equal(fs1, fs4)
    assert_not_equal(fs1, fs5)
    assert_not_equal(fs1, fs6)
    assert_not_equal(fs1, fs7)
    assert_not_equal(fs1, fs8)
    assert_not_equal(id(fs1.vectorizer), id(fs9.vectorizer))
    eq_(fs1, fs9)
0
Example 96
Project: crmsh Source File: test_scripts.py
@with_setup(setup_func, teardown_func)
def test_optional_step_ref():
    """
    It seems I have a bug in referencing ids from substeps.
    """
    # NOTE(review): the indentation inside these YAML literals appears to
    # have been lost in transcription; the nesting may need to be restored
    # against the original crmsh test suite before these scripts parse.
    a = '''---
- version: 2.2
category: Script
include:
- agent: test:apache
name: apache
parameters:
- name: id
required: true
'''
    b = '''---
- version: 2.2
category: Script
include:
- script: apache
required: false
parameters:
- name: wiz
required: true
actions:
- cib: "primitive {{wiz}} {{apache:id}}"
'''
    script_a = scripts.load_script_string('apache', a)
    script_b = scripts.load_script_string('test-b', b)
    assert script_a is not None
    assert script_b is not None

    # Script A alone: the id parameter feeds the primitive directly.
    actions = scripts.verify(script_a,
                             {"id": "apacho"}, external_check=False)
    eq_(len(actions), 1)
    pprint(actions)
    assert actions[0]['text'] == "primitive apacho test:apache"

    # Script B includes A optionally; the {{apache:id}} substep reference
    # must resolve from the nested parameter dict.
    actions = scripts.verify(script_b,
                             {'wiz': "SARUMAN", "apache": {"id": "apacho"}},
                             external_check=False)
    eq_(len(actions), 1)
    pprint(actions)
    assert actions[0]['text'] == "primitive SARUMAN apacho"
0
Example 97
Project: douban-sqlstore Source File: test_sqlstore.py
def test_push_config(self):
    """Pushing a new config reloads changed farms, reuses unchanged ones,
    and creates farms that appear for the first time."""
    farm1_dbconf = {
        'host': '127.0.0.1',
        'port': 3306,
        'db': 'test_sqlstore1',
        'user': 'sqlstore',
        'passwd': 'sqlstore',
    }
    farm1_new_dbconf = {
        'host': '127.0.0.1',
        'port': 3306,
        'db': 'test_sqlstore3',
        'user': 'sqlstore',
        'passwd': 'sqlstore',
    }
    farm2_dbconf = {
        'host': '127.0.0.1',
        'port': 3306,
        'db': 'test_sqlstore2',
        'user': 'sqlstore',
        'passwd': 'sqlstore',
    }
    farm3_dbconf = {
        'host': '127.0.0.1',
        'port': 3306,
        'db': 'test_sqlstore3',
        'user': 'sqlstore',
        'passwd': 'sqlstore',
    }
    store = M.store_from_config(self.database)
    farm1 = store.get_farm('farm1')
    eq_(farm1.dbcnf, farm1_dbconf)
    farm2 = store.get_farm('farm2')
    eq_(farm2.dbcnf, farm2_dbconf)

    # push new config
    store.receive_conf(str(self.database_new_config))

    # farm1 should change (new object, new db name)
    _farm1 = store.get_farm('farm1')
    eq_(_farm1.dbcnf, farm1_new_dbconf)
    ok_(_farm1 is not farm1)

    # farm2 should be re-used (identical config -> same object)
    _farm2 = store.get_farm('farm2')
    eq_(_farm2.dbcnf, farm2_dbconf)
    ok_(_farm2 is farm2)

    # farm3 should be created
    farm3 = store.get_farm('farm3')
    eq_(farm3.dbcnf, farm3_dbconf)
0
Example 98
Project: editxt Source File: test_parser.py
def test_CommandParser():
    """Generator test for CommandParser: parsing, placeholders, and
    completions across several parser configurations.

    NOTE(review): runs of spaces inside some quoted argument strings below
    may have been collapsed during transcription (several visually identical
    inputs expect different results); verify against the original editxt
    test suite.
    """
    def test_parser(argstr, options, parser):
        if isinstance(options, Exception):
            # Expected failure: match exception type, message, and metadata.
            def check(err):
                eq_(type(err), type(options))
                eq_(str(err), str(options))
                eq_(err.errors, options.errors)
                eq_(err.parse_index, options.parse_index)
            with assert_raises(type(options), msg=check):
                parser.parse(argstr)
        else:
            opts = parser.default_options()
            opts.__dict__.update(options)
            eq_(parser.parse(argstr), opts)

    test = partial(test_parser, parser=CommandParser(yesno))
    yield test, "", Options(yes=True)
    yield test, "no", Options(yes=False)

    manual = SubArgs("manual",
                     Int("bass", default=50),
                     Int("treble", default=50))
    preset = SubArgs("preset",
                     Choice("flat", "rock", "cinema", name="value"))
    level = Choice(
        ("off", 0),
        ('high', 4),
        ("medium", 2),
        ('low', 1),
        name="level"
    )
    radio_parser = CommandParser(
        SubParser("equalizer",
                  manual,
                  preset,
                  ),
        level,
        Int("volume", default=50),  # , min=0, max=100),
        String("name"),
    )
    test = partial(test_parser, parser=radio_parser)
    yield test, "manual", Options(equalizer=(manual, Options(bass=50, treble=50)))
    yield test, "", Options()
    yield test, "preset rock low", Options(
        level=1, equalizer=(preset, Options(value="rock")))
    yield test, " high", Options(level=0, name="high")
    yield test, " high", Options(level=4)
    yield test, "high", Options(level=4)
    yield test, "hi", Options(level=4)
    yield test, "high '' yes", ArgumentError('unexpected argument(s): yes', ..., [], 8)

    def test_placeholder(argstr, expected, parser=radio_parser):
        eq_(parser.get_placeholder(argstr), expected)
    test = test_placeholder
    yield test, "", "equalizer ... off 50 name"
    yield test, " ", "50 name"
    yield test, " 5", " name"
    yield test, " 5 ", "name"
    yield test, " high", ""
    yield test, " hi", "gh 50 name"
    yield test, " high", " 50 name"
    yield test, "hi", "gh 50 name"
    yield test, "high ", "50 name"

    def check_completions(argstr, expected, start=None, parser=radio_parser):
        words = parser.get_completions(argstr)
        eq_(words, expected)
        if start is not None:
            eq_([w.start for w in words], [start] * len(words), words)
    test = check_completions
    yield test, "", ['manual', 'preset'], 0
    yield test, " ", []
    yield test, " 5", []
    yield test, " 5 ", []
    yield test, " high", []
    yield test, " ", ["off", "high", "medium", "low"], 1
    yield test, " hi", ["high"], 1

    parser = CommandParser(
        Int("num", default=0),
        VarArgs("value", ColonString("value")),
    )
    test = partial(check_completions, parser=parser)
    yield test, "", []
    yield test, "abc", [":abc", ":def"], 3
    yield test, " abc", [":abc", ":def"], 4
    yield test, " abc:def:ghi def:a", [":abc"], 16

    parser = CommandParser(
        level, Int("value"), Choice("highlander", "tundra", "4runner"))
    test = partial(check_completions, parser=parser)
    yield test, "h", ["high"], 0
    yield test, "t", ["tundra"], 0
    yield test, "high", ["high"], 0
    yield test, "high ", []
    yield test, "high 4", []
    yield test, "high x", []
    yield test, "high 4", ["4runner"], 6
0 votes
Example 99
Project: skll — Source File: test_utilities.py
def check_filter_features_arff_argparse(filter_type, label_col='y',
                                        id_col='id', inverse=False,
                                        quiet=False):
    """
    A utility function to check that we are setting up argument parsing
    correctly for filter_features for ARFF file types. We are not checking
    whether the results are correct because we have separate tests for that.
    """
    # Writer class whose constructor/write() will be mocked out below.
    writer_class = skll.data.writers.ARFFWriter

    # Dummy input and output paths for the command line.
    input_path = join(_my_dir, 'other', 'test_filter_features_input.arff')
    output_path = 'bar.arff'

    # Write a small featureset so the input file actually exists on disk.
    feature_set, _ = make_classification_data(num_labels=3,
                                              train_test_ratio=1.0)
    writer = writer_class(input_path, feature_set, label_col=label_col,
                          id_col=id_col)
    writer.write()

    argv = [input_path, output_path]

    # Choose the items to keep and the matching command-line flag for the
    # requested filter type; only one of the three lists ends up defined.
    features_to_keep = ids_to_keep = labels_to_keep = None
    if filter_type == 'feature':
        features_to_keep = (['f01', 'f04', 'f07', 'f10'] if inverse else
                            ['f02', 'f03', 'f05', 'f06', 'f08', 'f09'])
        argv.append('-f')
        argv.extend(features_to_keep)
    elif filter_type == 'id':
        id_numbers = range(1, 100, 2) if inverse else range(2, 102, 2)
        ids_to_keep = ['EXAMPLE_{}'.format(num) for num in id_numbers]
        argv.append('-I')
        argv.extend(ids_to_keep)
    elif filter_type == 'label':
        labels_to_keep = ['0', '1'] if inverse else ['2']
        argv.append('-L')
        argv.extend(labels_to_keep)

    argv.extend(['-l', label_col])
    argv.extend(['--id_col', id_col])
    if inverse:
        argv.append('-i')
    if quiet:
        argv.append('-q')

    # Substitute mock methods for the main methods that get called by
    # filter_features for arff files: FeatureSet.filter() and the __init__()
    # method of the appropriate writer. We also need to mock the write()
    # method to prevent actual writing.
    with patch.object(FeatureSet, 'filter', autospec=True) as filter_mock, \
            patch.object(writer_class, '__init__', autospec=True,
                         return_value=None) as write_init_mock, \
            patch.object(writer_class, 'write', autospec=True) as write_mock:
        ff.main(argv=argv)

        # Pull the call arguments captured by the mocks.
        filter_pos_arguments, filter_kw_arguments = filter_mock.call_args
        write_pos_arguments, write_kw_arguments = write_init_mock.call_args

        # The writer should target our output file with the right verbosity.
        eq_(write_pos_arguments[1], output_path)
        eq_(write_kw_arguments['quiet'], quiet)

        # note that we cannot test the label_col column for the writer
        # the reason is that is set conditionally and those conditions
        # do not execute with mocking
        eq_(filter_pos_arguments[0], feature_set)
        eq_(filter_kw_arguments['inverse'], inverse)
        if filter_type == 'feature':
            eq_(filter_kw_arguments['features'], features_to_keep)
        elif filter_type == 'id':
            eq_(filter_kw_arguments['ids'], ids_to_keep)
        elif filter_type == 'label':
            eq_(filter_kw_arguments['labels'], labels_to_keep)
0 votes
Example 100
def test_model():
    """Integration test for a Flask-Admin ModelView over GeoAlchemy columns.

    Sets up an app/db/admin trio, registers a view for a geometry-bearing
    model, then drives the admin list/new/edit/delete pages with a test
    client, checking form field classes, HTTP status codes, and that posted
    GeoJSON round-trips through the database as shapely geometries.

    NOTE(review): the edit-POST assertions are commented out in the original;
    only GET of the edit page and the final delete are exercised.
    """
    app, db, admin = setup()
    GeoModel = create_models(db)
    db.create_all()
    # Start from an empty table so the final count assertion is deterministic.
    GeoModel.query.delete()
    db.session.commit()
    view = ModelView(GeoModel, db.session)
    admin.add_view(view)
    eq_(view.model, GeoModel)
    eq_(view._primary_key, 'id')
    # Verify form: each geometry column maps to a GeoJSONField with the
    # expected geometry_type keyword argument.
    eq_(view._create_form_class.point.field_class, GeoJSONField)
    eq_(view._create_form_class.point.kwargs['geometry_type'], "POINT")
    eq_(view._create_form_class.line.field_class, GeoJSONField)
    eq_(view._create_form_class.line.kwargs['geometry_type'], "LINESTRING")
    eq_(view._create_form_class.polygon.field_class, GeoJSONField)
    eq_(view._create_form_class.polygon.kwargs['geometry_type'], "POLYGON")
    eq_(view._create_form_class.multi.field_class, GeoJSONField)
    eq_(view._create_form_class.multi.kwargs['geometry_type'], "MULTIPOINT")
    # Make some test clients
    client = app.test_client()
    rv = client.get('/admin/geomodel/')
    eq_(rv.status_code, 200)
    rv = client.get('/admin/geomodel/new/')
    eq_(rv.status_code, 200)
    # Create a row; a successful create redirects (302) to the list view.
    rv = client.post('/admin/geomodel/new/', data={
        "name": "test1",
        "point": '{"type": "Point", "coordinates": [125.8, 10.0]}',
        "line": '{"type": "LineString", "coordinates": [[50.2345, 94.2], [50.21, 94.87]]}',
        "polygon": '{"type": "Polygon", "coordinates": [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]]}',
        "multi": '{"type": "MultiPoint", "coordinates": [[100.0, 0.0], [101.0, 1.0]]}',
    })
    eq_(rv.status_code, 302)
    # Stored geometries should round-trip (via to_shape) to the posted values.
    model = db.session.query(GeoModel).first()
    eq_(model.name, "test1")
    eq_(to_shape(model.point).geom_type, "Point")
    eq_(list(to_shape(model.point).coords), [(125.8, 10.0)])
    eq_(to_shape(model.line).geom_type, "LineString")
    eq_(list(to_shape(model.line).coords), [(50.2345, 94.2), (50.21, 94.87)])
    eq_(to_shape(model.polygon).geom_type, "Polygon")
    eq_(list(to_shape(model.polygon).exterior.coords),
        [(100.0, 0.0), (101.0, 0.0), (101.0, 1.0), (100.0, 1.0), (100.0, 0.0)])
    eq_(to_shape(model.multi).geom_type, "MultiPoint")
    eq_(len(to_shape(model.multi).geoms), 2)
    eq_(list(to_shape(model.multi).geoms[0].coords), [(100.0, 0.0)])
    eq_(list(to_shape(model.multi).geoms[1].coords), [(101.0, 1.0)])
    # The list page should render the point as GeoJSON inside a textarea.
    rv = client.get('/admin/geomodel/')
    eq_(rv.status_code, 200)
    html = rv.data.decode('utf-8')
    pattern = r'(.|\n)+({.*"type": ?"Point".*})</textarea>(.|\n)+'
    group = re.match(pattern, html).group(2)
    p = json.loads(group)
    eq_(p['coordinates'][0], 125.8)
    eq_(p['coordinates'][1], 10.0)
    # Edit page should render and include the multipoint as compact GeoJSON.
    url = '/admin/geomodel/edit/?id=%s' % model.id
    rv = client.get(url)
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_(r' name="multi">{"type":"MultiPoint","coordinates":[[100,0],[101,1]]}</textarea>' in data)
    # rv = client.post(url, data={
    #     "name": "edited",
    #     "point": '{"type": "Point", "coordinates": [99.9, 10.5]}',
    #     "line": '',  # set to NULL in the database
    # })
    # eq_(rv.status_code, 302)
    #
    # model = db.session.query(GeoModel).first()
    # eq_(model.name, "edited")
    # eq_(to_shape(model.point).geom_type, "Point")
    # eq_(list(to_shape(model.point).coords), [(99.9, 10.5)])
    # eq_(to_shape(model.line), None)
    # eq_(to_shape(model.polygon).geom_type, "Polygon")
    # eq_(list(to_shape(model.polygon).exterior.coords),
    #     [(100.0, 0.0), (101.0, 0.0), (101.0, 1.0), (100.0, 1.0), (100.0, 0.0)])
    # eq_(to_shape(model.multi).geom_type, "MultiPoint")
    # eq_(len(to_shape(model.multi).geoms), 2)
    # eq_(list(to_shape(model.multi).geoms[0].coords), [(100.0, 0.0)])
    # eq_(list(to_shape(model.multi).geoms[1].coords), [(101.0, 1.0)])
    # Deleting the row redirects and leaves the table empty.
    url = '/admin/geomodel/delete/?id=%s' % model.id
    rv = client.post(url)
    eq_(rv.status_code, 302)
    eq_(db.session.query(GeoModel).count(), 0)