Here are examples of the Python API time.sleep, taken from open source projects.
171 Examples
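Before the project examples, a minimal sketch of the call itself: time.sleep(seconds) blocks the calling thread for at least the given duration, and a float argument gives sub-second pauses. This is generic usage, not taken from any of the projects below.

import time

start = time.time()
time.sleep(1.5)  # block the calling thread for at least 1.5 seconds
print("slept for %.2f seconds" % (time.time() - start))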
Example 51
Project: pywikibot-core Source File: blockpageschecker.py
def main(*args):
"""
Process command line arguments and perform task.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# Loading the comments
global categoryToCheck, project_inserted
# Option defaults; a generator will be defined through genFactory
# if the user supplies one
always = False
generator = False
show = False
moveBlockCheck = False
protectedpages = False
protectType = 'edit'
namespace = 0
# To prevent Infinite loops
errorCount = 0
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
# Process local args
for arg in local_args:
option, sep, value = arg.partition(':')
if option == '-always':
always = True
elif option == '-move':
moveBlockCheck = True
elif option == '-show':
show = True
elif option in ('-protectedpages', '-moveprotected'):
protectedpages = True
if option == '-moveprotected':
protectType = 'move'
if value:
namespace = int(value)
else:
genFactory.handleArg(arg)
if config.mylang not in project_inserted:
pywikibot.output(u"Your project is not supported by this script.\n"
u"You have to edit the script and add it!")
return
site = pywikibot.Site()
if protectedpages:
generator = site.protectedpages(namespace=namespace, type=protectType)
# Take the right templates to use, the category and the comment
TSP = i18n.translate(site, templateSemiProtection)
TTP = i18n.translate(site, templateTotalProtection)
TSMP = i18n.translate(site, templateSemiMoveProtection)
TTMP = i18n.translate(site, templateTotalMoveProtection)
TNR = i18n.translate(site, templateNoRegex)
TU = i18n.translate(site, templateUnique)
categories = i18n.translate(site, categoryToCheck)
commentUsed = i18n.twtranslate(site, 'blockpageschecker-summary')
if not generator:
generator = genFactory.getCombinedGenerator()
if not generator:
generator = list()
pywikibot.output(u'Loading categories...')
# Define the category if no other generator has been set
for CAT in categories:
cat = pywikibot.Category(site, CAT)
# Define the generator
gen = pagegenerators.CategorizedPageGenerator(cat)
for pageCat in gen:
generator.append(pageCat)
pywikibot.output(u'Categories loaded, start!')
# Main Loop
preloadingGen = pagegenerators.PreloadingGenerator(generator, groupsize=60)
for page in preloadingGen:
pagename = page.title(asLink=True)
pywikibot.output('Loading %s...' % pagename)
try:
text = page.text
except pywikibot.NoPage:
pywikibot.output("%s doesn't exist! Skipping..." % pagename)
continue
except pywikibot.IsRedirectPage:
pywikibot.output("%s is a redirect! Skipping..." % pagename)
if show:
showQuest(page)
continue
# FIXME: This check does not work :
# PreloadingGenerator cannot set correctly page.editRestriction
# (see bug T57322)
# if not page.canBeEdited():
# pywikibot.output("%s is sysop-protected : this account can't edit "
# "it! Skipping..." % pagename)
# continue
restrictions = page.protection()
try:
editRestr = restrictions['edit']
except KeyError:
editRestr = None
if not page.canBeEdited():
pywikibot.output(u"%s is protected: "
u"this account can't edit it! Skipping..."
% pagename)
continue
# Understand, according to the template in the page, what should be the
# protection and compare it with what there really is.
TemplateInThePage = understandBlock(text, TTP, TSP, TSMP, TTMP, TU)
# Only to see if the text is the same or not...
oldtext = text
# keep track of the changes for each step (edit then move)
changes = -1
if not editRestr:
# page is not edit-protected
# Deleting the template because the page doesn't need it.
if not (TTP or TSP):
raise pywikibot.Error(
'This script is not localized to use it on \n{0}. '
'Missing "templateSemiProtection" or'
'"templateTotalProtection"'.format(site.sitename))
if TU:
replaceToPerform = u'|'.join(TTP + TSP + TU)
else:
replaceToPerform = u'|'.join(TTP + TSP)
text, changes = re.subn('<noinclude>(%s)</noinclude>'
% replaceToPerform, '', text)
if changes == 0:
text, changes = re.subn('(%s)' % replaceToPerform, '', text)
msg = u'The page is editable for all'
if not moveBlockCheck:
msg += u', deleting the template..'
pywikibot.output(u'%s.' % msg)
elif editRestr[0] == 'sysop':
# total edit protection
if (TemplateInThePage[0] == 'sysop-total' and TTP) or \
(TemplateInThePage[0] == 'unique' and TU):
msg = 'The page is protected to the sysop'
if not moveBlockCheck:
msg += ', skipping...'
pywikibot.output(msg)
else:
if not TNR or TU and not TNR[4] or not (TU or TNR[1]):
raise pywikibot.Error(
'This script is not localized to use it on \n{0}. '
'Missing "templateNoRegex"'.format(
site.sitename))
pywikibot.output(u'The page is protected to the sysop, but the '
u'template seems not correct. Fixing...')
if TU:
text, changes = re.subn(TemplateInThePage[1], TNR[4], text)
else:
text, changes = re.subn(TemplateInThePage[1], TNR[1], text)
elif TSP or TU:
# implicitly editRestr[0] = 'autoconfirmed', edit semi-protection
if TemplateInThePage[0] == 'autoconfirmed-total' or \
TemplateInThePage[0] == 'unique':
msg = 'The page is editable only for the autoconfirmed users'
if not moveBlockCheck:
msg += ', skipping...'
pywikibot.output(msg)
else:
if not TNR or TU and not TNR[4] or not (TU or TNR[1]):
raise pywikibot.Error(
'This script is not localized to use it on \n{0}. '
'Missing "templateNoRegex"'.format(
site.sitename))
pywikibot.output(u'The page is editable only for the '
u'autoconfirmed users, but the template '
u'seems not correct. Fixing...')
if TU:
text, changes = re.subn(TemplateInThePage[1], TNR[4], text)
else:
text, changes = re.subn(TemplateInThePage[1], TNR[0], text)
if changes == 0:
# We tried to fix edit-protection templates, but it did not work.
pywikibot.warning('No edit-protection template could be found')
if moveBlockCheck and changes > -1:
# checking move protection now
try:
moveRestr = restrictions['move']
except KeyError:
moveRestr = False
changes = -1
if not moveRestr:
pywikibot.output(u'The page is movable for all, deleting the '
u'template...')
# Deleting the template because the page doesn't need it.
if TU:
replaceToPerform = u'|'.join(TSMP + TTMP + TU)
else:
replaceToPerform = u'|'.join(TSMP + TTMP)
text, changes = re.subn('<noinclude>(%s)</noinclude>'
% replaceToPerform, '', text)
if changes == 0:
text, changes = re.subn('(%s)' % replaceToPerform, '', text)
elif moveRestr[0] == 'sysop':
# move-total-protection
if (TemplateInThePage[0] == 'sysop-move' and TTMP) or \
(TemplateInThePage[0] == 'unique' and TU):
pywikibot.output(u'The page is protected from moving to '
u'the sysop, skipping...')
if TU:
# no changes needed, better to revert the old text.
text = oldtext
else:
pywikibot.output(u'The page is protected from moving to '
u'the sysop, but the template seems not '
u'correct. Fixing...')
if TU:
text, changes = re.subn(TemplateInThePage[1], TNR[4],
text)
else:
text, changes = re.subn(TemplateInThePage[1], TNR[3],
text)
elif TSMP or TU:
# implicitly moveRestr[0] = 'autoconfirmed',
# move-semi-protection
if TemplateInThePage[0] == 'autoconfirmed-move' or \
TemplateInThePage[0] == 'unique':
pywikibot.output(u'The page is movable only for the '
u'autoconfirmed users, skipping...')
if TU:
# no changes needed, better to revert the old text.
text = oldtext
else:
pywikibot.output(u'The page is movable only for the '
u'autoconfirmed users, but the template '
u'seems not correct. Fixing...')
if TU:
text, changes = re.subn(TemplateInThePage[1], TNR[4],
text)
else:
text, changes = re.subn(TemplateInThePage[1], TNR[2],
text)
if changes == 0:
# We tried to fix move-protection templates, but it did not work
pywikibot.warning('No move-protection template could be found')
if oldtext != text:
# Ok, asking if the change has to be performed and do it if yes.
pywikibot.output(color_format(
'\n\n>>> {lightpurple}{0}{default} <<<', page.title()))
pywikibot.showDiff(oldtext, text)
if not always:
choice = pywikibot.input_choice(u'Do you want to accept these '
u'changes?',
[('Yes', 'y'), ('No', 'n'),
('All', 'a')], 'n')
if choice == 'a':
always = True
if always or choice == 'y':
while True:
try:
page.put(text, commentUsed, force=True)
except pywikibot.EditConflict:
pywikibot.output(u'Edit conflict! skip!')
break
except pywikibot.ServerError:
# Sometimes there is this error, which is quite annoying
# because it can block the whole process for nothing.
errorCount += 1
if errorCount < 5:
pywikibot.output(u'Server Error! Wait..')
time.sleep(3)
continue
else:
# Prevent Infinite Loops
raise pywikibot.ServerError(u'Fifth Server Error!')
except pywikibot.SpamfilterError as e:
pywikibot.output(u'Cannot change %s because of '
u'blacklist entry %s'
% (page.title(), e.url))
break
except pywikibot.LockedPage:
pywikibot.output(u'The page is still protected. '
u'Skipping...')
break
except pywikibot.PageNotSaved as error:
pywikibot.output(u'Error putting page: %s'
% (error.args,))
break
else:
# Break only if the errors are one after the other
errorCount = 0
break
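The time.sleep(3) above implements retry-on-transient-failure: a server error pauses the bot before retrying, and a counter caps consecutive failures. A minimal standalone sketch of that pattern follows; put is a hypothetical callable standing in for page.put, and the builtin ConnectionError stands in for pywikibot.ServerError.

import time

MAX_RETRIES = 5

def put_with_retry(put):
    # put is a hypothetical callable standing in for page.put;
    # ConnectionError stands in for pywikibot.ServerError.
    error_count = 0
    while True:
        try:
            put()
        except ConnectionError:
            error_count += 1
            if error_count >= MAX_RETRIES:
                raise  # give up after five consecutive errors
            time.sleep(3)  # let the server recover before retrying
            continue
        break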
Example 52
Project: cgat Source File: CSV2DB.py
def run(infile, options, report_step=10000):
options.tablename = quoteTableName(
options.tablename, backend=options.backend)
if options.map:
m = {}
for x in options.map:
f, t = x.split(":")
m[f] = t
options.map = m
else:
options.map = {}
existing_tables = set()
quick_import_separator = "\t"
if options.database_backend == "postgres":
import psycopg2
raise NotImplementedError("needs refactoring for commandline options")
dbhandle = psycopg2.connect(options.psql_connection)
error = psycopg2.Error
options.null = "NULL"
options.string_value = "'%s'"
options.text = "TEXT"
options.index = "TEXT"
if options.insert_quick:
raise ValueError("quick import not implemented.")
elif options.database_backend == "mysql":
import MySQLdb
dbhandle = MySQLdb.connect(host=options.database_host,
user=options.database_username,
passwd=options.database_password,
port=options.database_port,
db=options.database_name)
error = Exception
options.null = "NULL"
options.string_value = "%s"
options.text = "TEXT"
options.index = "VARCHAR(40)"
if options.insert_quick:
raise ValueError("quick import not implemented.")
elif options.backend == "sqlite":
import sqlite3
dbhandle = sqlite3.connect(options.database_name)
try:
os.chmod(options.database_name, 0o664)
except OSError as msg:
E.warn("could not change permissions of database: %s" % msg)
# Avoid the following error:
# sqlite3.ProgrammingError: You must not use 8-bit bytestrings
# unless you use a text_factory that can interpret 8-bit
# bytestrings (like text_factory = str). It is highly
# recommended that you instead just switch your application
# to Unicode strings
# Note: might be better to make csv2db unicode aware.
dbhandle.text_factory = str
error = sqlite3.OperationalError
options.insert_many = True # False
options.null = None # "NULL"
options.text = "TEXT"
options.index = "TEXT"
options.string_value = "%s" # "'%s'"
statement = "SELECT name FROM sqlite_master WHERE type='table'"
cc = executewait(dbhandle, statement, error, options.retry)
existing_tables = set([x[0] for x in cc])
cc.close()
# use , as separator
quick_import_statement = \
"sqlite3 %s '.import %%s %s'" % \
(options.database_name, options.tablename)
quick_import_separator = "|"
if options.header is not None:
options.header = [x.strip() for x in options.header.split(",")]
if options.utf:
reader = CSV.UnicodeDictReader(infile,
dialect=options.dialect,
fieldnames=options.header)
else:
reader = csv.DictReader(CSV.CommentStripper(infile),
dialect=options.dialect,
fieldnames=options.header)
if options.replace_header:
try:
next(reader)
except StopIteration:
pass
E.info("reading %i columns to guess column types" % options.guess_size)
rows = []
for row in reader:
if None in row:
raise ValueError(
"undefined columns in input file at row: %s" % row)
try:
rows.append(IOTools.convertDictionary(row, map=options.map))
except TypeError as msg:
E.warn(
"incomplete line? Type error in conversion: "
"'%s' with data: %s" % (msg, str(row)))
except ValueError as msg:
E.warn(
"incomplete line? Type error in conversion: "
"'%s' with data: %s" % (msg, str(row)))
if len(rows) >= options.guess_size:
break
E.info("read %i rows for type guessing" % len(rows))
E.info("creating table")
if len(rows) == 0:
if options.allow_empty:
if not reader.fieldnames:
E.warn("no data - no table created")
else:
# create empty table and exit
take, map_column2type, ignored = createTable(
dbhandle,
error,
options.tablename,
options,
retry=options.retry,
headers=reader.fieldnames,
ignore_empty=options.ignore_empty,
ignore_columns=options.ignore_columns,
rename_columns=options.rename_columns,
lowercase=options.lowercase,
ignore_duplicates=options.ignore_duplicates,
indices=options.indices,
first_column=options.first_column,
existing_tables=existing_tables,
append=options.append)
E.info("empty table created")
return
else:
raise ValueError("empty table")
else:
take, map_column2type, ignored = createTable(
dbhandle,
error,
options.tablename,
options,
rows=rows,
retry=options.retry,
headers=reader.fieldnames,
ignore_empty=options.ignore_empty,
ignore_columns=options.ignore_columns,
rename_columns=options.rename_columns,
lowercase=options.lowercase,
ignore_duplicates=options.ignore_duplicates,
indices=options.indices,
first_column=options.first_column,
existing_tables=existing_tables,
append=options.append)
def row_iter(rows, reader):
for row in rows:
yield quoteRow(row, take, map_column2type,
options.missing_values,
null=options.null,
string_value=options.string_value)
for data in reader:
yield quoteRow(IOTools.convertDictionary(data, map=options.map),
take,
map_column2type,
options.missing_values,
null=options.null,
string_value=options.string_value)
ninput = 0
E.info("inserting data")
if options.insert_quick:
E.info("using quick insert")
outfile, filename = tempfile.mkstemp()
E.debug("dumping data into %s" % filename)
for d in row_iter(rows, reader):
ninput += 1
os.write(outfile, quick_import_separator.join(
[str(d[x]) for x in take]) + "\n")
if ninput % report_step == 0:
E.info("iteration %i\n" % ninput)
os.close(outfile)
statement = quick_import_statement % filename
E.debug(statement)
# infinite loop possible
while 1:
retcode = E.run(statement, cwd=os.getcwd(), close_fds=True)
if retcode != 0:
E.warn("import error using statement: %s" % statement)
if not options.retry:
raise ValueError(
"import error using statement: %s" % statement)
time.sleep(5)
continue
break
os.remove(filename)
# there is no way to insert NULL values into sqlite. The only
# solution is to update all columns.
for column in take:
executewait(dbhandle,
"UPDATE %s SET %s = NULL WHERE %s = 'None'" % (
options.tablename, column, column),
error,
options.retry)
elif options.insert_many:
data = []
for d in row_iter(rows, reader):
ninput += 1
data.append([d[x] for x in take])
if ninput % report_step == 0:
E.info("iteration %i" % ninput)
statement = "INSERT INTO %s VALUES (%s)" % (
options.tablename, ",".join("?" * len(take)))
E.info("inserting %i rows" % len(data))
E.debug("multiple insert:\n# %s" % statement)
while 1:
try:
dbhandle.executemany(statement, data)
except error as msg:
E.warn("import failed: msg=%s, statement=\n %s" %
(msg, statement))
# TODO: check for database locked msg
if not options.retry:
raise error(msg)
if not re.search("locked", str(msg)):
raise error(msg)
time.sleep(5)
continue
break
else:
# insert line by line (could not figure out how to do bulk loading with
# subprocess and COPY FROM STDIN)
statement = "INSERT INTO %s VALUES (%%(%s)s)" % (options.tablename,
')s, %('.join(take))
# output data used for guessing:
for d in row_iter(rows, reader):
ninput += 1
E.debug("single insert:\n# %s" % (statement % d))
cc = executewait(dbhandle, statement, error,
retry=options.retry,
args=d)
cc.close()
if ninput % report_step == 0:
E.info("iteration %i" % ninput)
E.info("building indices")
nindex = 0
for index in options.indices:
nindex += 1
try:
statement = "CREATE INDEX %s_index%i ON %s (%s)" % (
options.tablename, nindex, options.tablename, index)
cc = executewait(dbhandle, statement, error, options.retry)
cc.close()
E.info("added index on column %s" % (index))
except error as msg:
E.info("adding index on column %s failed: %s" % (index, msg))
statement = "SELECT COUNT(*) FROM %s" % (options.tablename)
cc = executewait(dbhandle, statement, error, options.retry)
result = cc.fetchone()
cc.close()
noutput = result[0]
E.info("ninput=%i, noutput=%i, nskipped_columns=%i" %
(ninput, noutput, len(ignored)))
dbhandle.commit()
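The time.sleep(5) calls above handle contention: when sqlite reports the database as locked (or the quick-import subprocess fails), the loader sleeps and retries instead of aborting. A minimal sketch of the executemany variant, assuming dbhandle is a sqlite3 connection; the function name is illustrative, not from the project.

import re
import sqlite3
import time

def executemany_with_retry(dbhandle, statement, data, retry=True):
    # Retry only while sqlite reports the database as locked,
    # sleeping so a competing writer has time to release the lock.
    while True:
        try:
            dbhandle.executemany(statement, data)
        except sqlite3.OperationalError as msg:
            if not retry or not re.search("locked", str(msg)):
                raise
            time.sleep(5)
            continue
        break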
Example 53
Project: SickRage Source File: parser.py
def _parse_string(self, name): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
if not name:
return
matches = []
bestResult = None
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
if not match:
continue
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'series_num' in named_groups and match.group('series_num'):
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 3
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'air_date' in named_groups:
air_date = match.group('air_date')
try:
assert re.sub(r'[^\d]*', '', air_date) != '112263'
result.air_date = dateutil.parser.parse(air_date, fuzzy_with_tokens=True)[0].date()
result.score += 1
except Exception:
continue
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
if 'version' in named_groups:
# assigns version to anime file if detected using anime regex. Non-anime regex receives -1
version = match.group('version')
if version:
result.version = version
else:
result.version = 1
else:
result.version = -1
matches.append(result)
if matches:
# pick best match with highest score based on placement
bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = helpers.get_show(bestResult.series_name, self.tryIndexers)
# confirm passed in show object indexer id matches result show object indexer id
if show:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
bestResult.show = show
elif not show and self.showObj:
bestResult.show = self.showObj
# if this is a naming pattern test or result doesn't have a show object then return best result
if not bestResult.show or self.naming_pattern:
return bestResult
# get quality
bestResult.quality = common.Quality.nameQuality(name, bestResult.show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []
# if we have an air-by-date show then get the real season/episode numbers
if bestResult.is_air_by_date:
airdate = bestResult.air_date.toordinal()
main_db_con = db.DBConnection()
sql_result = main_db_con.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?",
[bestResult.show.indexerid, bestResult.show.indexer, airdate])
season_number = None
episode_numbers = []
if sql_result:
season_number = int(sql_result[0][0])
episode_numbers = [int(sql_result[0][1])]
if season_number is None or not episode_numbers:
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(bestResult.show.indexer).api_params.copy()
lINDEXER_API_PARMS['language'] = bestResult.show.lang or sickbeard.INDEXER_DEFAULT_LANGUAGE
t = sickbeard.indexerApi(bestResult.show.indexer).indexer(**lINDEXER_API_PARMS)
epObj = t[bestResult.show.indexerid].airedOn(bestResult.air_date)[0]
season_number = int(epObj["seasonnumber"])
episode_numbers = [int(epObj["episodenumber"])]
except sickbeard.indexer_episodenotfound:
logger.log(u"Unable to find episode with date " + str(bestResult.air_date) + " for show " + bestResult.show.name + ", skipping", logger.WARNING)
episode_numbers = []
except sickbeard.indexer_error as e:
logger.log(u"Unable to contact " + sickbeard.indexerApi(bestResult.show.indexer).name + ": " + ex(e), logger.WARNING)
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif bestResult.show.is_anime and bestResult.ab_episode_numbers:
scene_season = scene_exceptions.get_scene_exception_by_name(bestResult.series_name)[1]
for epAbsNo in bestResult.ab_episode_numbers:
a = epAbsNo
if bestResult.show.is_scene:
a = scene_numbering.get_indexer_absolute_numbering(bestResult.show.indexerid,
bestResult.show.indexer, epAbsNo,
True, scene_season)
(s, e) = helpers.get_all_episodes_from_absolute_number(bestResult.show, [a])
new_absolute_numbers.append(a)
new_episode_numbers.extend(e)
new_season_numbers.append(s)
elif bestResult.season_number and bestResult.episode_numbers:
for epNo in bestResult.episode_numbers:
s = bestResult.season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
bestResult.season_number,
epNo)
if bestResult.show.is_anime:
a = helpers.get_absolute_number_from_season_and_episode(bestResult.show, s, e)
if a:
new_absolute_numbers.append(a)
new_episode_numbers.append(e)
new_season_numbers.append(s)
# need to do a quick sanity check here. It's possible that we now have episodes
# from more than one season (by tvdb numbering), and this is just too much
# for sickbeard, so we'd need to flag it.
new_season_numbers = list(set(new_season_numbers)) # remove duplicates
if len(new_season_numbers) > 1:
raise InvalidNameException("Scene numbering results episodes from "
"seasons %s, (i.e. more than one) and "
"sickrage does not support this. "
"Sorry." % (str(new_season_numbers)))
# I guess it's possible that we'd have duplicate episodes too, so let's
# eliminate them
new_episode_numbers = list(set(new_episode_numbers))
new_episode_numbers.sort()
# maybe even duplicate absolute numbers so why not do them as well
new_absolute_numbers = list(set(new_absolute_numbers))
new_absolute_numbers.sort()
if new_absolute_numbers:
bestResult.ab_episode_numbers = new_absolute_numbers
if new_season_numbers and new_episode_numbers:
bestResult.episode_numbers = new_episode_numbers
bestResult.season_number = new_season_numbers[0]
if bestResult.show.is_scene:
logger.log(
u"Converted parsed result " + bestResult.original_name + " into " + str(bestResult).decode('utf-8',
'xmlcharrefreplace'),
logger.DEBUG)
# CPU sleep
time.sleep(0.02)
return bestResult
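Here time.sleep(0.02) is not a retry at all: the comment labels it a "CPU sleep", a tiny pause at the end of each parse so a tight loop over many release names yields the processor to other work. A minimal sketch of that throttling idea, where parse_one is a hypothetical per-item parser:

import time

def parse_names(names, parse_one):
    # parse_one is a hypothetical callable; the tiny sleep after each
    # item keeps a hot loop from monopolizing the CPU in a
    # cooperative, single-process application.
    results = []
    for name in names:
        results.append(parse_one(name))
        time.sleep(0.02)
    return results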
Example 54
Project: congress-legislators Source File: social_media.py
def main():
regexes = {
"youtube": [
"(?:https?:)?//(?:www\\.)?youtube.com/embed/?\?(list=[^\\s\"/\\?#&']+)",
"(?:https?:)?//(?:www\\.)?youtube.com/channel/([^\\s\"/\\?#']+)",
"(?:https?:)?//(?:www\\.)?youtube.com/(?:subscribe_widget\\?p=)?(?:subscription_center\\?add_user=)?(?:user/)?([^\\s\"/\\?#']+)"
],
"facebook": [
"\\('facebook.com/([^']+)'\\)",
"(?:https?:)?//(?:www\\.)?facebook.com/(?:home\\.php)?(?:business/dashboard/#/)?(?:government)?(?:#!/)?(?:#%21/)?(?:#/)?pages/[^/]+/(\\d+)",
"(?:https?:)?//(?:www\\.)?facebook.com/(?:profile.php\\?id=)?(?:home\\.php)?(?:#!)?/?(?:people)?/?([^/\\s\"#\\?&']+)"
],
"twitter": [
"(?:https?:)?//(?:www\\.)?twitter.com/(?:intent/user\?screen_name=)?(?:#!/)?(?:#%21/)?@?([^\\s\"'/]+)",
"\\.render\\(\\)\\.setUser\\('@?(.*?)'\\)\\.start\\(\\)"
],
"instagram": [
"instagram.com/(\w{3,})"
]
}
email_enabled = utils.flags().get('email', False)
debug = utils.flags().get('debug', False)
do_update = utils.flags().get('update', False)
do_clean = utils.flags().get('clean', False)
do_verify = utils.flags().get('verify', False)
do_resolvefb = utils.flags().get('resolvefb', False)
do_resolveyt = utils.flags().get('resolveyt', False)
do_resolveig = utils.flags().get('resolveig', False)
do_resolvetw = utils.flags().get('resolvetw', False)
# default to not caching
cache = utils.flags().get('cache', False)
force = not cache
if do_resolvefb:
service = "facebook"
elif do_resolveyt:
service = "youtube"
elif do_resolveig:
service = "instagram"
elif do_resolvetw:
service = "twitter"
else:
service = utils.flags().get('service', None)
if service not in ["twitter", "youtube", "facebook", "instagram"]:
print("--service must be one of twitter, youtube, facebook, or instagram")
exit(0)
# load in members, orient by bioguide ID
print("Loading current legislators...")
current = load_data("legislators-current.yaml")
current_bioguide = { }
for m in current:
if "bioguide" in m["id"]:
current_bioguide[m["id"]["bioguide"]] = m
print("Loading blacklist...")
blacklist = {
'twitter': [], 'facebook': [], 'youtube': [], 'instagram': []
}
for rec in csv.DictReader(open("data/social_media_blacklist.csv")):
blacklist[rec["service"]].append(rec["pattern"])
print("Loading whitelist...")
whitelist = {
'twitter': [], 'facebook': [], 'youtube': []
}
for rec in csv.DictReader(open("data/social_media_whitelist.csv")):
whitelist[rec["service"]].append(rec["account"].lower())
# reorient currently known social media by ID
print("Loading social media...")
media = load_data("legislators-social-media.yaml")
media_bioguide = { }
for m in media:
media_bioguide[m["id"]["bioguide"]] = m
def resolvefb():
# in order to preserve the comment block at the top of the file,
# copy it over into a new RtYamlList instance. We do this because
# Python list instances can't hold other random attributes.
import rtyaml
updated_media = rtyaml.RtYamlList()
if hasattr(media, '__initial_comment_block'):
updated_media.__initial_comment_block = getattr(media, '__initial_comment_block')
for m in media:
social = m['social']
if ('facebook' in social and social['facebook']) and ('facebook_id' not in social):
graph_url = "https://graph.facebook.com/%s" % social['facebook']
if re.match('\d+', social['facebook']):
social['facebook_id'] = social['facebook']
print("Looking up graph username for %s" % social['facebook'])
fbobj = requests.get(graph_url).json()
if 'username' in fbobj:
print("\tGot graph username of %s" % fbobj['username'])
social['facebook'] = fbobj['username']
else:
print("\tUnable to get graph username")
else:
try:
print("Looking up graph ID for %s" % social['facebook'])
fbobj = requests.get(graph_url).json()
if 'id' in fbobj:
print("\tGot graph ID of %s" % fbobj['id'])
social['facebook_id'] = fbobj['id']
else:
print("\tUnable to get graph ID")
except:
print("\tUnable to get graph ID for: %s" % social['facebook'])
social['facebook_id'] = None
updated_media.append(m)
print("Saving social media...")
save_data(updated_media, "legislators-social-media.yaml")
def resolveyt():
# To avoid hitting quota limits, register for a YouTube 2.0 API key at
# https://code.google.com/apis/youtube/dashboard
# and put it below
api_file = open('cache/youtube_api_key','r')
api_key = api_file.read()
bioguide = utils.flags().get('bioguide', None)
updated_media = []
for m in media:
if bioguide and (m['id']['bioguide'] != bioguide):
updated_media.append(m)
continue
social = m['social']
if ('youtube' in social) or ('youtube_id' in social):
if 'youtube' not in social:
social['youtube'] = social['youtube_id']
ytid = social['youtube']
profile_url = ("https://gdata.youtube.com/feeds/api/users/%s"
"?v=2&prettyprint=true&alt=json&key=%s" % (ytid, api_key))
try:
print("Resolving YT info for %s" % social['youtube'])
ytreq = requests.get(profile_url)
# print "\tFetched with status code %i..." % ytreq.status_code
if ytreq.status_code == 404:
# If the account name isn't valid, it's probably a redirect.
try:
# Try to scrape the real YouTube username
print("\Scraping YouTube username")
search_url = ("https://www.youtube.com/%s" % social['youtube'])
csearch = requests.get(search_url).text.encode('ascii','ignore')
u = re.search(r'<a[^>]*href="[^"]*/user/([^/"]*)"[.]*>',csearch)
if u:
print("\t%s maps to %s" % (social['youtube'],u.group(1)))
social['youtube'] = u.group(1)
profile_url = ("https://gdata.youtube.com/feeds/api/users/%s"
"?v=2&prettyprint=true&alt=json" % social['youtube'])
print("\tFetching GData profile...")
ytreq = requests.get(profile_url)
print("\tFetched GData profile")
else:
raise Exception("Couldn't figure out the username format for %s" % social['youtube'])
except:
print("\tCouldn't locate YouTube account")
raise
ytobj = ytreq.json()
social['youtube_id'] = ytobj['entry']['yt$channelId']['$t']
print("\tResolved youtube_id to %s" % social['youtube_id'])
# even though we have their channel ID, do they also have a username?
if ytobj['entry']['yt$username']['$t'] != ytobj['entry']['yt$userId']['$t']:
if social['youtube'].lower() != ytobj['entry']['yt$username']['$t'].lower():
# YT accounts are case-insensitive. Preserve capitalization if possible.
social['youtube'] = ytobj['entry']['yt$username']['$t']
print("\tAdded YouTube username of %s" % social['youtube'])
else:
print("\tYouTube says they do not have a separate username")
del social['youtube']
except:
print("Unable to get YouTube Channel ID for: %s" % social['youtube'])
updated_media.append(m)
print("Saving social media...")
save_data(updated_media, "legislators-social-media.yaml")
def resolveig():
# in order to preserve the comment block at the top of the file,
# copy it over into a new RtYamlList instance. We do this because
# Python list instances can't hold other random attributes.
import rtyaml
updated_media = rtyaml.RtYamlList()
if hasattr(media, '__initial_comment_block'):
updated_media.__initial_comment_block = getattr(media, '__initial_comment_block')
client_id_file = open('cache/instagram_client_id','r')
client_id = client_id_file.read()
bioguide = utils.flags().get('bioguide', None)
for m in media:
if bioguide and (m['id']['bioguide'] != bioguide):
updated_media.append(m)
continue
social = m['social']
if 'instagram' not in social and 'instagram_id' not in social:
updated_media.append(m)
continue
instagram_handle = social['instagram']
query_url = "https://api.instagram.com/v1/users/search?q={query}&client_id={client_id}".format(query=instagram_handle,client_id=client_id)
instagram_user_search = requests.get(query_url).json()
for user in instagram_user_search['data']:
time.sleep(0.5)
if user['username'] == instagram_handle:
m['social']['instagram_id'] = int(user['id'])
print("matched instagram_id {instagram_id} to {instagram_handle}".format(instagram_id=social['instagram_id'],instagram_handle=instagram_handle))
updated_media.append(m)
save_data(updated_media, "legislators-social-media.yaml")
def resolvetw():
"""
Does two batch lookups:
1. All entries with `twitter_id`: Checks to see if the corresponding Twitter profile has the same screen_name
as found in the entry's `twitter`. If not, the `twitter` value is updated.
2. All entries with `twitter` (but not `twitter_id`): fetches the corresponding Twitter profile by screen_name and
inserts ID. If no profile is found, the `twitter` value is deleted.
Note: cache/twitter_client_id must be a formatted JSON dict:
{
"consumer_secret": "xyz",
"access_token": "abc",
"access_token_secret": "def",
"consumer_key": "jk"
}
"""
import rtyaml
from social.twitter import get_api, fetch_profiles
updated_media = rtyaml.RtYamlList()
if hasattr(media, '__initial_comment_block'):
updated_media.__initial_comment_block = getattr(media, '__initial_comment_block')
client_id_file = open('cache/twitter_client_id', 'r')
_c = json.load(client_id_file)
api = get_api(_c['access_token'], _c['access_token_secret'], _c['consumer_key'], _c['consumer_secret'])
bioguide = utils.flags().get('bioguide', None)
lookups = {'screen_names': [], 'ids': []} # store members that have `twitter` or `twitter_id` info
for m in media:
# we start with appending to updated_media so that we keep the same order of entries
# as found in the loaded file
updated_media.append(m)
if bioguide and (m['id']['bioguide'] != bioguide):
continue
social = m['social']
# now we add entries to either the `ids` or the `screen_names` list to batch lookup
if 'twitter_id' in social:
# add to the queue to be batched-looked-up
lookups['ids'].append(m)
# append
elif 'twitter' in social:
lookups['screen_names'].append(m)
#######################################
# perform Twitter batch lookup for ids:
if lookups['screen_names']:
arr = lookups['screen_names']
print("Looking up Twitter ids for", len(arr), "names.")
tw_names = [m['social']['twitter'] for m in arr]
tw_profiles = fetch_profiles(api, screen_names = tw_names)
for m in arr:
social = m['social']
# find profile that corresponds to a given screen_name
twitter_handle = social['twitter']
twp = next((p for p in tw_profiles if p['screen_name'].lower() == twitter_handle.lower()), None)
if twp:
m['social']['twitter_id'] = int(twp['id'])
print("Matched twitter_id `%s` to `%s`" % (social['twitter_id'], twitter_handle))
else:
# Remove errant Twitter entry for now
print("No Twitter user profile for:", twitter_handle)
m['social'].pop('twitter')
print("\t ! removing Twitter handle:", twitter_handle)
##########################################
# perform Twitter batch lookup for names by id, to update any renamings:
if lookups['ids']:
arr = lookups['ids']
print("Looking up Twitter screen_names for", len(arr), "ids.")
tw_ids = [m['social']['twitter_id'] for m in arr]
tw_profiles = fetch_profiles(api, ids = tw_ids)
any_renames_needed = False
for m in arr:
social = m['social']
# find profile that corresponds to a given screen_name
t_id = social['twitter_id']
t_name = social.get('twitter')
twp = next((p for p in tw_profiles if int(p['id']) == t_id), None)
if twp:
# Be silent if there is no change to screen name
if t_name and (twp['screen_name'].lower() == t_name.lower()):
pass
else:
any_renames_needed = True
m['social']['twitter'] = twp['screen_name']
print("For twitter_id `%s`, renamed `%s` to `%s`" % (t_id, t_name, m['social']['twitter']))
else:
# No entry found for this twitter id
print("No Twitter user profile for %s, %s" % (t_id, t_name))
m['social'].pop('twitter_id')
print("\t ! removing Twitter id:", t_id)
if not any_renames_needed:
print("No renames needed")
# all done with Twitter
save_data(updated_media, "legislators-social-media.yaml")
def sweep():
to_check = []
bioguide = utils.flags().get('bioguide', None)
if bioguide:
possibles = [bioguide]
else:
possibles = list(current_bioguide.keys())
for bioguide in possibles:
if media_bioguide.get(bioguide, None) is None:
to_check.append(bioguide)
elif (media_bioguide[bioguide]["social"].get(service, None) is None) and \
(media_bioguide[bioguide]["social"].get(service + "_id", None) is None):
to_check.append(bioguide)
else:
pass
utils.mkdir_p("cache/social_media")
writer = csv.writer(open("cache/social_media/%s_candidates.csv" % service, 'w'))
writer.writerow(["bioguide", "official_full", "website", "service", "candidate", "candidate_url"])
if len(to_check) > 0:
rows_found = []
for bioguide in to_check:
candidate = candidate_for(bioguide)
if candidate:
url = current_bioguide[bioguide]["terms"][-1].get("url", None)
candidate_url = "https://%s.com/%s" % (service, candidate)
row = [bioguide, current_bioguide[bioguide]['name']['official_full'].encode('utf-8'), url, service, candidate, candidate_url]
writer.writerow(row)
print("\tWrote: %s" % candidate)
rows_found.append(row)
if email_enabled and len(rows_found) > 0:
email_body = "Social media leads found:\n\n"
for row in rows_found:
email_body += ("%s\n" % row)
utils.send_email(email_body)
def verify():
bioguide = utils.flags().get('bioguide', None)
if bioguide:
to_check = [bioguide]
else:
to_check = list(media_bioguide.keys())
for bioguide in to_check:
entry = media_bioguide[bioguide]
current = entry['social'].get(service, None)
if not current:
continue
bioguide = entry['id']['bioguide']
candidate = candidate_for(bioguide)
if not candidate:
# if current is in whitelist, and none is on the page, that's okay
if current.lower() in whitelist[service]:
continue
else:
candidate = ""
url = current_bioguide[bioguide]['terms'][-1].get('url')
if current.lower() != candidate.lower():
print("[%s] mismatch on %s - %s -> %s" % (bioguide, url, current, candidate))
def update():
for rec in csv.DictReader(open("cache/social_media/%s_candidates.csv" % service)):
bioguide = rec["bioguide"]
candidate = rec["candidate"]
if bioguide in media_bioguide:
media_bioguide[bioguide]['social'][service] = candidate
else:
new_media = {'id': {}, 'social': {}}
new_media['id']['bioguide'] = bioguide
thomas_id = current_bioguide[bioguide]['id'].get("thomas", None)
govtrack_id = current_bioguide[bioguide]['id'].get("govtrack", None)
if thomas_id:
new_media['id']['thomas'] = thomas_id
if govtrack_id:
new_media['id']['govtrack'] = govtrack_id
new_media['social'][service] = candidate
media.append(new_media)
print("Saving social media...")
save_data(media, "legislators-social-media.yaml")
# if it's a youtube update, always do the resolve
# if service == "youtube":
# resolveyt()
def clean():
print("Loading historical legislators...")
historical = load_data("legislators-historical.yaml")
count = 0
for m in historical:
if m["id"]["bioguide"] in media_bioguide:
media.remove(media_bioguide[m["id"]["bioguide"]])
count += 1
print("Removed %i out of office legislators from social media file..." % count)
print("Saving historical legislators...")
save_data(media, "legislators-social-media.yaml")
def candidate_for(bioguide):
url = current_bioguide[bioguide]["terms"][-1].get("url", None)
if not url:
if debug:
print("[%s] No official website, skipping" % bioguide)
return None
if debug:
print("[%s] Downloading..." % bioguide)
cache = "congress/%s.html" % bioguide
body = utils.download(url, cache, force, {'check_redirects': True})
if not body:
return None
all_matches = []
for regex in regexes[service]:
matches = re.findall(regex, body, re.I)
if matches:
all_matches.extend(matches)
if all_matches:
for candidate in all_matches:
passed = True
for blacked in blacklist[service]:
if re.search(blacked, candidate, re.I):
passed = False
if not passed:
if debug:
print("\tBlacklisted: %s" % candidate)
continue
return candidate
return None
if do_update:
update()
elif do_clean:
clean()
elif do_verify:
verify()
elif do_resolvefb:
resolvefb()
elif do_resolveyt:
resolveyt()
elif do_resolveig:
resolveig()
elif do_resolvetw:
resolvetw()
else:
sweep()
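In resolveig() above, time.sleep(0.5) spaces out iterations over Instagram API search results, which is simple client-side rate limiting. A minimal sketch of the same idea, where fetch is a hypothetical callable such as requests.get:

import time

def fetch_all(urls, fetch, delay=0.5):
    # fetch is a hypothetical callable; sleeping between calls is
    # crude client-side rate limiting for a third-party API.
    results = []
    for url in urls:
        results.append(fetch(url))
        time.sleep(delay)
    return results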
Example 55
Project: gmusicapi Source File: musicmanager.py
@utils.accept_singleton(basestring)
@utils.empty_arg_shortcircuit(return_code='{}')
def upload(self, filepaths, enable_matching=False,
enable_transcoding=True, transcode_quality='320k'):
"""Uploads the given filepaths.
All non-mp3 files will be transcoded before being uploaded.
This is a limitation of Google's backend.
An available installation of ffmpeg or avconv is required in most cases:
see `the installation page
<https://unofficial-google-music-api.readthedocs.io/en
/latest/usage.html?#installation>`__ for details.
Returns a 3-tuple ``(uploaded, matched, not_uploaded)`` of dictionaries, eg::
(
{'<filepath>': '<new server id>'}, # uploaded
{'<filepath>': '<new server id>'}, # matched
{'<filepath>': '<reason, eg ALREADY_EXISTS>'} # not uploaded
)
:param filepaths: a list of filepaths, or a single filepath.
:param enable_matching: if ``True``, attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__
to avoid uploading every song.
This requires ffmpeg or avconv.
**WARNING**: currently, mismatched songs can *not* be fixed with the 'Fix Incorrect Match'
button nor :py:func:`report_incorrect_match
<gmusicapi.clients.Webclient.report_incorrect_match>`.
They would have to be deleted and reuploaded with matching disabled
(or with the Music Manager).
Fixing matches from gmusicapi may be supported in a future release; see issue `#89
<https://github.com/simon-weber/gmusicapi/issues/89>`__.
:param enable_transcoding:
if ``False``, non-MP3 files that aren't matched using `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__
will not be uploaded.
:param transcode_quality: if int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame
(`lower-better int,
<http://trac.ffmpeg.org/wiki/Encoding%20VBR%20(Variable%20Bit%20Rate)%20mp3%20audio>`__).
If string, pass to ffmpeg/avconv ``-b:a`` (eg ``'128k'`` for an average bitrate of 128k).
The default is 320kbps cbr (the highest possible quality).
All Google-supported filetypes are supported; see `Google's documentation
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1100462>`__.
If ``PERMANENT_ERROR`` is given as a not_uploaded reason, attempts to reupload will never
succeed. The file will need to be changed before the server will reconsider it; the easiest
way is to change metadata tags (it's not important that the tag be uploaded, just that the
contents of the file change somehow).
"""
if self.uploader_id is None or self.uploader_name is None:
raise NotLoggedIn("Not authenticated as an upload device;"
" run Api.login(...perform_upload_auth=True...)"
" first.")
# TODO there is way too much code in this function.
# To return.
uploaded = {}
matched = {}
not_uploaded = {}
# Gather local information on the files.
local_info = {} # {clientid: (path, Track)}
for path in filepaths:
try:
track = musicmanager.UploadMetadata.fill_track_info(path)
except BaseException as e:
self.logger.exception("problem gathering local info of '%r'", path)
user_err_msg = str(e)
if 'Non-ASCII strings must be converted to unicode' in str(e):
# This is a protobuf-specific error; they require either ascii or unicode.
# To keep behavior consistent, make no effort to guess - require users
# to decode first.
user_err_msg = ("nonascii bytestrings must be decoded to unicode"
" (error: '%s')" % user_err_msg)
not_uploaded[path] = user_err_msg
else:
local_info[track.client_id] = (path, track)
if not local_info:
return uploaded, matched, not_uploaded
# TODO allow metadata faking
# Upload metadata; the server tells us what to do next.
res = self._make_call(musicmanager.UploadMetadata,
[t for (path, t) in local_info.values()],
self.uploader_id)
# TODO checking for proper contents should be handled in verification
md_res = res.metadata_response
responses = [r for r in md_res.track_sample_response]
sample_requests = [req for req in md_res.signed_challenge_info]
# Send scan and match samples if requested.
for sample_request in sample_requests:
path, track = local_info[sample_request.challenge_info.client_track_id]
bogus_sample = None
if not enable_matching:
bogus_sample = b'' # just send empty bytes
try:
res = self._make_call(musicmanager.ProvideSample,
path, sample_request, track,
self.uploader_id, bogus_sample)
except (IOError, ValueError) as e:
self.logger.warning("couldn't create scan and match sample for '%r': %s",
path, str(e))
not_uploaded[path] = str(e)
else:
responses.extend(res.sample_response.track_sample_response)
# Read sample responses and prep upload requests.
to_upload = {} # {serverid: (path, Track, do_not_rematch?)}
for sample_res in responses:
path, track = local_info[sample_res.client_track_id]
if sample_res.response_code == upload_pb2.TrackSampleResponse.MATCHED:
self.logger.info("matched '%r' to sid %s", path, sample_res.server_track_id)
matched[path] = sample_res.server_track_id
if not enable_matching:
self.logger.error("'%r' was matched without matching enabled", path)
elif sample_res.response_code == upload_pb2.TrackSampleResponse.UPLOAD_REQUESTED:
to_upload[sample_res.server_track_id] = (path, track, False)
else:
# there was a problem
# report the symbolic name of the response code enum for debugging
enum_desc = upload_pb2._TRACKSAMPLERESPONSE.enum_types[0]
res_name = enum_desc.values_by_number[sample_res.response_code].name
err_msg = "TrackSampleResponse code %s: %s" % (sample_res.response_code, res_name)
if res_name == 'ALREADY_EXISTS':
# include the sid, too
# this shouldn't be relied on externally, but I use it in
# tests - being surrounded by parens is how it's matched
err_msg += "(%s)" % sample_res.server_track_id
self.logger.warning("upload of '%r' rejected: %s", path, err_msg)
not_uploaded[path] = err_msg
# Send upload requests.
if to_upload:
# TODO reordering requests could avoid wasting time waiting for reup sync
self._make_call(musicmanager.UpdateUploadState, 'start', self.uploader_id)
for server_id, (path, track, do_not_rematch) in to_upload.items():
# It can take a few tries to get a session.
should_retry = True
attempts = 0
while should_retry and attempts < 10:
session = self._make_call(musicmanager.GetUploadSession,
self.uploader_id, len(uploaded),
track, path, server_id, do_not_rematch)
attempts += 1
got_session, error_details = \
musicmanager.GetUploadSession.process_session(session)
if got_session:
self.logger.info("got an upload session for '%r'", path)
break
should_retry, reason, error_code = error_details
self.logger.debug("problem getting upload session: %s\ncode=%s retrying=%s",
reason, error_code, should_retry)
if error_code == 200 and do_not_rematch:
# reupload requests need to wait on a server sync
# 200 == already uploaded, so force a retry in this case
should_retry = True
time.sleep(6) # wait before retrying
else:
err_msg = "GetUploadSession error %s: %s" % (error_code, reason)
self.logger.warning("giving up on upload session for '%r': %s", path, err_msg)
not_uploaded[path] = err_msg
continue # to next upload
# got a session, do the upload
# this terribly inconsistent naming isn't my fault: Google--
session = session['sessionStatus']
external = session['externalFieldTransfers'][0]
session_url = external['putInfo']['url']
content_type = external.get('content_type', 'audio/mpeg')
if track.original_content_type != locker_pb2.Track.MP3:
if enable_transcoding:
try:
self.logger.info("transcoding '%r' to mp3", path)
contents = utils.transcode_to_mp3(path, quality=transcode_quality)
except (IOError, ValueError) as e:
self.logger.warning("error transcoding %r: %s", path, e)
not_uploaded[path] = "transcoding error: %s" % e
continue
else:
not_uploaded[path] = "transcoding disabled"
continue
else:
with open(path, 'rb') as f:
contents = f.read()
upload_response = self._make_call(musicmanager.UploadFile,
session_url, content_type, contents)
success = upload_response.get('sessionStatus', {}).get('state')
if success:
uploaded[path] = server_id
else:
# 404 == already uploaded? serverside check on clientid?
self.logger.debug("could not finalize upload of '%r'. response: %s",
path, upload_response)
not_uploaded[path] = 'could not finalize upload; details in log'
self._make_call(musicmanager.UpdateUploadState, 'stopped', self.uploader_id)
return uploaded, matched, not_uploaded
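The time.sleep(6) above sits in a bounded polling loop: when the server reports that an upload session is not ready (or forces a retry after a reupload), the client waits and asks again, up to ten attempts. A minimal sketch, where request_session is a hypothetical callable returning a (session_or_None, should_retry) pair:

import time

def get_session(request_session, max_attempts=10, wait=6):
    # request_session is a hypothetical callable; this mirrors the
    # bounded retry loop in the upload code above.
    for _ in range(max_attempts):
        session, should_retry = request_session()
        if session is not None:
            return session
        if not should_retry:
            break
        time.sleep(wait)  # wait out the server-side sync before retrying
    return None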
Example 56
Project: Spawning Source File: spawning_controller.py
def main():
current_directory = os.path.realpath('.')
if current_directory not in sys.path:
sys.path.append(current_directory)
parser = optparse.OptionParser(description="Spawning is an easy-to-use and flexible wsgi server. It supports graceful restarting so that your site finishes serving any old requests while starting new processes to handle new requests with the new code. For the simplest usage, simply pass the dotted path to your wsgi application: 'spawn my_module.my_wsgi_app'", version=spawning.__version__)
parser.add_option('-v', '--verbose', dest='verbose', action='store_true', help='Display verbose configuration '
'information when starting up or restarting.')
parser.add_option("-f", "--factory", dest='factory', default='spawning.wsgi_factory.config_factory',
help="""Dotted path (eg mypackage.mymodule.myfunc) to a callable which takes a dictionary containing the command line arguments and figures out what needs to be done to start the wsgi application. Current valid values are: spawning.wsgi_factory.config_factory, spawning.paste_factory.config_factory, and spawning.django_factory.config_factory. The factory used determines what the required positional command line arguments will be. See the spawning.wsgi_factory module for docuementation on how to write a new factory.
""")
parser.add_option("-i", "--host",
dest='host', default=DEFAULTS['host'],
help='The local ip address to bind.')
parser.add_option("-p", "--port",
dest='port', type='int', default=DEFAULTS['port'],
help='The local port address to bind.')
parser.add_option("-s", "--processes",
dest='processes', type='int', default=DEFAULTS['num_processes'],
help='The number of unix processes to start to use for handling web i/o.')
parser.add_option("-t", "--threads",
dest='threads', type='int', default=DEFAULTS['threadpool_workers'],
help="The number of posix threads to use for handling web requests. "
"If threads is 0, do not use threads but instead use eventlet's cooperative "
"greenlet-based microthreads, monkeypatching the socket and pipe operations which normally block "
"to cooperate instead. Note that most blocking database api modules will not "
"automatically cooperate.")
parser.add_option('-d', '--daemonize', dest='daemonize', action='store_true',
help="Daemonize after starting children.")
parser.add_option('-u', '--chuid', dest='chuid', metavar="ID",
help="Change user ID in daemon mode (and group ID if given, "
"separate with colon.)")
parser.add_option('--pidfile', dest='pidfile', metavar="FILE",
help="Write own process ID to FILE in daemon mode.")
parser.add_option('--stdout', dest='stdout', metavar="FILE",
help="Redirect stdout to FILE in daemon mode.")
parser.add_option('--stderr', dest='stderr', metavar="FILE",
help="Redirect stderr to FILE in daemon mode.")
parser.add_option('-w', '--watch', dest='watch', action='append',
help="Watch the given file's modification time. If the file changes, the web server will "
'restart gracefully, allowing old requests to complete in the old processes '
'while starting new processes with the latest code or configuration.')
## TODO Hook up the svn reloader again
parser.add_option("-r", "--reload",
type='str', dest='reload',
help='If --reload=dev is passed, reload any time '
'a loaded module or configuration file changes.')
parser.add_option("--deadman", "--deadman_timeout",
type='int', dest='deadman_timeout', default=DEFAULTS['deadman_timeout'],
help='When killing an old i/o process because the code has changed, don\'t wait '
'any longer than the deadman timeout value for the process to gracefully exit. '
'If all requests have not completed by the deadman timeout, the process will be mercilessly killed.')
parser.add_option('-l', '--access-log-file', dest='access_log_file', default=None,
help='The file to log access log lines to. If not given, log to stdout. Pass /dev/null to discard logs.')
parser.add_option('-c', '--coverage', dest='coverage', action='store_true',
help='If given, gather coverage data from the running program and make the '
'coverage report available from the /_coverage url. See the figleaf docs '
'for more info: http://darcs.idyll.org/~t/projects/figleaf/doc/')
parser.add_option('--sysinfo', dest='sysinfo', action='store_true',
help='If given, gather system information data and make the '
'report available from the /_sysinfo url.')
parser.add_option('-m', '--max-memory', dest='max_memory', type='int', default=0,
help='If given, the maximum amount of memory this instance of Spawning '
'is allowed to use. If all of the processes started by this Spawning controller '
'use more than this amount of memory, send a SIGHUP to the controller '
'to get the children to restart.')
parser.add_option('--backdoor', dest='backdoor', action='store_true',
help='Start a backdoor bound to localhost:3000')
parser.add_option('-a', '--max-age', dest='max_age', type='int',
help='If given, the maximum amount of time (in seconds) an instance of spawning_child '
'is allowed to run. Once this time limit has expired the child will '
'gracefully kill itself while the server starts a replacement.')
parser.add_option('--no-keepalive', dest='no_keepalive', action='store_true',
help='Disable HTTP/1.1 KeepAlive')
parser.add_option('-z', '--z-restart-args', dest='restart_args',
help='For internal use only')
parser.add_option('--status-port', dest='status_port', type='int', default=0,
help='If given, hosts a server status page at that port. Two pages are served: a human-readable HTML version at http://host:status_port/status, and a machine-readable version at http://host:status_port/status.json')
parser.add_option('--status-host', dest='status_host', type='string', default='',
help='If given, binds the server status page to the specified local ip address. Defaults to the same value as --host. If --status-port is not supplied, the status page will not be activated.')
options, positional_args = parser.parse_args()
if len(positional_args) < 1 and not options.restart_args:
parser.error("At least one argument is required. "
"For the default factory, it is the dotted path to the wsgi application "
"(eg my_package.my_module.my_wsgi_application). For the paste factory, it "
"is the ini file to load. Pass --help for detailed information about available options.")
if options.backdoor:
try:
eventlet.spawn(eventlet.backdoor.backdoor_server, eventlet.listen(('localhost', 3000)))
except Exception, ex:
sys.stderr.write('**> Error opening backdoor: %s\n' % ex)
sock = None
if options.restart_args:
restart_args = json.loads(options.restart_args)
factory = restart_args['factory']
factory_args = restart_args['factory_args']
start_delay = restart_args.get('start_delay')
if start_delay is not None:
factory_args['start_delay'] = start_delay
print "(%s) delaying startup by %s" % (os.getpid(), start_delay)
time.sleep(start_delay)
fd = restart_args.get('fd')
if fd is not None:
sock = socket.fromfd(restart_args['fd'], socket.AF_INET, socket.SOCK_STREAM)
## socket.fromfd doesn't result in a socket object that has the same fd.
## The old fd is still open however, so we close it so we don't leak.
os.close(restart_args['fd'])
return start_controller(sock, factory, factory_args)
## We're starting up for the first time.
if options.daemonize:
# Do the daemon dance. Note that this isn't what is considered good
# daemonization, because frankly it's convenient to keep the file
# descriptors open (especially when there are prints scattered all
# over the codebase.)
# What we do instead is fork off, create a new session, fork again.
# This leaves the process group in a state without a session
# leader.
pid = os.fork()
if not pid:
os.setsid()
pid = os.fork()
if pid:
os._exit(0)
else:
os._exit(0)
print "(%s) now daemonized" % (os.getpid(),)
# Close _all_ open (and otherwise!) files.
import resource
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = 4096
for fdnum in xrange(maxfd):
try:
os.close(fdnum)
except OSError, e:
if e.errno != errno.EBADF:
raise
# Remap std{in,out,err}
devnull = os.open(os.path.devnull, os.O_RDWR)
oflags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
if devnull != 0: # stdin
os.dup2(devnull, 0)
if options.stdout:
stdout_fd = os.open(options.stdout, oflags)
if stdout_fd != 1:
os.dup2(stdout_fd, 1)
os.close(stdout_fd)
else:
os.dup2(devnull, 1)
if options.stderr:
stderr_fd = os.open(options.stderr, oflags)
if stderr_fd != 2:
os.dup2(stderr_fd, 2)
os.close(stderr_fd)
else:
os.dup2(devnull, 2)
# Change user & group ID.
if options.chuid:
user, group = set_process_owner(options.chuid)
print "(%s) set user=%s group=%s" % (os.getpid(), user, group)
else:
# Become a process group leader only if not daemonizing.
os.setpgrp()
## Fork off the thing that watches memory for this process group.
controller_pid = os.getpid()
if options.max_memory and not os.fork():
env = environ()
from spawning import memory_watcher
basedir, cmdname = os.path.split(memory_watcher.__file__)
if cmdname.endswith('.pyc'):
cmdname = cmdname[:-1]
os.chdir(basedir)
command = [
sys.executable,
cmdname,
'--max-age', str(options.max_age),
str(controller_pid),
str(options.max_memory)]
os.execve(sys.executable, command, env)
factory = options.factory
# If you tell me to watch something, I'm going to reload when it changes
if options.watch:
options.reload = True
if options.status_port == options.port:
options.status_port = None
sys.stderr.write('**> Status port cannot be the same as the service port, disabling status.\n')
factory_args = {
'verbose': options.verbose,
'host': options.host,
'port': options.port,
'num_processes': options.processes,
'threadpool_workers': options.threads,
'watch': options.watch,
'reload': options.reload,
'deadman_timeout': options.deadman_timeout,
'access_log_file': options.access_log_file,
'pidfile': options.pidfile,
'coverage': options.coverage,
'sysinfo': options.sysinfo,
'no_keepalive' : options.no_keepalive,
'max_age' : options.max_age,
'argv_str': " ".join(sys.argv[1:]),
'args': positional_args,
'status_port': options.status_port,
'status_host': options.status_host or options.host
}
start_controller(sock, factory, factory_args)
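The restart path near the top of this example is the heart of the pattern: the parent serializes restart arguments as JSON, and the child honors an optional start_delay with time.sleep before taking over the socket. A minimal sketch of that handoff, with illustrative names:

import json
import os
import time

def start_from_restart_args(blob, start_fn):
    # Parse the JSON blob the parent passed via --z-restart-args.
    args = json.loads(blob)
    delay = args.get('start_delay')
    if delay is not None:
        # Same delayed-startup behavior as above.
        print("(%s) delaying startup by %s" % (os.getpid(), delay))
        time.sleep(delay)
    # start_fn stands in for start_controller(sock, factory, factory_args).
    return start_fn(args['factory'], args.get('factory_args', {}))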
0
Example 57
Project: amun Source File: amun_request_handler.py
def collect_incoming_data(self, data):
try:
### proxy/mirror
### TODO: move to extra function to avoid redundancy
if self.enableProxy and self.proxyMode:
self.proxyShellcode.append(data)
self.proxyRequests.append(data)
self.sendRequest = "".join(data)
bytesTosend = len(self.sendRequest)
if bytesTosend==0:
shellcodeSet = {}
shellcodeSet['vulnname'] = "ProxyCapture"
shellcodeSet['shellcode'] = "".join(self.proxyShellcode)
self.proxyResult = self.shellcode_manager.start_matching( shellcodeSet, self.remote_ip, self.own_ip, self.own_port, self.replace_locals, False )
### to XML generator
print self.proxyRequests
print self.proxyReplies
if not self.proxyTimedOut:
try:
self.log_obj.log("sending data to proxy host %s (%s)" % (self.proxytoIP, bytesTosend), 6, "debug", False, True)
while bytesTosend>0:
bytes_send = self.origin_socket.send(self.sendRequest)
bytesTosend = bytesTosend - bytes_send
self.sendRequest = self.sendRequest[bytes_send:]
except socket.error, e:
self.log_obj.log("sending to proxy/remote host failed %s" % (self.proxytoIP), 6, "crit", False, True)
pass
while True:
try:
self.log_obj.log("waiting for data from proxy host %s" % (self.proxytoIP), 6, "debug", False, True)
self.origin_socket.settimeout(2.0)
self.out_buffer = self.origin_socket.recv(1024)
self.log_obj.log("received %s bytes from proxy" % (len(self.out_buffer)), 6, "debug", False, True)
self.proxyReplies.append(self.out_buffer)
if len(self.out_buffer)<1024 and len(self.out_buffer)!=0:
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
break
elif self.out_buffer=='':
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
break
elif len(self.out_buffer)==0:
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
break
else:
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
time.sleep(.0001)
except socket.error, e:
if e[0]==11:
pass
else:
self.proxyTimedOut = True
break
except KeyboardInterrupt:
raise
self.out_buffer = ""
self.sendRequest = ""
event_item = None
if self.currentConnections.has_key(self.identifier):
### existing connection
vuln_modulList = self.get_existing_connection()
else:
### create new connection
vuln_modulList = self.set_existing_connection()
### set initial state
state ="amun_not_set"
### handle vulnerabilities
if self.proxyResult == None:
(result,state) = self.handle_vulnerabilities(data, vuln_modulList)
### update connection entry
self.update_existing_connection(vuln_modulList)
self.set_new_socket_connection()
else:
result = shellcodeSet
result['vuln_modul'] = "ProxyCapture"
result['shellresult'] = self.proxyResult
### check for shellcode and start download manager
if result['shellresult']!="None":
for resEntry in result['shellresult']:
if resEntry['result']:
for key in vuln_modulList.keys():
del vuln_modulList[key]
### create exploit event
event_item = (self.remote_ip,
self.remote_port,
self.own_ip,
self.own_port,
result['vuln_modul'],
int(time.time()),
resEntry)
if not self.event_dict['exploit'].has_key(self.identifier):
self.event_dict['exploit'][self.identifier] = event_item
### attach to download list
self.handle_download(resEntry)
### attach to successful exploit list
if self.blocksucexpl == 1:
item_id = str(self.remote_ip)
self.event_dict['sucexpl_connections'][item_id] = int(time.time())
try:
self.socket_object.send("\r\n")
except socket.error, e:
pass
self.delete_existing_connection()
try:
self.shutdown(socket.SHUT_RDWR)
except:
pass
self.connected = False
self.close()
return
else:
### failed to determine shellcode
for key in vuln_modulList.keys():
del vuln_modulList[key]
### create failed exploit event
event_item = (self.remote_ip,
self.remote_port,
self.own_ip,
self.own_port,
result['vuln_modul'],
int(time.time()),
resEntry)
if not self.event_dict['exploit'].has_key(self.identifier):
self.event_dict['exploit'][self.identifier] = event_item
### attach to successful exploit list
if self.blocksucexpl == 1:
item_id = str(self.remote_ip)
self.event_dict['sucexpl_connections'][item_id] = int(time.time())
try:
self.socket_object.send("\r\n")
except socket.error, e:
pass
self.delete_existing_connection()
try:
self.shutdown(socket.SHUT_RDWR)
except:
pass
self.connected = False
self.close()
return
### check replies and take the first
try:
if len(result['replies'])>0:
reply_message = result['replies'][0]
### calc reply message length
bytesTosend = len(reply_message)
try:
while bytesTosend>0:
bytes_send = self.socket_object.send(reply_message)
bytesTosend = bytesTosend - bytes_send
except socket.error, e:
### client gone
self.delete_existing_connection()
try:
self.shutdown(socket.SHUT_RDWR)
except:
pass
self.connected = False
self.close()
return
except:
pass
### TODO: proxy unknown attack to high-interaction honeypot
### Problem: can only be done if no previous stage was used, i.e. still at STAGE1
### possible solution: record network traffic and replay against Proxy (needs memory/disc space)
### TODO: vuln-proxy module to register ports for monitoring and proxying to another system
### TODO: read and store network traffic and generate a vulnerability from this data
### TODO: every request needs to be run through the shellcode manager; if shellcode is detected, the vulnerability module is finished
### requires: shellcode and vulnerability name in dict vulnResult
try:
if len(result['stage_list'])>0:
for entry in result['stage_list']:
if not entry.endswith('STAGE1'):
self.enableProxy = False
break
except:
pass
if self.enableProxy and not self.proxyMode and len(vuln_modulList)<=0 and len(data)>=0 and state!="amun_stage_finished":
### enable proxy state
self.proxyMode = True
### check for proxyState earlier
### FIXME: configuration: mirror, proxy, none
### open socket to honeypot system or remote attacker
proxyConnResult = self.setup_remote_connection(self.remote_ip)
if not proxyConnResult:
return
self.log_obj.log("no module switching to proxy mode %s<->%s<->%s" % (self.remote_ip, self.own_ip, self.proxytoIP), 6, "debug", False, True)
### transmit data to proxy module
self.sendRequest = "".join(data)
bytesTosend = len(self.sendRequest)
self.proxyRequests.append(data)
try:
self.log_obj.log("sending initial data to proxy host %s" % (self.proxytoIP), 6, "debug", False, True)
self.log_obj.log("sending %s bytes" % (bytesTosend), 6, "debug", False, True)
while bytesTosend>0:
bytes_send = self.origin_socket.send(self.sendRequest)
bytesTosend = bytesTosend - bytes_send
self.sendRequest = self.sendRequest[bytes_send:]
except:
self.log_obj.log("sending to proxy/remote host failed %s" % (self.proxytoIP), 6, "crit", False, True)
pass
while True:
try:
self.log_obj.log("waiting for proxy reply %s" % (self.proxytoIP), 6, "debug", False, True)
self.origin_socket.settimeout(2.0)
self.out_buffer = self.origin_socket.recv(1024)
self.log_obj.log("received %s bytes from proxy" % (len(self.out_buffer)), 6, "debug", False, True)
self.proxyReplies.append(self.out_buffer)
if len(self.out_buffer)<1024 and len(self.out_buffer)!=0:
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("1 sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
break
elif self.out_buffer=='':
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("2 sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
break
elif len(self.out_buffer)==0:
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("3 sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
break
else:
try:
bytesTosend = len(self.out_buffer)
while bytesTosend>0:
bytes_send = self.socket_object.send(self.out_buffer)
bytesTosend = bytesTosend - bytes_send
self.out_buffer = self.out_buffer[bytes_send:]
self.log_obj.log("4 sending data to attacker %s (%s)" % (self.remote_ip, bytes_send), 8, "debug", False, True)
except:
break
time.sleep(.0001)
except socket.error, e:
if e[0]==11:
pass
else:
self.proxyTimedOut = True
break
except KeyboardInterrupt:
raise
self.out_buffer = ""
self.sendRequest = ""
### connection finished but modules left
if len(vuln_modulList)>0 and len(data)<=0:
for key in vuln_modulList.keys():
modul = vuln_modulList[key]
self.log_obj.log("%s leaving communication (stage: %s bytes: %s)"\
% (modul.getVulnName(),modul.getCurrentStage(),len(data)), 6, "debug", False, True)
result['stage_list'].append(modul.getCurrentStage())
del vuln_modulList[key]
if self.event_dict['initial_connections'].has_key(self.identifier):
del self.event_dict['initial_connections'][self.identifier]
### modules left?
if len(vuln_modulList)<=0 and not self.proxyMode:
if self.verboseLogging:
self.log_obj.log("no vulnerability modul left (%s) -> closing connection" % (self.own_port), 6, "debug", False, True)
if not event_item and len(data)>0 and state!="amun_stage_finished":
self.log_obj.log("unknown vuln (Attacker: %s Port: %s, Mess: %s (%i) Stages: %s)" % (self.remote_ip, self.own_port, [data], len(data), result['stage_list']), 6, "crit", True, False)
elif not event_item and len(data)>0:
self.log_obj.log("incomplete vuln (Attacker: %s Port: %s, Mess: %s (%i) Stages: %s)" % (self.remote_ip, self.own_port, [data], len(data), result['stage_list']), 6, "crit", True, False)
elif not event_item and len(data)==0 and state!="amun_stage_finished":
self.log_obj.log("PortScan Detected on Port: %s (%s)" % (self.own_port, self.remote_ip), 6, "div", True, False)
pass
try:
self.socket_object.send("\r\n")
except socket.error, e:
pass
self.delete_existing_connection()
try:
self.shutdown(socket.SHUT_RDWR)
except:
pass
if self.event_dict['initial_connections'].has_key(self.identifier):
del self.event_dict['initial_connections'][self.identifier]
self.connected = False
self.close()
return
except KeyboardInterrupt:
raise
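The proxy loop in this example repeats the same recv/forward dance in four identical branches, throttled by time.sleep(.0001), with errno 11 (EAGAIN) treated as retryable. A condensed sketch of that relay with the branches folded into one; the helper name is ours:

import errno
import socket
import time

def relay(src, dst, chunk=1024, pause=0.0001):
    # Forward bytes from src to dst until src stops sending, mirroring
    # the loop above: short recv timeout, full-send loop, tiny sleep.
    src.settimeout(2.0)
    while True:
        try:
            buf = src.recv(chunk)
        except socket.error as e:
            if e.errno == errno.EAGAIN:
                continue  # no data yet, try again
            break         # timeout or hard error: give up
        if not buf:
            break         # peer closed the connection
        while buf:        # send() may be partial; loop until drained
            sent = dst.send(buf)
            buf = buf[sent:]
        time.sleep(pause) # throttle so we do not spin on the CPU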
0
Example 58
Project: pyrocore Source File: rtcontrol.py
def mainloop(self):
""" The main loop.
"""
# Print field definitions?
if self.options.help_fields:
self.parser.print_help()
print_help_fields()
sys.exit(1)
# Print usage if no conditions are provided
if not self.args:
self.parser.error("No filter conditions given!")
# Check special action options
actions = []
if self.options.ignore:
actions.append(Bunch(name="ignore", method="ignore", label="IGNORE" if int(self.options.ignore) else "HEED",
help="commands on torrent", interactive=False, args=(self.options.ignore,)))
if self.options.prio:
actions.append(Bunch(name="prio", method="set_prio", label="PRIO" + str(self.options.prio),
help="for torrent", interactive=False, args=(self.options.prio,)))
# Check standard action options
# TODO: Allow certain combinations of actions (like --tag foo --stop etc.)
# How do we get a sensible order of execution?
for action_mode in self.ACTION_MODES:
if getattr(self.options, action_mode.name):
if actions:
self.parser.error("Options --%s and --%s are mutually exclusive" % (
", --".join(i.name.replace('_', '-') for i in actions),
action_mode.name.replace('_', '-'),
))
if action_mode.argshelp:
action_mode.args = (getattr(self.options, action_mode.name),)
actions.append(action_mode)
if not actions and self.options.flush:
actions.append(Bunch(name="flush", method="flush", label="FLUSH",
help="flush session data", interactive=False, args=()))
self.options.flush = False # No need to flush twice
if any(i.interactive for i in actions):
self.options.interactive = True
# Reduce results according to index range
selection = None
if self.options.select:
try:
if '-' in self.options.select:
selection = tuple(int(i or default, 10) for i, default in
zip(self.options.select.split('-', 1), ("1", "-1")))
else:
selection = 1, int(self.options.select, 10)
except (ValueError, TypeError), exc:
self.fatal("Bad selection '%s' (%s)" % (self.options.select, exc))
# print repr(config.engine)
# config.engine.open()
# print repr(config.engine)
# Preparation steps
raw_output_format = self.options.output_format
default_output_format = "default"
if actions:
default_output_format = "action_cron" if self.options.cron else "action"
self.validate_output_format(default_output_format)
sort_key = self.validate_sort_fields()
matcher = matching.ConditionParser(engine.FieldDefinition.lookup, "name").parse(self.args)
self.LOG.debug("Matcher is: %s" % matcher)
# Detach to background?
# This MUST happen before the next step, when we connect to the torrent client
if self.options.detach:
config.engine.load_config()
osmagic.daemonize(logfile=config.log_execute)
time.sleep(.05) # let things settle a little
# View handling
if self.options.modify_view:
if self.options.from_view or self.options.to_view:
self.fatal("You cannot combine --modify-view with --from-view or --to-view")
self.options.from_view = self.options.to_view = self.options.modify_view
# Find matching torrents
# TODO: this could get speedier quite a bit when we pre-select
# a subset of all items in rtorrent itself, via a dynamic view!
# Or sort them just the right way, and then abort after we cannot
# possibly find more matches.
#
view = config.engine.view(self.options.from_view, matcher)
matches = list(view.items())
orig_matches = matches[:]
matches.sort(key=sort_key, reverse=self.options.reverse_sort)
for mode in self.options.anneal:
if self.anneal(mode, matches, orig_matches):
matches.sort(key=sort_key, reverse=self.options.reverse_sort)
if selection:
matches = matches[selection[0]-1:selection[1]]
if not matches:
# Think "404 NOT FOUND", but then exit codes should be < 256
self.return_code = 44
# Build header stencil
stencil = None
if self.options.column_headers and self.plain_output_format and matches:
stencil = fmt.to_console(formatting.format_item(
self.options.output_format, matches[0], self.FORMATTER_DEFAULTS)).split('\t')
# Tee to ncurses view, if requested
if self.options.tee_view and (self.options.to_view or self.options.view_only):
self.show_in_view(view, matches)
# Generate summary?
summary = FieldStatistics(len(matches))
if self.options.stats or self.options.summary:
for field in self.get_output_fields():
try:
0 + getattr(matches[0], field)
except (TypeError, ValueError, IndexError):
summary.total[field] = ''
else:
for item in matches:
summary.add(field, getattr(item, field))
def output_formatter(templ, ns=None):
"Output formatting helper"
full_ns = dict(
version = self.version,
proxy = config.engine.open(),
view = view,
query = matcher,
matches = matches,
summary = summary
)
full_ns.update(ns or {})
return formatting.expand_template(templ, full_ns)
# Execute action?
if actions:
action = actions[0] # TODO: loop over it
self.LOG.log(logging.DEBUG if self.options.cron else logging.INFO, "%s %s %d out of %d torrents." % (
"Would" if self.options.dry_run else "About to", action.label, len(matches), view.size(),
))
defaults = {"action": action.label}
defaults.update(self.FORMATTER_DEFAULTS)
if self.options.column_headers and matches:
self.emit(None, stencil=stencil)
# Perform chosen action on matches
template_args = [formatting.preparse("{{#tempita}}" + i if "{{" in i else i) for i in action.args]
for item in matches:
if not self.prompt.ask_bool("%s item %s" % (action.label, item.name)):
continue
if self.options.output_format and str(self.options.output_format) != "-":
self.emit(item, defaults, to_log=self.options.cron)
args = tuple([output_formatter(i, ns=dict(item=item)) for i in template_args])
if self.options.dry_run:
if self.options.debug:
self.LOG.debug("Would call action %s(*%r)" % (action.method, args))
else:
getattr(item, action.method)(*args)
if self.options.flush:
item.flush()
# Show in ncurses UI?
elif not self.options.tee_view and (self.options.to_view or self.options.view_only):
self.show_in_view(view, matches)
# Execute OS commands?
elif self.options.call or self.options.spawn:
if self.options.call and self.options.spawn:
self.fatal("You cannot mix --call and --spawn")
template_cmds = []
if self.options.call:
template_cmds.append([formatting.preparse("{{#tempita}}" + self.options.call)])
else:
for cmd in self.options.spawn:
template_cmds.append([formatting.preparse("{{#tempita}}" + i if "{{" in i else i)
for i in shlex.split(str(cmd))])
for item in matches:
cmds = [[output_formatter(i, ns=dict(item=item)) for i in k] for k in template_cmds]
cmds = [[i.encode('utf-8') if isinstance(i, unicode) else i for i in k] for k in cmds]
if self.options.dry_run:
self.LOG.info("Would call command(s) %r" % (cmds,))
else:
for cmd in cmds:
if self.options.call:
logged_cmd = cmd[0]
else:
logged_cmd = '"%s"' % ('" "'.join(cmd),)
if self.options.verbose:
self.LOG.info("Calling: %s" % (logged_cmd,))
try:
if self.options.call:
subprocess.check_call(cmd[0], shell=True)
else:
subprocess.check_call(cmd)
except subprocess.CalledProcessError, exc:
raise error.UserError("Command failed: %s" % (exc,))
except OSError, exc:
raise error.UserError("Command failed (%s): %s" % (logged_cmd, exc,))
# Dump as JSON array?
elif self.options.json:
json_data = matches
if raw_output_format:
json_fields = raw_output_format.split(',')
json_data = [dict([(name, getattr(i, name)) for name in json_fields])
for i in matches]
json.dump(json_data, sys.stdout, indent=2, separators=(',', ': '),
sort_keys=True, cls=pymagic.JSONEncoder)
sys.stdout.write('\n')
sys.stdout.flush()
# Show via template?
elif self.options.output_template:
output_template = self.options.output_template
if not output_template.startswith("file:"):
output_template = "file:" + output_template
sys.stdout.write(output_formatter(output_template))
sys.stdout.flush()
# Show on console?
elif self.options.output_format and str(self.options.output_format) != "-":
if not self.options.summary:
line_count = 0
for item in matches:
# Emit a header line every 'output_header_frequency' lines
if self.options.column_headers and line_count % config.output_header_frequency == 0:
self.emit(None, stencil=stencil)
# Print matching item
line_count += self.emit(item, self.FORMATTER_DEFAULTS)
# Print summary?
if matches and summary:
self.emit(None, stencil=stencil,
#item_formatter=None if self.options.summary
# # TODO: this can be done better!
# else lambda i: re.sub("[^ \t]", "=", i)
# #else lambda i: re.sub("=[ \t]+", lambda k: "=" * len(k.group(0)) + k.group(0)[-1], re.sub("[^ \t]", "=", i))
)
self.emit(summary.total, item_formatter=lambda i: i.rstrip() + " [SUM of %d item(s)]" % len(matches))
self.emit(summary.min, item_formatter=lambda i: i.rstrip() + " [MIN of %d item(s)]" % len(matches))
self.emit(summary.average, item_formatter=lambda i: i.rstrip() + " [AVG of %d item(s)]" % len(matches))
self.emit(summary.max, item_formatter=lambda i: i.rstrip() + " [MAX of %d item(s)]" % len(matches))
self.LOG.info("Dumped %d out of %d torrents." % (len(matches), view.size(),))
else:
self.LOG.info("Filtered %d out of %d torrents." % (len(matches), view.size(),))
if self.options.debug and 0:
print '\n' + repr(matches[0])
print '\n' + repr(matches[0].files)
# XMLRPC stats
self.LOG.debug("XMLRPC stats: %s" % config.engine._rpc)
0
Example 59
Project: nzbToMedia Source File: autoProcessTV.py
def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):
cfg = dict(core.CFG[section][inputCategory])
host = cfg["host"]
port = cfg["port"]
ssl = int(cfg.get("ssl", 0))
web_root = cfg.get("web_root", "")
protocol = "https://" if ssl else "http://"
if not server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)):
logger.error("Server did not respond. Exiting", section)
return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]
# auto-detect correct fork
fork, fork_params = autoFork(section, inputCategory)
username = cfg.get("username", "")
password = cfg.get("password", "")
apikey = cfg.get("apikey", "")
delete_failed = int(cfg.get("delete_failed", 0))
nzbExtractionBy = cfg.get("nzbExtractionBy", "Downloader")
process_method = cfg.get("process_method")
remote_path = int(cfg.get("remote_path", 0))
wait_for = int(cfg.get("wait_for", 2))
force = int(cfg.get("force", 0))
delete_on = int(cfg.get("delete_on", 0))
ignore_subs = int(cfg.get("ignore_subs", 0))
extract = int(cfg.get("extract", 0))
if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name.
dirName = os.path.split(os.path.normpath(dirName))[0]
SpecificPath = os.path.join(dirName, str(inputName))
cleanName = os.path.splitext(SpecificPath)
if cleanName[1] == ".nzb":
SpecificPath = cleanName[0]
if os.path.isdir(SpecificPath):
dirName = SpecificPath
# Attempt to create the directory if it doesn't exist and ignore any
# error stating that it already exists. This fixes a bug where SickRage
# won't process the directory because it doesn't exist.
try:
os.makedirs(dirName) # Attempt to create the directory
except OSError as e:
# Re-raise the error if it wasn't about the directory not existing
if e.errno != errno.EEXIST:
raise
if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"):
if inputName:
process_all_exceptions(inputName, dirName)
inputName, dirName = convert_to_ascii(inputName, dirName)
# Now check if tv files exist in destination.
if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
if listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract:
logger.debug('Checking for archives to extract in directory: {0}'.format(dirName))
core.extractFiles(dirName)
inputName, dirName = convert_to_ascii(inputName, dirName)
if listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed.
flatten(dirName)
# Check video files for corruption
status = int(failed)
good_files = 0
num_files = 0
for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
num_files += 1
if transcoder.isVideoGood(video, status):
good_files += 1
import_subs(video)
if num_files > 0:
if good_files == num_files and not status == 0:
logger.info('Found Valid Videos. Setting status Success')
status = 0
failed = 0
if good_files < num_files and status == 0:
logger.info('Found corrupt videos. Setting status Failed')
status = 1
failed = 1
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')
if failureLink:
failureLink += '&corrupt=true'
elif clientAgent == "manual":
logger.warning("No media files found in directory {0} to manually process.".format(dirName), section)
return [0, ""] # Success (as far as this script is concerned)
elif nzbExtractionBy == "Destination":
logger.info("Check for media files ignored because nzbExtractionBy is set to Destination.")
if int(failed) == 0:
logger.info("Setting Status Success.")
status = 0
failed = 0
else:
logger.info("Downloader reported an error during download or verification. Processing this as a failed download.")
status = 1
failed = 1
else:
logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section)
status = 1
failed = 1
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')
if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads
result, newDirName = transcoder.Transcode_directory(dirName)
if result == 0:
logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section)
dirName = newDirName
chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8)
logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section)
if chmod_directory:
logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section)
core.rchmod(dirName, chmod_directory)
else:
logger.error("FAILED: Transcoding failed for files in {0}".format(dirName), section)
return [1, "{0}: Failed to post-process - Transcoding failed".format(section)]
# configure SB params to pass
fork_params['quiet'] = 1
fork_params['proc_type'] = 'manual'
if inputName is not None:
fork_params['nzbName'] = inputName
for param in copy.copy(fork_params):
if param == "failed":
fork_params[param] = failed
if param in ["dirName", "dir", "proc_dir"]:
fork_params[param] = dirName
if remote_path:
fork_params[param] = remoteDir(dirName)
if param == "process_method":
if process_method:
fork_params[param] = process_method
else:
del fork_params[param]
if param == "force":
if force:
fork_params[param] = force
else:
del fork_params[param]
if param == "delete_on":
if delete_on:
fork_params[param] = delete_on
else:
del fork_params[param]
if param == "ignore_subs":
if ignore_subs:
fork_params[param] = ignore_subs
else:
del fork_params[param]
# delete any unused params so we don't pass them to SB by mistake
[fork_params.pop(k) for k, v in fork_params.items() if v is None]
if status == 0:
logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section)
else:
core.FAILED = True
if failureLink:
reportNzb(failureLink, clientAgent)
if 'failed' in fork_params:
logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section)
elif section == "NzbDrone":
logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(fork), section)
return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader.
else:
logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section)
if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName:
logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
rmDir(dirName)
return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader.
url = None
if section == "SickBeard":
url = "{0}{1}:{2}{3}/home/postprocess/processEpisode".format(protocol, host, port, web_root)
elif section == "NzbDrone":
url = "{0}{1}:{2}{3}/api/command".format(protocol, host, port, web_root)
url2 = "{0}{1}:{2}{3}/api/config/downloadClient".format(protocol, host, port, web_root)
headers = {"X-Api-Key": apikey}
# params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'}
if remote_path:
logger.debug("remote_path: {0}".format(remoteDir(dirName)), section)
data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id}
else:
logger.debug("path: {0}".format(dirName), section)
data = {"name": "DownloadedEpisodesScan", "path": dirName, "downloadClientId": download_id}
if not download_id:
data.pop("downloadClientId")
data = json.dumps(data)
try:
if section == "SickBeard":
logger.debug("Opening URL: {0} with params: {1}".format(url, fork_params), section)
s = requests.Session()
login = "{0}{1}:{2}{3}/login".format(protocol, host, port, web_root)
login_params = {'username': username, 'password': password}
s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60))
r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800))
elif section == "NzbDrone":
logger.debug("Opening URL: {0} with data: {1}".format(url, data), section)
r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error("Unable to open URL: {0}".format(url), section)
return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]
if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error("Server returned status {0}".format(r.status_code), section)
return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
Success = False
Started = False
if section == "SickBeard":
for line in r.iter_lines():
if line:
logger.postprocess("{0}".format(line), section)
if "Moving file from" in line:
inputName = os.path.split(line)[1]
if "Processing succeeded" in line or "Successfully processed" in line:
Success = True
elif section == "NzbDrone":
try:
res = json.loads(r.content)
scan_id = int(res['id'])
logger.debug("Scan started with id: {0}".format(scan_id), section)
Started = True
except Exception as e:
logger.warning("No scan id was returned due to: {0}".format(e), section)
scan_id = None
Started = False
if status != 0 and delete_failed and not os.path.dirname(dirName) == dirName:
logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
rmDir(dirName)
if Success:
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif section == "NzbDrone" and Started:
n = 0
params = {}
url = "{0}/{1}".format(url, scan_id)
while n < 6: # poll up to six times, sleeping 10 * wait_for seconds each time, to see if the command completes
time.sleep(10 * wait_for)
command_status = self.command_complete(url, params, headers, section)
if command_status and command_status in ['completed', 'failed']:
break
n += 1
if command_status:
logger.debug("The Scan command return status: {0}".format(command_status), section)
if not os.path.exists(dirName):
logger.debug("The directory {0} has been removed. Renaming was successful.".format(dirName), section)
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif command_status and command_status in ['completed']:
logger.debug("The Scan command has completed successfully. Renaming was successful.", section)
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif command_status and command_status in ['failed']:
logger.debug("The Scan command has failed. Renaming was not successful.", section)
# return [1, "%s: Failed to post-process %s" % (section, inputName) ]
if self.CDH(url2, headers, section=section):
logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
else:
logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section)
return [1, "{0}: Failed to post-process {1}".format(section, inputName)]
else:
return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] # We did not receive Success confirmation.
0
Example 60
Project: aws-lambda-ddns-function Source File: union.py
def lambda_handler(event, context):
""" Check to see whether a DynamoDB table already exists. If not, create it. This table is used to keep a record of
instances that have been created along with their attributes. This is necessary because when you terminate an instance
its attributes are no longer available, so they have to be fetched from the table."""
tables = dynamodb_client.list_tables()
if 'DDNS' in tables['TableNames']:
print 'DynamoDB table already exists'
else:
create_table('DDNS')
# Set variables
# Get the state from the Event stream
state = event['detail']['state']
# Get the instance id, region, and tag collection
instance_id = event['detail']['instance-id']
region = event['region']
table = dynamodb_resource.Table('DDNS')
if state == 'running':
time.sleep(60)
instance = compute.describe_instances(InstanceIds=[instance_id])
# Remove response metadata from the response
instance.pop('ResponseMetadata')
# Remove null values from the response. You cannot save a dict/JSON document in DynamoDB if it contains null
# values
instance = remove_empty_from_dict(instance)
instance_dump = json.dumps(instance,default=json_serial)
instance_attributes = json.loads(instance_dump)
table.put_item(
Item={
'InstanceId': instance_id,
'InstanceAttributes': instance_attributes
}
)
else:
# Fetch item from DynamoDB
instance = table.get_item(
Key={
'InstanceId': instance_id
},
AttributesToGet=[
'InstanceAttributes'
]
)
instance = instance['Item']['InstanceAttributes']
try:
tags = instance['Reservations'][0]['Instances'][0]['Tags']
except:
tags = []
# Get instance attributes
private_ip = instance['Reservations'][0]['Instances'][0]['PrivateIpAddress']
private_dns_name = instance['Reservations'][0]['Instances'][0]['PrivateDnsName']
private_host_name = private_dns_name.split('.')[0]
try:
public_ip = instance['Reservations'][0]['Instances'][0]['PublicIpAddress']
public_dns_name = instance['Reservations'][0]['Instances'][0]['PublicDnsName']
public_host_name = public_dns_name.split('.')[0]
except BaseException as e:
print 'Instance has no public IP or host name', e
# Get the subnet mask of the instance
subnet_id = instance['Reservations'][0]['Instances'][0]['SubnetId']
subnet = ec2.Subnet(subnet_id)
cidr_block = subnet.cidr_block
subnet_mask = int(cidr_block.split('/')[-1])
reversed_ip_address = reverse_list(private_ip)
reversed_domain_prefix = get_reversed_domain_prefix(subnet_mask, private_ip)
reversed_domain_prefix = reverse_list(reversed_domain_prefix)
# Set the reverse lookup zone
reversed_lookup_zone = reversed_domain_prefix + 'in-addr.arpa.'
print 'The reverse lookup zone for this instance is:', reversed_lookup_zone
# Get VPC id
vpc_id = instance['Reservations'][0]['Instances'][0]['VpcId']
vpc = ec2.Vpc(vpc_id)
# Are DNS Hostnames and DNS Support enabled?
if is_dns_hostnames_enabled(vpc):
print 'DNS hostnames enabled for %s' % vpc_id
else:
print 'DNS hostnames disabled for %s. You have to enable DNS hostnames to use Route 53 private hosted zones.' % vpc_id
if is_dns_support_enabled(vpc):
print 'DNS support enabled for %s' % vpc_id
else:
print 'DNS support disabled for %s. You have to enable DNS support to use Route 53 private hosted zones.' % vpc_id
# Create the public and private hosted zone collections. These are collections of zones in Route 53.
hosted_zones = route53.list_hosted_zones()
private_hosted_zones = filter(lambda x: x['Config']['PrivateZone'] is True, hosted_zones['HostedZones'])
private_hosted_zone_collection = map(lambda x: x['Name'], private_hosted_zones)
public_hosted_zones = filter(lambda x: x['Config']['PrivateZone'] is False, hosted_zones['HostedZones'])
public_hosted_zones_collection = map(lambda x: x['Name'], public_hosted_zones)
# Check to see whether a reverse lookup zone for the instance already exists. If it does, check to see whether
# the reverse lookup zone is associated with the instance's VPC. If it isn't create the association. You don't
# need to do this when you create the reverse lookup zone because the association is done automatically.
if filter(lambda record: record['Name'] == reversed_lookup_zone, hosted_zones['HostedZones']):
print 'Reverse lookup zone found:', reversed_lookup_zone
reverse_lookup_zone_id = get_zone_id(reversed_lookup_zone)
reverse_hosted_zone_properties = get_hosted_zone_properties(reverse_lookup_zone_id)
if vpc_id in map(lambda x: x['VPCId'], reverse_hosted_zone_properties['VPCs']):
print 'Reverse lookup zone %s is associated with VPC %s' % (reverse_lookup_zone_id, vpc_id)
else:
print 'Associating zone %s with VPC %s' % (reverse_lookup_zone_id, vpc_id)
try:
associate_zone(reverse_lookup_zone_id, region, vpc_id)
except BaseException as e:
print e
else:
print 'No matching reverse lookup zone'
# create private hosted zone for reverse lookups
if state == 'running':
create_reverse_lookup_zone(instance, reversed_domain_prefix, region)
reverse_lookup_zone_id = get_zone_id(reversed_lookup_zone)
# Wait a random amount of time. This is a poor man's back-off in case a lot of instances are launched all at once.
time.sleep(random.random())
# Loop through the instance's tags, looking for the zone and cname tags. If either of these tags exist, check
# to make sure that the name is valid. If it is and if there's a matching zone in DNS, create A and PTR records.
for tag in tags:
if 'ZONE' in tag.get('Key',{}).lstrip().upper():
if is_valid_hostname(tag.get('Value')):
if tag.get('Value').lstrip().lower() in private_hosted_zone_collection:
print 'Private zone found:', tag.get('Value')
private_hosted_zone_name = tag.get('Value').lstrip().lower()
private_hosted_zone_id = get_zone_id(private_hosted_zone_name)
private_hosted_zone_properties = get_hosted_zone_properties(private_hosted_zone_id)
if state == 'running':
if vpc_id in map(lambda x: x['VPCId'], private_hosted_zone_properties['VPCs']):
print 'Private hosted zone %s is associated with VPC %s' % (private_hosted_zone_id, vpc_id)
else:
print 'Associating zone %s with VPC %s' % (private_hosted_zone_id, vpc_id)
try:
associate_zone(private_hosted_zone_id, region, vpc_id)
except BaseException as e:
print 'You cannot create an association with a VPC with an overlapping subdomain.\n', e
exit()
try:
create_resource_record(private_hosted_zone_id, private_host_name, private_hosted_zone_name, 'A', private_ip)
create_resource_record(reverse_lookup_zone_id, reversed_ip_address, 'in-addr.arpa', 'PTR', private_dns_name)
except BaseException as e:
print e
else:
try:
delete_resource_record(private_hosted_zone_id, private_host_name, private_hosted_zone_name, 'A', private_ip)
delete_resource_record(reverse_lookup_zone_id, reversed_ip_address, 'in-addr.arpa', 'PTR', private_dns_name)
except BaseException as e:
print e
# create PTR record
elif tag.get('Value').lstrip().lower() in public_hosted_zones_collection:
print 'Public zone found', tag.get('Value')
public_hosted_zone_name = tag.get('Value').lstrip().lower()
public_hosted_zone_id = get_zone_id(public_hosted_zone_name)
# create A record in public zone
if state =='running':
try:
create_resource_record(public_hosted_zone_id, public_host_name, public_hosted_zone_name, 'A', public_ip)
except BaseException as e:
print e
else:
try:
delete_resource_record(public_hosted_zone_id, public_host_name, public_hosted_zone_name, 'A', public_ip)
except BaseException as e:
print e
else:
print 'No matching zone found for %s' % tag.get('Value')
else:
print '%s is not a valid host name' % tag.get('Value')
# Consider making this an elif CNAME
else:
print 'The tag \'%s\' is not a zone tag' % tag.get('Key')
if 'CNAME' in tag.get('Key',{}).lstrip().upper():
if is_valid_hostname(tag.get('Value')):
cname = tag.get('Value').lstrip().lower()
cname_host_name = cname.split('.')[0]
cname_domain_suffix = cname[cname.find('.')+1:]
cname_domain_suffix_id = get_zone_id(cname_domain_suffix)
for cname_private_hosted_zone in private_hosted_zone_collection:
cname_private_hosted_zone_id = get_zone_id(cname_private_hosted_zone)
if cname_domain_suffix_id == cname_private_hosted_zone_id:
if cname.endswith(cname_private_hosted_zone):
#create CNAME record in private zone
if state == 'running':
try:
create_resource_record(cname_private_hosted_zone_id, cname_host_name, cname_private_hosted_zone, 'CNAME', private_dns_name)
except BaseException as e:
print e
else:
try:
delete_resource_record(cname_private_hosted_zone_id, cname_host_name, cname_private_hosted_zone, 'CNAME', private_dns_name)
except BaseException as e:
print e
for cname_public_hosted_zone in public_hosted_zones_collection:
if cname.endswith(cname_public_hosted_zone):
cname_public_hosted_zone_id = get_zone_id(cname_public_hosted_zone)
#create CNAME record in public zone
if state == 'running':
try:
create_resource_record(cname_public_hosted_zone_id, cname_host_name, cname_public_hosted_zone, 'CNAME', public_dns_name)
except BaseException as e:
print e
else:
try:
delete_resource_record(cname_public_hosted_zone_id, cname_host_name, cname_public_hosted_zone, 'CNAME', public_dns_name)
except BaseException as e:
print e
# Is there a DHCP option set?
# Get DHCP option set configuration
try:
dhcp_options_id = vpc.dhcp_options_id
dhcp_configurations = get_dhcp_configurations(dhcp_options_id)
except BaseException as e:
print 'No DHCP option set assigned to this VPC\n', e
exit()
# Look to see whether there's a DHCP option set assigned to the VPC. If there is, use the value of the domain name
# to create resource records in the appropriate Route 53 private hosted zone. This will also check to see whether
# there's an association between the instance's VPC and the private hosted zone. If there isn't, it will create it.
for configuration in dhcp_configurations:
if configuration[0] in private_hosted_zone_collection:
private_hosted_zone_name = configuration[0]
print 'Private zone found %s' % private_hosted_zone_name
# TODO need a way to prevent overlapping subdomains
private_hosted_zone_id = get_zone_id(private_hosted_zone_name)
private_hosted_zone_properties = get_hosted_zone_properties(private_hosted_zone_id)
# create A records and PTR records
if state == 'running':
if vpc_id in map(lambda x: x['VPCId'], private_hosted_zone_properties['VPCs']):
print 'Private hosted zone %s is associated with VPC %s' % (private_hosted_zone_id, vpc_id)
else:
print 'Associating zone %s with VPC %s' % (private_hosted_zone_id, vpc_id)
try:
associate_zone(private_hosted_zone_id, region,vpc_id)
except BaseException as e:
print 'You cannot create an association with a VPC with an overlapping subdomain.\n', e
exit()
try:
create_resource_record(private_hosted_zone_id, private_host_name, private_hosted_zone_name, 'A', private_ip)
create_resource_record(reverse_lookup_zone_id, reversed_ip_address, 'in-addr.arpa', 'PTR', private_dns_name)
except BaseException as e:
print e
else:
try:
delete_resource_record(private_hosted_zone_id, private_host_name, private_hosted_zone_name, 'A', private_ip)
delete_resource_record(reverse_lookup_zone_id, reversed_ip_address, 'in-addr.arpa', 'PTR', private_dns_name)
except BaseException as e:
print e
else:
print 'No matching zone for %s' % configuration[0]
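This handler sleeps twice: time.sleep(60) so a freshly launched instance is fully describable before describe_instances is called, and time.sleep(random.random()) as jitter so simultaneous launches do not all hit Route 53 at the same instant. A hedged sketch of that jittered-retry idea, not the handler's actual code:

import random
import time

def jittered_retry(fn, attempts=3, base=1.0):
    # Retry fn with a randomized delay between attempts, the same
    # poor man's back-off as the random sleep above.
    for attempt in range(attempts):
        try:
            return fn()
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(base * attempt + random.random())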
0
Example 61
Project: tilequeue Source File: command.py
def tilequeue_process(cfg, peripherals):
logger = make_logger(cfg, 'process')
logger.warn('tilequeue processing started')
assert os.path.exists(cfg.query_cfg), \
'Invalid query config path'
with open(cfg.query_cfg) as query_cfg_fp:
query_cfg = yaml.load(query_cfg_fp)
all_layer_data, layer_data, post_process_data = (
parse_layer_data(
query_cfg, cfg.buffer_cfg, cfg.template_path, cfg.reload_templates,
os.path.dirname(cfg.query_cfg)))
formats = lookup_formats(cfg.output_formats)
sqs_queue = peripherals.queue
store = make_store(cfg.store_type, cfg.s3_bucket, cfg)
assert cfg.postgresql_conn_info, 'Missing postgresql connection info'
n_cpu = multiprocessing.cpu_count()
sqs_messages_per_batch = 10
n_simultaneous_query_sets = cfg.n_simultaneous_query_sets
if not n_simultaneous_query_sets:
# default to number of databases configured
n_simultaneous_query_sets = len(cfg.postgresql_conn_info['dbnames'])
assert n_simultaneous_query_sets > 0
default_queue_buffer_size = 128
n_layers = len(all_layer_data)
n_formats = len(formats)
n_simultaneous_s3_storage = cfg.n_simultaneous_s3_storage
if not n_simultaneous_s3_storage:
n_simultaneous_s3_storage = max(n_cpu / 2, 1)
assert n_simultaneous_s3_storage > 0
# thread pool used for queries and uploading to s3
n_total_needed_query = n_layers * n_simultaneous_query_sets
n_total_needed_s3 = n_formats * n_simultaneous_s3_storage
n_total_needed = n_total_needed_query + n_total_needed_s3
n_max_io_workers = 50
n_io_workers = min(n_total_needed, n_max_io_workers)
io_pool = ThreadPool(n_io_workers)
feature_fetcher = DataFetcher(cfg.postgresql_conn_info, all_layer_data,
io_pool, n_layers)
# create all queues used to manage pipeline
sqs_input_queue_buffer_size = sqs_messages_per_batch
# holds coord messages from sqs
sqs_input_queue = Queue.Queue(sqs_input_queue_buffer_size)
# holds raw sql results - no filtering or processing done on them
sql_data_fetch_queue = multiprocessing.Queue(default_queue_buffer_size)
# holds data after it has been filtered and processed
# this is where the cpu intensive part of the operation will happen
# the results will be data that is formatted for each necessary format
processor_queue = multiprocessing.Queue(default_queue_buffer_size)
# holds data after it has been sent to s3
s3_store_queue = Queue.Queue(default_queue_buffer_size)
# create worker threads/processes
thread_sqs_queue_reader_stop = threading.Event()
sqs_queue_reader = SqsQueueReader(sqs_queue, sqs_input_queue, logger,
thread_sqs_queue_reader_stop)
data_fetch = DataFetch(
feature_fetcher, sqs_input_queue, sql_data_fetch_queue, io_pool,
peripherals.redis_cache_index, logger)
data_processor = ProcessAndFormatData(
post_process_data, formats, sql_data_fetch_queue, processor_queue,
cfg.layers_to_format, cfg.buffer_cfg, logger)
s3_storage = S3Storage(processor_queue, s3_store_queue, io_pool,
store, logger)
thread_sqs_writer_stop = threading.Event()
sqs_queue_writer = SqsQueueWriter(sqs_queue, s3_store_queue, logger,
thread_sqs_writer_stop)
def create_and_start_thread(fn, *args):
t = threading.Thread(target=fn, args=args)
t.start()
return t
thread_sqs_queue_reader = create_and_start_thread(sqs_queue_reader)
threads_data_fetch = []
threads_data_fetch_stop = []
for i in range(n_simultaneous_query_sets):
thread_data_fetch_stop = threading.Event()
thread_data_fetch = create_and_start_thread(data_fetch,
thread_data_fetch_stop)
threads_data_fetch.append(thread_data_fetch)
threads_data_fetch_stop.append(thread_data_fetch_stop)
# create a data processor per cpu
n_data_processors = n_cpu
data_processors = []
data_processors_stop = []
for i in range(n_data_processors):
data_processor_stop = multiprocessing.Event()
process_data_processor = multiprocessing.Process(
target=data_processor, args=(data_processor_stop,))
process_data_processor.start()
data_processors.append(process_data_processor)
data_processors_stop.append(data_processor_stop)
threads_s3_storage = []
threads_s3_storage_stop = []
for i in range(n_simultaneous_s3_storage):
thread_s3_storage_stop = threading.Event()
thread_s3_storage = create_and_start_thread(s3_storage,
thread_s3_storage_stop)
threads_s3_storage.append(thread_s3_storage)
threads_s3_storage_stop.append(thread_s3_storage_stop)
thread_sqs_writer = create_and_start_thread(sqs_queue_writer)
if cfg.log_queue_sizes:
assert(cfg.log_queue_sizes_interval_seconds > 0)
queue_data = (
(sqs_input_queue, 'sqs'),
(sql_data_fetch_queue, 'sql'),
(processor_queue, 'proc'),
(s3_store_queue, 's3'),
)
queue_printer_thread_stop = threading.Event()
queue_printer = QueuePrint(
cfg.log_queue_sizes_interval_seconds, queue_data, logger,
queue_printer_thread_stop)
queue_printer_thread = create_and_start_thread(queue_printer)
else:
queue_printer_thread = None
queue_printer_thread_stop = None
def stop_all_workers(signum, stack):
logger.warn('tilequeue processing shutdown ...')
logger.info('requesting all workers (threads and processes) stop ...')
# each worker guards its read loop with an event object
# ask all these to stop first
thread_sqs_queue_reader_stop.set()
for thread_data_fetch_stop in threads_data_fetch_stop:
thread_data_fetch_stop.set()
for data_processor_stop in data_processors_stop:
data_processor_stop.set()
for thread_s3_storage_stop in threads_s3_storage_stop:
thread_s3_storage_stop.set()
thread_sqs_writer_stop.set()
if queue_printer_thread_stop:
queue_printer_thread_stop.set()
logger.info('requesting all workers (threads and processes) stop ... '
'done')
# Once workers receive a stop event, they will keep reading
# from their queues until they receive a sentinel value. This
# is mandatory so that no messages will remain on queues when
# asked to join. Otherwise, we never terminate.
logger.info('joining all workers ...')
logger.info('joining sqs queue reader ...')
thread_sqs_queue_reader.join()
logger.info('joining sqs queue reader ... done')
logger.info('enqueueing sentinels for data fetchers ...')
for i in range(len(threads_data_fetch)):
sqs_input_queue.put(None)
logger.info('enqueueing sentinels for data fetchers ... done')
logger.info('joining data fetchers ...')
for thread_data_fetch in threads_data_fetch:
thread_data_fetch.join()
logger.info('joining data fetchers ... done')
logger.info('enqueueing sentinels for data processors ...')
for i in range(len(data_processors)):
sql_data_fetch_queue.put(None)
logger.info('enqueueing sentinels for data processors ... done')
logger.info('joining data processors ...')
for data_processor in data_processors:
data_processor.join()
logger.info('joining data processors ... done')
logger.info('enqueueing sentinels for s3 storage ...')
for i in range(len(threads_s3_storage)):
processor_queue.put(None)
logger.info('enqueueing sentinels for s3 storage ... done')
logger.info('joining s3 storage ...')
for thread_s3_storage in threads_s3_storage:
thread_s3_storage.join()
logger.info('joining s3 storage ... done')
logger.info('enqueueing sentinel for sqs queue writer ...')
s3_store_queue.put(None)
logger.info('enqueueing sentinel for sqs queue writer ... done')
logger.info('joining sqs queue writer ...')
thread_sqs_writer.join()
logger.info('joining sqs queue writer ... done')
if queue_printer_thread:
logger.info('joining queue printer ...')
queue_printer_thread.join()
logger.info('joining queue printer ... done')
logger.info('joining all workers ... done')
logger.info('joining io pool ...')
io_pool.close()
io_pool.join()
logger.info('joining io pool ... done')
logger.info('joining multiprocess data fetch queue ...')
sql_data_fetch_queue.close()
sql_data_fetch_queue.join_thread()
logger.info('joining multiprocess data fetch queue ... done')
logger.info('joining multiprocess process queue ...')
processor_queue.close()
processor_queue.join_thread()
logger.info('joining multiprocess process queue ... done')
logger.warn('tilequeue processing shutdown ... done')
sys.exit(0)
signal.signal(signal.SIGTERM, stop_all_workers)
signal.signal(signal.SIGINT, stop_all_workers)
signal.signal(signal.SIGQUIT, stop_all_workers)
logger.warn('all tilequeue threads and processes started')
# this is necessary for the main thread to receive signals
# when joining on threads/processes, the signal is never received
# http://www.luke.maurits.id.au/blog/post/threads-and-signals-in-python.html
while True:
time.sleep(1024)
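The tail of this example is the notable part: after installing the signal handlers, the main thread loops on time.sleep(1024) instead of join()ing its workers, because a main thread blocked in join() never receives signals. The same pattern in isolation:

import signal
import time

def run_until_signalled(shutdown_handler):
    # shutdown_handler takes (signum, frame), like stop_all_workers above.
    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, shutdown_handler)
    while True:
        # Any long interval works; sleep() wakes early when a signal
        # arrives, so the handler still runs promptly.
        time.sleep(1024)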
0
Example 62
def device_setup(self):
dev = self.dev
mm = self.mm
dev.drv = self
print "[+] initializing device"
dev.init()
print "[+] resetting device"
dev.reset()
sleep(0.5)
if dev.pci.misc_host_ctrl.enable_tagged_status_mode == 0:
print "[+] enabling tagged status mode"
dev.pci.misc_host_ctrl.enable_tagged_status_mode = 1
dma_wmm = 0x6
try:
if dev.config.caps['pcie'].max_payload_size > 0:
dma_wmm += 0x1
except: pass
if dev.pci.dma_rw_ctrl.dma_write_watermark != dma_wmm:
print "[+] configuring dma write watermark"
dev.pci.dma_rw_ctrl.dma_write_watermark = dma_wmm
if not dev.pci.dma_rw_ctrl.disable_cache_alignment:
print "[+] disabling pci dma alignment"
dev.pci.dma_rw_ctrl.disable_cache_alignment = 1
if dev.msi.mode.msix_multi_vector_mode:
print "[+] disabling multi vector mode"
dev.msi.mode.msix_multi_vector_mode = 0
if not dev.grc.misc_local_control.interrupt_on_attention:
print "[+] configuring interrupts on grc attention"
dev.grc.misc_local_control.interrupt_on_attention = 1
if not dev.grc.misc_local_control.auto_seeprom:
print "[+] configuring automatic eeprom access mode"
dev.grc.misc_local_control.auto_seeprom = 1
if not dev.grc.misc_config.timer_prescaler == 0x41:
print "[+] configuring grc timer prescaler"
dev.grc.misc_config.timer_prescaler = 0x41
if not dev.grc.mode.host_send_bds:
print "[+] enabling host send bds"
self.dev.grc.mode.send_no_pseudo_header_cksum = 1
self.dev.grc.mode.host_send_bds = 1
if not dev.grc.mode.host_stack_up:
print "[+] setting host stack up"
dev.grc.mode.host_stack_up = 1
if dev.bufman.dma_mbuf_low_watermark.count != 0x2a:
print "[+] setting dma mbuf low watermark"
dev.bufman.dma_mbuf_low_watermark.count = 0x2a
if dev.bufman.mbuf_high_watermark.count != 0xa0:
print "[+] setting mbuf high watermark"
dev.bufman.mbuf_high_watermark.count = 0xa0
if dev.emac.low_watermark_max_receive_frame.count != 1:
print "[+] configuring dma low watermark flow control"
dev.emac.low_watermark_max_receive_frame.count = 1
dev.bufman.mode.attention_enable = 1
dev.bufman.block_enable()
if dev.rbdi.std_ring_replenish_threshold.count != 0x19:
print "[+] configuring standard rx producer ring replenish threshold"
dev.rbdi.std_ring_replenish_threshold.count = 0x19
self._init_rx_rings()
dev.hpmb.box[tg.mb_rbd_standard_producer].low = 0
dev.rbdi.std_ring_replenish_watermark.count = 0x20
self._init_tx_rings()
dev.hpmb.box[tg.mb_sbd_host_producer].low = 0
self._init_rr_rings()
self.mac_addr = [getattr(dev.emac.addr[0], "byte_%d" % (i + 1)) for i in range(6)]
print ("[+] device mac addr: %02x" + (":%02x" * 5)) % tuple(self.mac_addr)
print "[+] configuring tx mac"
dev.emac.tx_random_backoff = sum(self.mac_addr) & 0x3ff
dev.emac.tx_mac_lengths.ipg = 0x6
dev.emac.tx_mac_lengths.ipg_crs = 0x2
dev.emac.tx_mac_lengths.slot = 0x20
print "[+] configuring rx mac"
dev.emac.rx_mtu = 1500
dev.emac.rx_rules_conf.no_rules_matches_default_class = 2
print "[+] configuring receive list placement"
dev.rlp.config.default_interrupt_distribution_queue = 0
dev.rlp.config.bad_frames_class = 1
dev.rlp.config.number_of_active_lists = 0x10
dev.rlp.config.number_of_lists_per_distribution_group = 1
print "[+] enabling rx statistics"
dev.rlp.stats_enable_mask.a1_silent_indication = 1
dev.rlp.stats_enable_mask.cpu_mactq_priority_disable = 1
dev.rlp.stats_enable_mask.enable_cos_stats = 1
dev.rlp.stats_enable_mask.enable_indiscard_stats = 1
dev.rlp.stats_enable_mask.enable_inerror_stats = 1
dev.rlp.stats_enable_mask.enable_no_more_rbd_stats = 0
dev.rlp.stats_enable_mask.perst_l = 1
dev.rlp.stats_enable_mask.rc_return_ring_enable = 0
dev.rlp.stats_enable_mask.rss_priority = 0
assert dev.rlp.stats_enable_mask.word == 0x7bffff
dev.rlp.stats_control.statistics_enable = 1
print "[+] enabling tx statistics"
dev.sdi.statistics_mask.counters_enable_mask = 1
dev.sdi.statistics_control.faster_update = 1
dev.sdi.statistics_control.statistics_enable = 1
dev.hc.block_disable()
print "[+] configuring host coalesence"
dev.hc.mode.status_block_size = 2
dev.hc.mode.clear_ticks_mode_on_rx = 1
dev.hc.rx_coal_ticks = 0x48
dev.hc.tx_coal_ticks = 0x14
dev.hc.rx_max_coal_bds = 0x05
dev.hc.tx_max_coal_bds = 0x35
dev.hc.rc_max_coal_bds_in_int = 0x05
dev.hc.tx_max_coal_bds_in_int = 0x05
self.status_block_vaddr = mm.alloc(sizeof(tg.status_block))
self.status_block = cast(self.status_block_vaddr, POINTER(tg.status_block))[0]
self.status_block_paddr = mm.get_paddr(self.status_block_vaddr)
dev.hc.status_block_host_addr_hi = self.status_block_paddr >> 32
dev.hc.status_block_host_addr_low = self.status_block_paddr & 0xffffffff
dev.hc.block_enable()
dev.rbdc.mode.attention_enable = 1
dev.rbdc.block_enable()
dev.rlp.block_enable()
if not dev.emac.mode.en_fhde:
print "[+] enabling frame header dma engine"
dev.emac.mode.en_fhde = 1
if not dev.emac.mode.en_rde:
print "[+] enabling receive dma engine"
dev.emac.mode.en_rde = 1
if not dev.emac.mode.en_tde:
print "[+] enabling transmit dma engine"
dev.emac.mode.en_tde = 1
print "[+] clearing rx statistics"
dev.emac.mode.clear_rx_statistics = 1
print "[+] clearing tx statistics"
dev.emac.mode.clear_tx_statistics = 1
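# the clear_*_statistics bits are self-clearing; spin until the hardware drops them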
while dev.emac.mode.clear_rx_statistics:
pass
while dev.emac.mode.clear_tx_statistics:
pass
if not dev.emac.mode.en_rx_statistics:
print "[+] enabling rx statistics"
dev.emac.mode.en_rx_statistics = 1
if not dev.emac.mode.en_tx_statistics:
print "[+] enabling tx statistics"
dev.emac.mode.en_tx_statistics = 1
if not dev.emac.event_enable.link_state_changed:
print "[+] enabling emac attention on link statue changed"
dev.emac.event_enable.link_state_changed = 1
if not dev.grc.mode.int_on_mac_attn:
print "[+] enabling interrupt on mac attention"
dev.grc.mode.int_on_mac_attn = 1
print "[+] configuringing write dma engine"
dev.wdma.mode.write_dma_pci_target_abort_attention_enable = 1
dev.wdma.mode.write_dma_pci_master_abort_attention_enable = 1
dev.wdma.mode.write_dma_pci_parity_attention_enable = 1
dev.wdma.mode.write_dma_pci_host_address_overflow_attention_enable = 1
dev.wdma.mode.write_dma_pci_fifo_overrun_attention_enable = 1
dev.wdma.mode.write_dma_pci_fifo_underrun_attention_enable = 1
dev.wdma.mode.write_dma_pci_fifo_overwrite_attention_enable = 1
dev.wdma.mode.write_dma_local_memory = 1
dev.wdma.mode.write_dma_pci_parity_error_attention_enable = 1
dev.wdma.mode.write_dma_pci_host_address_overflow_error_attention_enable = 1
dev.wdma.mode.status_tag_fix_enable = 1
dev.wdma.mode.reserved2 = 0
dev.wdma.block_enable()
print "[+] configuring read dma engine"
dev.rdma.mode.read_dma_pci_target_abort_attention_enable = 1
dev.rdma.mode.read_dma_pci_master_abort_attention_enable = 1
dev.rdma.mode.read_dma_pci_parity_error_attention_enable = 1
dev.rdma.mode.read_dma_pci_host_address_overflow_error_attention_enable = 1
dev.rdma.mode.read_dma_pci_fifo_overrun_attention_enable = 1
dev.rdma.mode.read_dma_pci_fifo_underrun_attention_enable = 1
dev.rdma.mode.read_dma_pci_fifo_overread_attention_enable = 1
dev.rdma.mode.read_dma_local_memory_write_longer_than_dma_length_attention_enable = 1
dev.rdma.mode.read_dma_pci_x_split_transaction_timeout_expired_attention_enable = 0
dev.rdma.mode.bd_sbd_corruption_attn_enable = 0
dev.rdma.mode.mbuf_rbd_corruption_attn_enable = 0
dev.rdma.mode.mbuf_sbd_corruption_attn_enable = 0
dev.rdma.mode.reserved3 = 0
dev.rdma.mode.pci_request_burst_length = 3
dev.rdma.mode.reserved2 = 0
dev.rdma.mode.jumbo_2k_mmrr_mode = 1
dev.rdma.mode.mmrr_disable = 0
dev.rdma.mode.address_overflow_error_logging_enable = 0
dev.rdma.mode.post_dma_debug_enable = 0
dev.rdma.mode.hardware_ipv4_post_dma_processing_enable = 0
dev.rdma.mode.hardware_ipv6_post_dma_processing_enable = 0
dev.rdma.mode.in_band_vtag_enable = 0
dev.rdma.mode.reserved = 0
dev.rdma.block_enable()
dev.rdc.mode.attention_enable = 1
dev.rdc.block_enable()
dev.sdc.block_enable()
dev.sbdc.mode.attention_enable = 1
dev.sbdc.block_enable()
dev.rbdi.mode.receive_bds_available_on_disabled_rbd_ring_attn_enable = 1
dev.rbdi.block_enable()
dev.rdi.mode.illegal_return_ring_size = 1
dev.rdi.block_enable()
dev.sdi.mode.multiple_segment_enable = 0
dev.sdi.mode.hardware_pre_dma_enable = 0
dev.sdi.block_enable()
dev.sbdi.mode.attention_enable = 1
dev.sbdi.block_enable()
dev.sbds.mode.attention_enable = 1
dev.sbds.block_enable()
self._populate_rx_ring()
print "[+] enabling transmit mac"
dev.emac.tx_mac_mode.enable_bad_txmbuf_lockup_fix = 1
#dev.emac.tx_mac_mode.enable_flow_control = 1
dev.emac.tx_mac_mode.enable = 1
usleep(100)
print "[+] enabling receive mac"
#dev.emac.mac_hash_0 = 0xffffffff
#dev.emac.mac_hash_1 = 0xffffffff
#dev.emac.mac_hash_2 = 0xffffffff
#dev.emac.mac_hash_3 = 0xffffffff
dev.emac.rx_mac_mode.promiscuous_mode = 1
dev.emac.rx_mac_mode.accept_runts = 1
#dev.emac.rx_mac_mode.enable_flow_control = 1
dev.emac.rx_mac_mode.rss_enable = 1
#dev.emac.rx_mac_mode.rss_ipv4_hash_enable = 1
#dev.emac.rx_mac_mode.rss_tcpipv4_hash_enable = 1
#dev.emac.rx_mac_mode.rss_ipv6_hash_enable = 1
#dev.emac.rx_mac_mode.rss_tcpipv6_hash_enable = 1
dev.emac.rx_mac_mode.enable = 1
usleep(100)
print "[+] configuring led"
dev.emac.led_control.word = 0x800
dev.emac.low_watermark_max_receive_frames = 1
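The bring-up sequence above pauses with usleep(100) after enabling the TX and RX MACs and spins on the self-clearing clear_*_statistics bits. usleep is presumably a project helper built on time.sleep; a minimal sketch of that helper plus a sleep-based, bounded version of the spin loop (the timeout and poll interval are assumptions, not values from the original):

import time

def usleep(microseconds):
    # time.sleep() takes seconds, so convert from microseconds
    time.sleep(microseconds / 1000000.0)

def wait_bit_clear(read_bit, timeout=1.0, poll_interval=0.0001):
    # Poll a self-clearing hardware bit, yielding the CPU between reads
    # instead of busy-waiting with `while read_bit(): pass`.
    deadline = time.time() + timeout
    while read_bit():
        if time.time() > deadline:
            raise RuntimeError("bit did not clear within %s seconds" % timeout)
        time.sleep(poll_interval)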
0
Example 63
Project: ev3dev-lang-python Source File: GyroBalancer.py
def main(self):
def shutdown():
touchSensorValueRaw.close()
gyroSensorValueRaw.close()
motorEncoderLeft.close()
motorEncoderRight.close()
motorDutyCycleLeft.close()
motorDutyCycleRight.close()
for motor in list_motors():
motor.stop()
try:
########################################################################
##
## Definitions and Initialization variables
##
########################################################################
# Timing settings for the program
loopTimeMilliSec = 10 # Time of each loop, measured in milliseconds.
loopTimeSec = loopTimeMilliSec/1000.0 # Time of each loop, measured in seconds.
motorAngleHistoryLength = 3 # Number of previous motor angles we keep track of.
# Math constants
radiansPerDegree = math.pi/180 # The number of radians in a degree.
# Platform specific constants and conversions
degPerSecondPerRawGyroUnit = 1 # For the LEGO EV3 Gyro in Rate mode, 1 unit = 1 deg/s
radiansPerSecondPerRawGyroUnit = degPerSecondPerRawGyroUnit*radiansPerDegree # Express the above as the rate in rad/s per gyro unit
degPerRawMotorUnit = 1 # For the LEGO EV3 Large Motor 1 unit = 1 deg
radiansPerRawMotorUnit = degPerRawMotorUnit*radiansPerDegree # Express the above as the angle in rad per motor unit
RPMperPerPercentSpeed = 1.7 # On the EV3, "1% speed" corresponds to 1.7 RPM (if speed control were enabled)
degPerSecPerPercentSpeed = RPMperPerPercentSpeed*360/60 # Convert this number to the speed in deg/s per "percent speed"
radPerSecPerPercentSpeed = degPerSecPerPercentSpeed * radiansPerDegree # Convert this number to the speed in rad/s per "percent speed"
# The rate at which we'll update the gyro offset (precise definition given in docs)
gyroDriftCompensationRate = 0.1 * loopTimeSec * radiansPerSecondPerRawGyroUnit
# A deque (a fifo array) which we'll use to keep track of previous motor positions, which we can use to calculate the rate of change (speed)
motorAngleHistory = deque([0], motorAngleHistoryLength)
# State feedback control gains (aka the magic numbers)
gainGyroAngle = self.gainGyroAngle
gainGyroRate = self.gainGyroRate
gainMotorAngle = self.gainMotorAngle
gainMotorAngularSpeed = self.gainMotorAngularSpeed
gainMotorAngleErrorAccuemulated = self.gainMotorAngleErrorAccuemulated
# Variables representing physical signals (more info on these in the docs)
# The angle of "the motor", measured in raw units (degrees for the
# EV3). We will take the average of both motor positions as "the motor"
# angle, which is essentially how far the middle of the robot has traveled.
motorAngleRaw = 0
# The angle of the motor, converted to radians (2*pi radians equals 360 degrees).
motorAngle = 0
# The reference angle of the motor. The robot will attempt to drive
# forward or backward, such that its measured position equals this
# reference (or close enough).
motorAngleReference = 0
# The error: the deviation of the measured motor angle from the reference.
# The robot attempts to make this zero, by driving toward the reference.
motorAngleError = 0
# We add up all of the motor angle error in time. If this value gets out of
# hand, we can use it to drive the robot back to the reference position a bit quicker.
motorAngleErrorAccuemulated = 0
# The motor speed, estimated by how far the motor has turned in a given amount of time
motorAngularSpeed = 0
# The reference speed during maneuvers: how fast we would like to drive, measured in radians per second.
motorAngularSpeedReference = 0
# The error: the deviation of the motor speed from the reference speed.
motorAngularSpeedError = 0
# The 'voltage' signal we send to the motor. We calculate a new value each
# time, just right to keep the robot upright.
motorDutyCycle = 0
# The raw value from the gyro sensor in rate mode.
gyroRateRaw = 0
# The angular rate of the robot (how fast it is falling forward or backward), measured in radians per second.
gyroRate = 0
# The gyro doesn't measure the angle of the robot, but we can estimate
# this angle by keeping track of the gyroRate value over time
gyroEstimatedAngle = 0
# Over time, the gyro rate value can drift. This causes the sensor to think
# it is moving even when it is perfectly still. We keep track of this offset.
gyroOffset = 0
# filehandles for fast reads/writes
# =================================
touchSensorValueRaw = open(self.touch._path + "/value0", "rb")
gyroSensorValueRaw = open(self.gyro._path + "/value0", "rb")
# Open motor files for (fast) reading
motorEncoderLeft = open(self.left_motor._path + "/position", "rb")
motorEncoderRight = open(self.right_motor._path + "/position", "rb")
# Open motor files for (fast) writing
motorDutyCycleLeft = open(self.left_motor._path + "/duty_cycle_sp", "w")
motorDutyCycleRight = open(self.right_motor._path + "/duty_cycle_sp", "w")
########################################################################
##
## Calibrate Gyro
##
########################################################################
print("-----------------------------------")
print("Calibrating...")
#As you hold the robot still, determine the average sensor value of 100 samples
gyroRateCalibrateCount = 100
for i in range(gyroRateCalibrateCount):
gyroOffset = gyroOffset + FastRead(gyroSensorValueRaw)
time.sleep(0.01)
gyroOffset = gyroOffset/gyroRateCalibrateCount
# Print the result
print("GyroOffset: %s" % gyroOffset)
print("-----------------------------------")
print("GO!")
print("-----------------------------------")
########################################################################
##
## MAIN LOOP (Press Touch Sensor to stop the program)
##
########################################################################
# Initial touch sensor value
touchSensorPressed = FastRead(touchSensorValueRaw)
while not touchSensorPressed:
###############################################################
## Loop info
###############################################################
tLoopStart = time.clock()
###############################################################
## Reading the Remote Control
###############################################################
self.remote.process()
###############################################################
## Reading the Gyro.
###############################################################
gyroRateRaw = FastRead(gyroSensorValueRaw)
gyroRate = (gyroRateRaw - gyroOffset)*radiansPerSecondPerRawGyroUnit
###############################################################
## Reading the Motor Position
###############################################################
motorAngleRaw = (FastRead(motorEncoderLeft) + FastRead(motorEncoderRight))/2
motorAngle = motorAngleRaw*radiansPerRawMotorUnit
motorAngularSpeedReference = self.speed * radPerSecPerPercentSpeed
motorAngleReference = motorAngleReference + motorAngularSpeedReference * loopTimeSec
motorAngleError = motorAngle - motorAngleReference
###############################################################
## Computing Motor Speed
###############################################################
motorAngularSpeed = (motorAngle - motorAngleHistory[0])/(motorAngleHistoryLength * loopTimeSec)
motorAngularSpeedError = motorAngularSpeed - motorAngularSpeedReference
motorAngleHistory.append(motorAngle)
###############################################################
## Computing the motor duty cycle value
###############################################################
motorDutyCycle = (gainGyroAngle * gyroEstimatedAngle
+ gainGyroRate * gyroRate
+ gainMotorAngle * motorAngleError
+ gainMotorAngularSpeed * motorAngularSpeedError
+ gainMotorAngleErrorAccuemulated * motorAngleErrorAccuemulated)
###############################################################
## Apply the signal to the motor, and add steering
###############################################################
SetDuty(motorDutyCycleRight, motorDutyCycle + self.steering)
SetDuty(motorDutyCycleLeft, motorDutyCycle - self.steering)
###############################################################
## Update angle estimate and Gyro Offset Estimate
###############################################################
gyroEstimatedAngle = gyroEstimatedAngle + gyroRate * loopTimeSec
gyroOffset = (1 - gyroDriftCompensationRate) * gyroOffset + gyroDriftCompensationRate * gyroRateRaw
###############################################################
## Update Accuemulated Motor Error
###############################################################
motorAngleErrorAccuemulated = motorAngleErrorAccuemulated + motorAngleError * loopTimeSec
###############################################################
## Read the touch sensor (the kill switch)
###############################################################
touchSensorPressed = FastRead(touchSensorValueRaw)
###############################################################
## Busy wait for the loop to complete
###############################################################
while ((time.clock() - tLoopStart) < loopTimeSec):
time.sleep(0.0001)
shutdown()
# Exit cleanly so that all motors are stopped
except (KeyboardInterrupt, Exception) as e:
log.exception(e)
shutdown()
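The balancer keeps a fixed 10 ms loop period by recording tLoopStart and then sleeping in 0.1 ms slices until the period has elapsed. A minimal sketch of the same fixed-rate pattern, assuming a step() callback that returns False to stop; names here are hypothetical, and the sketch uses time.time() for wall-clock timing:

import time

def run_fixed_rate(step, loop_time_sec=0.010):
    # Call step() once per loop_time_sec, sleeping away whatever time
    # the body left unused; if the body overruns, start immediately.
    next_deadline = time.time() + loop_time_sec
    while step():
        remaining = next_deadline - time.time()
        if remaining > 0:
            time.sleep(remaining)
        next_deadline = time.time() + loop_time_sec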
0
Example 64
Project: Verum Source File: osint_bambenekconsulting_com_v2.py
def minion(self, storage=None, *args, **xargs):
self.app = self.Verum.app(self.parent.PluginFolder, None)
# set storage
if storage is None:
storage = self.parent.storage
self.app.set_interface(storage)
# Check until stopped
while not self.shutdown:
# Check to see if it's the same day; if it is, sleep for a while, otherwise run the import
delta = datetime.utcnow() - self.today
if delta.days <= 0:
time.sleep(SLEEP_TIME)
else:
logging.info("Starting daily {0} enrichment.".format(NAME))
# Get the file
r = requests.get(FEED)
# split it out
feed = r.text.split("\n")
# Create list of IPs for cymru enrichment
ips = set()
for row in feed:
# Parse date
l = row.find("Feed generated at:")
if l > -1:
dt = row[l+18:].strip()
dt = dateutil.parser.parse(dt).strftime("%Y-%m-%dT%H:%M:%SZ")
continue
row = row.split(",")
# if it's a record, parse the record
if len(row) == 6:
try:
# split out sub values
# row[0] -> domain
row[1] = row[1].split("|") # ip
row[2] = row[2].split("|") # nameserver domain
row[3] = row[3].split("|") # nameserver ip
row[4] = row[4][26:-22] # malware
# row[5] -> source
# Validate data in row
ext = tldextract.extract(row[0])
if not ext.domain or not ext.suffix:
# domain is not legitimate
continue
l = list()
for ip in row[1]:
try:
_ = ipaddress.ip_address(unicode(ip))
l.append(ip)
except:
pass
row[1] = copy.deepcopy(l)
l = list()
for domain in row[2]:
ext = tldextract.extract(domain)
if ext.domain and ext.suffix:
l.append(domain)
row[2] = copy.deepcopy(l)
l = list()
for ip in row[3]:
try:
_ = ipaddress.ip_address(unicode(ip))
l.append(ip)
except:
pass
row[3] = copy.deepcopy(l)
# add the ips to the set of ips
ips = ips.union(set(row[1])).union(set(row[3]))
g = nx.MultiDiGraph()
# Add indicator to graph
## (Must account for the different types of indicators)
target_uri = "class=attribute&key={0}&value={1}".format('domain', row[0])
g.add_node(target_uri, {
'class': 'attribute',
'key': 'domain',
"value": row[0],
"start_time": dt,
"uri": target_uri
})
# Threat node
threat_uri = "class=attribute&key={0}&value={1}".format("malware", row[4])
g.add_node(threat_uri, {
'class': 'attribute',
'key': "malware",
"value": row[4],
"start_time": dt,
"uri": threat_uri
})
# Threat Edge
edge_attr = {
"relationship": "describedBy",
"origin": row[5],
"start_time": dt
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, target_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, threat_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(target_uri, threat_uri, edge_uri, edge_attr)
# for each IP associated with the domain, connect it to the target
for ip in row[1]:
# Create IP node
target_ip_uri = "class=attribute&key={0}&value={1}".format("ip", ip)
g.add_node(target_ip_uri, {
'class': 'attribute',
'key': "ip",
"value": ip,
"start_time": dt,
"uri": target_ip_uri
})
# ip Edge
edge_attr = {
"relationship": "describedBy",
"origin": row[5],
"start_time": dt,
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, target_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, target_ip_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(target_uri, target_ip_uri, edge_uri, edge_attr)
for nameserver in row[2]:
# Create nameserver node
ns_uri = "class=attribute&key={0}&value={1}".format("domain", nameserver)
g.add_node(ns_uri, {
'class': 'attribute',
'key': "domain",
"value": nameserver,
"start_time": dt,
"uri": ns_uri
})
# nameserver Edge
edge_attr = {
"relationship": "describedBy",
"origin": row[5],
"start_time": dt,
'describedBy': 'nameserver'
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, target_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, ns_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(target_uri, ns_uri, edge_uri, edge_attr)
# if the number of NS IPs is a multiple of the # of NS's, we'll assume each NS gets some of the ips
if len(row[2]) and len(row[3]) % len(row[2]) == 0:
for i in range(len(row[2])):
for j in range(len(row[3])/len(row[2])):
# Create NS IP node
ns_ip_uri = "class=attribute&key={0}&value={1}".format("ip", row[3][i*len(row[3])/len(row[2]) + j])
g.add_node(ns_ip_uri, {
'class': 'attribute',
'key': "ip",
"value": ip,
"start_time": dt,
"uri": ns_ip_uri
})
# create NS uri
ns_uri = "class=attribute&key={0}&value={1}".format("domain", row[2][i])
# link NS to IP
edge_attr = {
"relationship": "describedBy",
"origin": row[5],
"start_time": dt
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, ns_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, ns_ip_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(ns_uri, ns_ip_uri, edge_uri, edge_attr)
# otherwise we'll attach each IP to each NS
else:
for ip in row[3]:
# Create NS IP node
ns_ip_uri = "class=attribute&key={0}&value={1}".format("ip", ip)
g.add_node(ns_ip_uri, {
'class': 'attribute',
'key': "ip",
"value": ip,
"start_time": dt,
"uri": ns_ip_uri
})
for ns in row[2]:
# create NS uri
ns_uri = "class=attribute&key={0}&value={1}".format("domain", ns)
# link NS to IP
edge_attr = {
"relationship": "describedBy",
"origin": row[5],
"start_time": dt
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, ns_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, ns_ip_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(ns_uri, ns_ip_uri, edge_uri, edge_attr)
# classify malicious and merge with current graph
g = self.Verum.merge_graphs(g, self.app.classify.run({'key': 'domain', 'value': row[0], 'classification': 'malice'}))
# enrich depending on type
for domain in [row[0]] + row[2]:
try:
g = self.Verum.merge_graphs(g, self.app.run_enrichments(domain, "domain", names=['TLD Enrichment']))
g = self.Verum.merge_graphs(g, self.app.run_enrichments(domain, "domain", names=['IP Whois Enrichment']))
except Exception as e:
logging.info("Enrichment of {0} failed due to {1}.".format(domain, e))
#print "Enrichment of {0} failed due to {1}.".format(domain, e) # DEBUG
#raise
pass
for ip in row[1] + row[3]:
try:
g = self.Verum.merge_graphs(g, self.app.run_enrichments(ip, "ip", names=[u'Maxmind ASN Enrichment']))
except Exception as e:
logging.info("Enrichment of {0} failed due to {1}.".format(ip, e))
pass
try:
self.app.store_graph(self.Verum.remove_non_ascii_from_graph(g))
except:
print g.nodes(data=True) # DEBUG
print g.edges(data=True) # DEBUG
raise
# Do cymru enrichment
if len(ips) >= 50:
# validate IPs
ips2 = set()
for ip in ips:
try:
_ = ipaddress.ip_address(unicode(ip))
ips2.add(ip)
except:
pass
ips = ips2
del(ips2)
try:
self.app.store_graph(self.app.run_enrichments(ips, 'ip', names=[u'Cymru Enrichment']))
#print "Cymru enrichment complete."
except Exception as e:
logging.info("Cymru enrichment of {0} IPs failed due to {1}.".format(len(ips), e))
#print "Cymru enrichment of {0} IPs failed due to {1}.".format(len(ips), e) # DEBUG
pass
ips = set()
except Exception as e:
print row
print e
raise
# Copy today's date to today
self.today = datetime.utcnow()
logging.info("Daily {0} enrichment complete.".format(NAME))
print "Daily {0} enrichment complete.".format(NAME) # DEBUG
0
Example 65
Project: tp-qemu Source File: rh_kernel_update.py
@error.context_aware
def run(test, params, env):
"""
Install/upgrade a specific kernel package via the brew tool or a direct link.
Another case, 'kernel_install', can do this too, but this case has additional
steps. In the future, we will merge this into the kernel_install case.
1) Boot the vm
2) Get latest kernel package link from brew
3) Verify the version of guest kernel
4) Compare guest kernel version and brew latest kernel version
5) Backup boot cfg file
6) Install guest kernel firmware (Optional)
7) Install guest kernel
8) Install guest kernel debuginfo (Optional)
9) Backup boot cfg file after installing new kernel
10) Installing virtio driver (Optional)
11) Backup initrd file
12) Update initrd file
13) Make the new installed kernel as default
14) Backup boot cfg file after setting new kernel as default
15) Update the guest kernel cmdline (Optional)
16) Reboot guest after updating kernel
17) Verifying the virtio drivers (Optional)
@param test: QEMU test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
def get_brew_url(mnt_path, download_root):
# get the url from the brew mnt path
url = download_root + mnt_path[11:]
logging.debug("Brew URL is %s" % url)
return url
def install_rpm(session, url, upgrade=False, nodeps=False, timeout=600):
# install a package from brew
cmd = "rpm -ivhf %s" % url
if upgrade:
# Upgrades or installs the kernel to a newer version, then removes
# the old version.
cmd = "rpm -Uvhf %s" % url
if nodeps:
cmd += " --nodeps"
s, o = session.cmd_status_output(cmd, timeout=timeout)
if s != 0 and ("already" not in o):
raise error.TestFail("Fail to install %s:%s" % (url, o))
return True
# FIXME: need to add the check for newer version
def copy_and_install_rpm(session, url, upgrade=False):
rpm_name = os.path.basename(url)
if url.startswith("http"):
download_cmd = "wget %s" % url
utils.system_output(download_cmd)
rpm_src = rpm_name
else:
rpm_src = utils_misc.get_path(test.bindir, url)
vm.copy_files_to(rpm_src, "/tmp/%s" % rpm_name)
install_rpm(session, "/tmp/%s" % rpm_name, upgrade)
def get_kernel_rpm_link():
method = params.get("method", "link")
if method not in ["link", "brew"]:
raise error.TestError("Unknown installation method %s" % method)
if method == "link":
return (params.get("kernel_version"),
params.get("kernel_rpm"),
params.get("firmware_rpm"))
error.context("Get latest kernel package link from brew", logging.info)
# fetch the newest packages from brew
# FIXME: really brain dead method to fetch the kernel version
# kernel_version = re... + hint from configuration file
# is there any smart way to fetch the `uname -r` from
# brew build?
rh_kernel_hint = "[\d+][^\s]+"
kernel_re = params.get("kernel_re")
tag = params.get("brew_tag")
latest_pkg_cmd = "brew latest-pkg %s kernel" % tag
o = utils.system_output(latest_pkg_cmd, timeout=360)
build = re.findall("kernel[^\s]+", o)[0]
logging.debug("Latest package on brew for tag %s is %s" %
(tag, build))
buildinfo = utils.system_output("brew buildinfo %s" % build,
timeout=360)
# install kernel-firmware
firmware_url = None
if "firmware" in buildinfo:
logging.info("Found kernel-firmware")
fw_pattern = ".*firmware.*"
try:
fw_brew_link = re.findall(fw_pattern, buildinfo)[0]
except IndexError:
raise error.TestError("Could not get kernel-firmware package"
" brew link matching pattern '%s'" % fw_pattern)
firmware_url = get_brew_url(fw_brew_link, download_root)
knl_pattern = kernel_re % rh_kernel_hint
try:
knl_brew_link = re.findall(knl_pattern, buildinfo, re.I)[0]
except IndexError:
raise error.TestError("Could not get kernel package brew link"
" matching pattern '%s'" % knl_pattern)
kernel_url = get_brew_url(knl_brew_link, download_root)
debug_re = kernel_re % ("(%s)" % rh_kernel_hint)
try:
kernel_version = re.findall(debug_re, kernel_url, re.I)[0]
except IndexError:
raise error.TestError("Could not get kernel version matching"
" pattern '%s'" % debug_re)
kernel_version += "." + params.get("kernel_suffix", "")
return kernel_version, kernel_url, firmware_url
def get_kernel_debuginfo_rpm_link():
knl_dbginfo_re = params.get("knl_dbginfo_re")
tag = params.get("brew_tag")
latest_pkg_cmd = "brew latest-pkg %s kernel" % tag
o = utils.system_output(latest_pkg_cmd, timeout=360)
build = re.findall("kernel[^\s]+", o)[0]
logging.debug("Latest package on brew for tag %s is %s" %
(tag, build))
buildinfo = utils.system_output("brew buildinfo %s" % build,
timeout=360)
try:
knl_dbginfo_links = re.findall(knl_dbginfo_re,
buildinfo, re.I)
except IndexError:
raise error.TestError("Could not get kernel-debuginfo package "
"brew link matching pattern '%s'" %
knl_dbginfo_re)
knl_dbginfo_urls = []
for l in knl_dbginfo_links:
link = get_brew_url(l, download_root)
knl_dbginfo_urls.append(link)
return knl_dbginfo_urls
def get_guest_kernel_version():
error.context("Verify the version of guest kernel", logging.info)
s, o = session.cmd_status_output("uname -r")
return o.strip()
def is_kernel_debuginfo_installed():
get_kernel_debuginfo_cmd = "rpm -qa | grep %s" % knl_dbginfo_version
s, o = session.cmd_status_output(get_kernel_debuginfo_cmd)
if s != 0:
return False
if knl_dbginfo_version not in o:
logging.debug("%s has not been installed." % knl_dbginfo_version)
return False
logging.debug("%s has already been installed." % knl_dbginfo_version)
return True
def is_virtio_driver_installed():
s, o = session.cmd_status_output("lsmod | grep virtio")
if s != 0:
return False
for driver in virtio_drivers:
if driver not in o:
logging.debug("%s has not been installed." % driver)
return False
logging.debug("%s has already been installed." % driver)
return True
def compare_kernel_version(kernel_version, guest_version):
error.context("Compare guest kernel version and brew's", logging.info)
# return True: when kernel_version <= guest_version
if guest_version == kernel_version:
logging.info("The kernel version is matched %s" % guest_version)
return True
kernel_s = re.split('[.-]', kernel_version)
guest_s = re.split('[.-]', guest_version)
kernel_v = [int(i) for i in kernel_s if i.isdigit()]
guest_v = [int(i) for i in guest_s if i.isdigit()]
for i in range(min(len(kernel_v), len(guest_v))):
if kernel_v[i] < guest_v[i]:
logging.debug("The kernel version: '%s' is old than"
" guest version %s" % (kernel_version, guest_version))
return True
elif kernel_v[i] > guest_v[i]:
return False
if len(kernel_v) < len(guest_v):
logging.debug("The kernel_version: %s is old than guest_version"
" %s" % (kernel_version, guest_version))
return True
return False
def get_guest_pkgs(session, pkg, qformat=""):
"""
Query packages in the guest that require 'pkg'.
:param session: session object to guest.
:param pkg: package name without version and arch info.
:param qformat: display format (e.g. %{NAME}, %{VERSION}).
:return: list of packages.
:rtype: list
"""
cmd = "rpm -q --whatrequires %s" % pkg
if qformat:
cmd += " --queryformat='%s\n'" % qformat
pkgs = session.cmd_output(cmd).splitlines()
pkgs.append(pkg)
return pkgs
def get_latest_pkgs_url(pkg, arch):
"""
Get url of latest packages in brewweb.
:param pkg: package name without version info.
:param brew_tag: required in cfg file.
:param vm_arch_name: required in cfg file.
:param latest_pkg_cmd: required in cfg file.
:return: urls for pkg in brewweb.
:rtype: list
"""
tag = params.get("brew_tag")
latest_pkg_cmd = params.get("latest_pkg_cmd", "brew latest-pkg")
latest_pkg_cmd = "%s %s %s" % (latest_pkg_cmd, tag, pkg)
latest_pkg_cmd = "%s --arch=%s --paths" % (latest_pkg_cmd, arch)
mnt_paths = utils.system_output(latest_pkg_cmd).splitlines()
return [get_brew_url(_, download_root)
for _ in mnt_paths if _.endswith(".rpm")]
def upgrade_guest_pkgs(session, pkg, arch, debuginfo=False,
nodeps=True, timeout=600):
"""
upgrade given packages in guest os.
:param session: session object.
:param pkg: package name without version info.
:param debuginfo: bool type; if True, install the debuginfo package too.
:param nodeps: bool type; if True, ignore deps when installing the rpm.
:param timeout: float type; timeout value when installing the rpm.
"""
error.context("Upgrade package '%s' in guest" % pkg, logging.info)
pkgs = get_guest_pkgs(session, pkg, "%{NAME}")
latest_pkgs_url = get_latest_pkgs_url(pkg, arch)
for url in latest_pkgs_url:
if "debuginfo" in url and not debuginfo:
continue
upgrade = bool(filter(lambda x: x in url, pkgs))
logging.info("Install packages from: %s" % url)
install_rpm(session, url, upgrade, nodeps, timeout)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
download_root = params["download_root_url"]
login_timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=login_timeout)
install_virtio = params.get("install_virtio", "yes")
install_knl_debuginfo = params.get("install_knl_debuginfo")
verify_virtio = params.get("verify_virtio", "yes")
args_removed = params.get("args_removed", "").split()
args_added = params.get("args_added", "").split()
restore_initrd_cmd = ""
virtio_drivers = params.get("virtio_drivers_list", "").split()
kernel_version, kernel_rpm, firmware_rpm = get_kernel_rpm_link()
knl_dbginfo_rpm = get_kernel_debuginfo_rpm_link()
knl_dbginfo_version = "kernel-debuginfo-%s" % kernel_version
logging.info("Kernel version: %s" % kernel_version)
logging.info("Kernel rpm : %s" % kernel_rpm)
logging.info("Firmware rpm : %s" % firmware_rpm)
boot_cfg_path = params.get("boot_cfg_path", "/boot/grub/grub.conf")
bootcfg_backup_cmd = "\cp -af {0} {0}-bk".format(boot_cfg_path)
bootcfg_restore_cmd = "\cp -af {0}-bk {0}".format(boot_cfg_path)
count = 0
try:
error.context("Backup '%s'" % boot_cfg_path)
s, o = session.cmd_status_output(bootcfg_backup_cmd)
if s != 0:
raise error.TestError("Failed to backup '%s', guest output: '%s'"
% (boot_cfg_path, o))
count = 1
# decide whether a new kernel needs to be installed
ifupdatekernel = True
guest_version = get_guest_kernel_version()
if compare_kernel_version(kernel_version, guest_version):
ifupdatekernel = False
# set kernel_version to current version for later step to use
kernel_version = guest_version
if is_kernel_debuginfo_installed():
install_knl_debuginfo = "no"
if is_virtio_driver_installed():
install_virtio = "no"
else:
logging.info("The guest kerenl is %s but expected is %s" %
(guest_version, kernel_version))
rpm_install_func = install_rpm
if params.get("install_rpm_from_local") == "yes":
rpm_install_func = copy_and_install_rpm
kernel_deps_pkgs = params.get("kernel_deps_pkgs", "dracut").split()
if kernel_deps_pkgs:
for pkg in kernel_deps_pkgs:
arch = params.get("arch_%s" % pkg,
params.get("vm_arch_name"))
upgrade_guest_pkgs(session, pkg, arch)
if firmware_rpm:
error.context("Install guest kernel firmware", logging.info)
rpm_install_func(session, firmware_rpm, upgrade=True)
error.context("Install guest kernel", logging.info)
status = rpm_install_func(session, kernel_rpm)
if status:
count = 2
error.context("Backup '%s' after installing new kernel"
% boot_cfg_path, logging.info)
s, o = session.cmd_status_output(bootcfg_backup_cmd)
if s != 0:
msg = ("Fail to backup '%s' after updating kernel,"
" guest output: '%s'" % (boot_cfg_path, o))
logging.error(msg)
raise error.TestError(msg)
kernel_path = "/boot/vmlinuz-%s" % kernel_version
if install_knl_debuginfo == "yes":
error.context("Installing kernel-debuginfo packages", logging.info)
links = ""
for r in knl_dbginfo_rpm:
links += " %s" % r
install_rpm(session, links)
if install_virtio == "yes":
error.context("Installing virtio driver", logging.info)
initrd_prob_cmd = "grubby --info=%s" % kernel_path
s, o = session.cmd_status_output(initrd_prob_cmd)
if s != 0:
msg = ("Could not get guest kernel information,"
" guest output: '%s'" % o)
logging.error(msg)
raise error.TestError(msg)
try:
initrd_path = re.findall("initrd=(.*)", o)[0]
except IndexError:
raise error.TestError("Could not get initrd path from guest,"
" guest output: '%s'" % o)
driver_list = ["--with=%s " % drv for drv in virtio_drivers]
mkinitrd_cmd = "mkinitrd -f %s " % initrd_path
mkinitrd_cmd += "".join(driver_list)
mkinitrd_cmd += " %s" % kernel_version
cp_initrd_cmd = "\cp -af %s %s-bk" % (initrd_path, initrd_path)
restore_initrd_cmd = "\cp -af %s-bk %s" % (initrd_path,
initrd_path)
error.context("Backup initrd file")
s, o = session.cmd_status_output(cp_initrd_cmd, timeout=200)
if s != 0:
logging.error("Failed to backup guest initrd,"
" guest output: '%s'", o)
error.context("Update initrd file", logging.info)
s, o = session.cmd_status_output(mkinitrd_cmd, timeout=360)
if s != 0:
msg = "Failed to install virtio driver, guest output '%s'" % o
logging.error(msg)
raise error.TestFail(msg)
count = 3
# make sure the newly installed kernel is the default
if ifupdatekernel:
error.context("Make the new installed kernel as default",
logging.info)
make_def_cmd = "grubby --set-default=%s " % kernel_path
s, o = session.cmd_status_output(make_def_cmd)
if s != 0:
msg = ("Fail to set %s as default kernel,"
" guest output: '%s'" % (kernel_path, o))
logging.error(msg)
raise error.TestError(msg)
count = 4
error.context(
"Backup '%s' after setting new kernel as default"
% boot_cfg_path)
s, o = session.cmd_status_output(bootcfg_backup_cmd)
if s != 0:
msg = ("Fail to backup '%s', guest output: '%s'"
% (boot_cfg_path, o))
logging.error(msg)
raise error.TestError(msg)
# remove or add the required arguments
error.context("Update the guest kernel cmdline", logging.info)
remove_args_list = ["--remove-args=%s " % arg for arg in args_removed]
update_kernel_cmd = "grubby --update-kernel=%s " % kernel_path
update_kernel_cmd += "".join(remove_args_list)
update_kernel_cmd += '--args="%s"' % " ".join(args_added)
s, o = session.cmd_status_output(update_kernel_cmd)
if s != 0:
msg = "Fail to modify the kernel cmdline, guest output: '%s'" % o
logging.error(msg)
raise error.TestError(msg)
count = 5
# upgrade listed packages to latest version.
for pkg in params.get("upgrade_pkgs", "").split():
_ = params.object_params(pkg)
arch = _.get("vm_arch_name", "x86_64")
nodeps = _.get("ignore_deps") == "yes"
install_debuginfo = _.get("install_debuginfo") == "yes"
timeout = int(_.get("install_pkg_timeout", "600"))
ver_before = session.cmd_output("rpm -q %s" % pkg)
upgrade_guest_pkgs(
session,
pkg, arch,
install_debuginfo,
nodeps,
timeout)
ver_after = session.cmd_output("rpm -q %s" % pkg)
if "not installed" in ver_before:
mesg = "Install '%s' in guest" % ver_after
else:
mesg = "Upgrade '%s' from '%s' to '%s'" % (pkg, ver_before, ver_after)
logging.info(mesg)
# reboot guest
error.context("Reboot guest after updating kernel", logging.info)
time.sleep(int(params.get("sleep_before_reset", 10)))
session = vm.reboot(session, 'shell', timeout=login_timeout)
# check if the guest can bootup normally after kernel update
guest_version = get_guest_kernel_version()
if guest_version != kernel_version:
raise error.TestFail("Fail to verify the guest kernel, \n"
"Expceted version %s \n"
"In fact version %s \n" %
(kernel_version, guest_version))
if verify_virtio == "yes":
error.context("Verifying the virtio drivers", logging.info)
if not is_virtio_driver_installed():
raise error.TestFail("Fail to verify the installation of"
" virtio drivers")
except Exception:
if count in [4, 3, 1]:
# restore boot cfg
s, o = session.cmd_status_output(bootcfg_restore_cmd, timeout=100)
if s != 0:
logging.error("Failed to execute cmd '%s' in guest,"
" guest output: '%s'", bootcfg_restore_cmd, o)
elif count == 2 and restore_initrd_cmd:
# restore initrd file
s, o = session.cmd_status_output(restore_initrd_cmd, timeout=200)
if s != 0:
logging.error("Failed to execute cmd '%s' in guest,"
" guest output: '%s'", restore_initrd_cmd, o)
raise
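Before rebooting into the new kernel, the test sleeps for a configurable grace period (sleep_before_reset, default 10 seconds) so the guest can settle after the package operations. A minimal sketch of that step as a standalone helper (the function name is hypothetical; vm, session and params are the framework objects used above):

import time

def reboot_after_settle(vm, session, params, login_timeout=360):
    # Give the guest a short, configurable settle period before the
    # disruptive reset, then reboot and wait for a fresh login session.
    time.sleep(int(params.get("sleep_before_reset", 10)))
    return vm.reboot(session, 'shell', timeout=login_timeout)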
0
Example 66
Project: haoide Source File: metadata.py
def deploy(self, base64_zip, test_classes=[]):
""" Deploy zip file
Arguments:
* zipFile -- base64 encoded zipfile
"""
result = self.login()
if not result or not result["success"]: return
# Log the StartTime
start_time = datetime.datetime.now()
# Populate the soap_body with actual options
deploy_options = self.settings["deploy_options"]
# If just checkOnly, output VALIDATE, otherwise, output DEPLOY
deploy_or_validate = "validate" if deploy_options["checkOnly"] else "deploy"
# [sf:deploy]
Printer.get('log').write_start().write("[sf:%s] Start request for a deploy..." % deploy_or_validate)
options = deploy_options
options["zipfile"] = base64_zip
# If testLevel is Run Specified Test,
# we need to specify the runTests
testLevel = options.get("testLevel", "NoTestRun")
if testLevel == "RunSpecifiedTests":
options["runTests"] = "\n".join([
"<met:runTests>%s</met:runTests>" % c for c in test_classes
])
soap_body = self.soap.create_request('deploy', options)
try:
response = requests.post(self.metadata_url, soap_body,
verify=False, headers=self.headers)
except requests.exceptions.RequestException as e:
self.result = {
"Error Message": "Network connection timeout when issuing deploy request",
"success": False
}
return self.result
# Check whether session_id is expired
if "INVALID_SESSION_ID" in response.text:
Printer.get('log').write("[sf:%s] Session expired, need login again" % deploy_or_validate)
result = self.login(True)
if not result["success"]:
self.result = result
return self.result
return self.deploy(base64_zip)
# If status_code is > 399, the response indicates an error
if response.status_code > 399:
self.result = util.get_response_error(response)
return self.result
# [sf:deploy]
Printer.get('log').write("[sf:%s] Request for a deploy submitted successfully." % deploy_or_validate)
# Get async process id
async_process_id = util.getUniqueElementValueFromXmlString(response.content, "id")
# [sf:deploy]
Printer.get('log').write("[sf:%s] Request ID for the current deploy task: %s" % (deploy_or_validate, async_process_id))
Printer.get('log').write("[sf:%s] Waiting for server to finish processing the request..." % deploy_or_validate)
# 2. issue a check status loop request to assure the async
# process is done
result = self.check_deploy_status(async_process_id)
body = result["body"]
index = 1
failure_dict = {}
while body["status"] in ["Pending", "InProgress", "Canceling"]:
if "stateDetail" in body:
if int(body["numberComponentsDeployed"]) < int(body["numberComponentsTotal"]):
Printer.get('log').write("[sf:%s] Request Status: %s (%s/%s) -- %s" % (
deploy_or_validate,
body["status"],
body["numberComponentsDeployed"],
body["numberComponentsTotal"],
body["stateDetail"]
))
else:
Printer.get('log').write("[sf:%s] TestRun Status: %s (%s/%s) -- %s" % (
deploy_or_validate,
body["status"],
body["numberTestsCompleted"],
body["numberTestsTotal"],
body["stateDetail"]
))
else:
Printer.get('log').write("[sf:%s] Request Status: %s" % (
deploy_or_validate, body["status"]
))
# Process Test Run Result
if "runTestResult" in body["details"] and \
"failures" in body["details"]["runTestResult"]:
failures = body["details"]["runTestResult"]["failures"]
if isinstance(failures, dict):
if failures["id"] not in failure_dict:
failure_dict[failures["id"]] = failures
Printer.get('log').write("-" * 84).write("Test Failures: ")
Printer.get('log').write("%s.\t%s" % (index, failures["message"]))
for msg in failures["stackTrace"].split("\n"):
Printer.get('log').write("\t%s" % msg)
# [sf:deploy]
Printer.get('log').write("-" * 84)
index += 1
elif isinstance(failures, list):
for f in failures:
if f["id"] not in failure_dict:
failure_dict[f["id"]] = f
Printer.get('log').write("-" * 84).write("Test Failures: ")
Printer.get('log').write("%s.\t%s" % (index, f["message"]))
# If there is a compile error, there will be no stack trace
if isinstance(f["stackTrace"], str):
for msg in f["stackTrace"].split("\n"):
Printer.get('log').write("\t%s" % msg)
Printer.get('log').write("-" * 84)
index += 1
# Thread Wait
sleep_seconds = 2 if body["status"] == "Pending" else self.settings["metadata_polling_frequency"]
time.sleep(sleep_seconds)
result = self.check_deploy_status(async_process_id)
body = result["body"]
# Check if job is canceled
if body["status"] == "Canceled":
Printer.get('log').write("\nBUILD FAILED", False)
Printer.get('log').write("cuem******* DEPLOYMENT FAILED ***********", False)
Printer.get('log').write("Request ID: %s" % async_process_id, False)
Printer.get('log').write("\nRequest Canceled", False)
Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
# If check status request failed, this will not be done
elif body["status"] == "Failed":
# Append failure message
Printer.get('log').write("[sf:%s] Request Failed\n\nBUILD FAILED" % deploy_or_validate)
Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
Printer.get('log').write("Request ID: %s" % async_process_id, False)
# Output Failure Details
failures_messages = []
if "componentFailures" in body["details"]:
component_failures = body["details"]["componentFailures"]
if isinstance(component_failures, dict):
component_failures = [component_failures]
for index in range(len(component_failures)):
component_failure = component_failures[index]
failures_messages.append("%s. %s -- %s: %s (line %s column %s)" % (
index + 1,
component_failure["fileName"],
component_failure["problemType"],
component_failure["problem"],
component_failure["lineNumber"] \
if "lineNumber" in component_failure else "N/A",
component_failure["columnNumber"] \
if "columnNumber" in component_failure else "N/A"
))
elif "runTestResult" in body["details"]:
failures = body["details"]["runTestResult"].get("failures", [])
if isinstance(failures, dict):
failures = [failures]
for index in range(len(failures)):
failure = failures[index]
failures_messages.append("%s. %s -- %s: %s" % (
index + 1,
failure.get("type"),
failure.get("name"),
failure.get("message")
))
elif "errorMessage" in body:
Printer.get('log').write("\n" + body["errorMessage"], False)
warning_messages = []
if "runTestResult" in body["details"]:
runTestResult = body["details"]["runTestResult"]
if "codeCoverageWarnings" in runTestResult:
coverage_warnings = runTestResult["codeCoverageWarnings"]
if isinstance(runTestResult["codeCoverageWarnings"], dict):
coverage_warnings = [coverage_warnings]
elif isinstance(runTestResult["codeCoverageWarnings"], list):
coverage_warnings = coverage_warnings
for warn in coverage_warnings:
if not isinstance(warn["name"], str): continue
warning_messages.append("%s -- %s" % (warn["name"], warn["message"]))
# Output failure message
if failures_messages:
Printer.get('log').write("\n\nAll Component Failures:", False)
Printer.get('log').write("\n"+"\n\n".join(failures_messages), False)
# Output warning message
if warning_messages:
Printer.get('log').write("\n\nTest Coverage Warnings:", False)
Printer.get('log').write("\n"+"\n".join(warning_messages), False)
# End for Deploy Result
Printer.get('log').write("\n*********** %s FAILED ***********" % (
deploy_or_validate.upper()), False)
else:
# Append succeed message
Printer.get('log').write("\n[sf:%s] Request Succeed" % deploy_or_validate, False)
Printer.get('log').write("[sf:%s] *********** %s SUCCEEDED ***********" % (
deploy_or_validate, deploy_or_validate.upper()), False)
Printer.get('log').write("[sf:%s] Finished request %s successfully." % (
deploy_or_validate, async_process_id), False)
# Total time
total_seconds = (datetime.datetime.now() - start_time).seconds
Printer.get('log').write("\n\nTotal time: %s seconds" % total_seconds, False)
# # Display debug log message in the new view
# view = sublime.active_window().new_file()
# view.run_command("new_view", {
# "name": "Debugging Information",
# "input": result.get("header", {}).get("debugLog", "")
# })
self.result = result
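The deploy loop above is a classic poll-until-done pattern: it re-checks the async job and sleeps 2 seconds while the request is still Pending, switching to the user-configured metadata_polling_frequency once it is running. A minimal sketch, with check_status standing in for self.check_deploy_status(async_process_id) and the busy interval an assumed default:

import time

def poll_until_done(check_status, pending_interval=2, busy_interval=5):
    # Short sleeps while the job is still queued, a longer configurable
    # interval once it is actually in progress.
    body = check_status()["body"]
    while body["status"] in ["Pending", "InProgress", "Canceling"]:
        interval = pending_interval if body["status"] == "Pending" else busy_interval
        time.sleep(interval)
        body = check_status()["body"]
    return body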
0
Example 67
Project: SickGear Source File: parser.py
def _parse_string(self, name):
if not name:
return
matches = []
for regex in self.compiled_regexes:
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes[regex]:
match = cur_regex.match(name)
if not match:
continue
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'series_num' in named_groups and match.group('series_num'):
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if 'bare' == cur_regex_name and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 1
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'air_year' in named_groups and 'air_month' in named_groups and 'air_day' in named_groups:
year = int(match.group('air_year'))
month = int(match.group('air_month'))
day = int(match.group('air_day'))
# make an attempt to detect YYYY-DD-MM formats
if 12 < month:
tmp_month = month
month = day
day = tmp_month
try:
result.air_date = datetime.date(year, month, day)
except ValueError as e:
raise InvalidNameException(ex(e))
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and 'season_only' == cur_regex_name and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = helpers.remove_non_release_groups(match.group('release_group'))
result.score += 1
if 'version' in named_groups:
# assigns version to anime file if detected using anime regex. Non-anime regex receives -1
version = match.group('version')
if version:
result.version = version
else:
result.version = 1
else:
result.version = -1
matches.append(result)
if len(matches):
# pick best match with highest score based on placement
best_result = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = helpers.get_show(best_result.series_name, self.try_scene_exceptions)
# confirm passed in show object indexer id matches result show object indexer id
if show and not self.testing:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
elif not show and self.showObj:
show = self.showObj
best_result.show = show
if show and show.is_anime and 1 < len(self.compiled_regexes[1]) and 1 != regex:
continue
# if this is a naming pattern test then return best result
if not show or self.naming_pattern:
return best_result
# get quality
best_result.quality = common.Quality.nameQuality(name, show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []
# if we have an air-by-date show then get the real season/episode numbers
if best_result.is_air_by_date:
airdate = best_result.air_date.toordinal()
my_db = db.DBConnection()
sql_result = my_db.select(
'SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?',
[show.indexerid, show.indexer, airdate])
season_number = None
episode_numbers = []
if sql_result:
season_number = int(sql_result[0][0])
episode_numbers = [int(sql_result[0][1])]
if not season_number or not len(episode_numbers):
try:
lindexer_api_parms = sickbeard.indexerApi(show.indexer).api_params.copy()
if show.lang:
lindexer_api_parms['language'] = show.lang
t = sickbeard.indexerApi(show.indexer).indexer(**lindexer_api_parms)
ep_obj = t[show.indexerid].airedOn(best_result.air_date)[0]
season_number = int(ep_obj['seasonnumber'])
episode_numbers = [int(ep_obj['episodenumber'])]
except sickbeard.indexer_episodenotfound:
logger.log(u'Unable to find episode with date ' + str(best_result.air_date) + ' for show ' + show.name + ', skipping', logger.WARNING)
episode_numbers = []
except sickbeard.indexer_error as e:
logger.log(u'Unable to contact ' + sickbeard.indexerApi(show.indexer).name + ': ' + ex(e), logger.WARNING)
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if self.convert and show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(show.indexerid,
show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif show.is_anime and len(best_result.ab_episode_numbers) and not self.testing:
scene_season = scene_exceptions.get_scene_exception_by_name(best_result.series_name)[1]
for epAbsNo in best_result.ab_episode_numbers:
a = epAbsNo
if self.convert and show.is_scene:
a = scene_numbering.get_indexer_absolute_numbering(show.indexerid,
show.indexer, epAbsNo,
True, scene_season)
(s, e) = helpers.get_all_episodes_from_absolute_number(show, [a])
new_absolute_numbers.append(a)
new_episode_numbers.extend(e)
new_season_numbers.append(s)
elif best_result.season_number and len(best_result.episode_numbers) and not self.testing:
for epNo in best_result.episode_numbers:
s = best_result.season_number
e = epNo
if self.convert and show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(show.indexerid,
show.indexer,
best_result.season_number,
epNo)
if show.is_anime:
a = helpers.get_absolute_number_from_season_and_episode(show, s, e)
if a:
new_absolute_numbers.append(a)
new_episode_numbers.append(e)
new_season_numbers.append(s)
# need to do a quick sanity check here. It's possible that we now have episodes
# from more than one season (by tvdb numbering), and this is just too much
# for sickbeard, so we'd need to flag it.
new_season_numbers = list(set(new_season_numbers)) # remove duplicates
if 1 < len(new_season_numbers):
raise InvalidNameException('Scene numbering results in episodes from '
'seasons %s (i.e. more than one), and '
'SickGear does not support this. '
'Sorry.' % (str(new_season_numbers)))
# I guess it's possible that we'd have duplicate episodes too, so let's
# eliminate them
new_episode_numbers = list(set(new_episode_numbers))
new_episode_numbers.sort()
# maybe even duplicate absolute numbers so why not do them as well
new_absolute_numbers = list(set(new_absolute_numbers))
new_absolute_numbers.sort()
if len(new_absolute_numbers):
best_result.ab_episode_numbers = new_absolute_numbers
if len(new_season_numbers) and len(new_episode_numbers):
best_result.episode_numbers = new_episode_numbers
best_result.season_number = new_season_numbers[0]
if self.convert and show.is_scene:
logger.log(u'Converted parsed result %s into %s'
% (best_result.original_name, str(best_result).decode('utf-8', 'xmlcharrefreplace')),
logger.DEBUG)
# CPU sleep
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
return best_result
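The single time.sleep here is a cooperative throttle: after each name parse the thread sleeps for an interval looked up from cpu_presets by the configured CPU_PRESET, trading parse throughput for CPU headroom. A minimal sketch of such a preset table (the keys and values below are assumptions, not SickGear's actual presets):

import time

cpu_presets = {'LOW': 0.10, 'NORMAL': 0.05, 'HIGH': 0.02}

def cpu_throttle(preset='NORMAL'):
    # Yield the CPU for a preset-dependent interval between work items.
    time.sleep(cpu_presets[preset])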
0
Example 68
Project: pipeline Source File: ROSE2_main.py
def main():
'''
main run call
'''
debug = False
from optparse import OptionParser
usage = "usage: %prog [options] -g [GENOME] -i [INPUT_REGION_GFF] -r [RANKBY_BAM_FILE] -o [OUTPUT_FOLDER] [OPTIONAL_FLAGS]"
parser = OptionParser(usage=usage)
# required flags
parser.add_option("-i", "--i", dest="input", nargs=1, default=None,
help="Enter a .gff or .bed file of binding sites used to make enhancers")
parser.add_option("-r", "--rankby", dest="rankby", nargs=1, default=None,
help="bamfile to rank enhancer by")
parser.add_option("-o", "--out", dest="out", nargs=1, default=None,
help="Enter an output folder")
parser.add_option("-g", "--genome", dest="genome", nargs=1, default=None,
help="Enter the genome build (MM9,MM8,HG18,HG19)")
# optional flags
parser.add_option("-b", "--bams", dest="bams", nargs=1, default=None,
help="Enter a comma separated list of additional bam files to map to")
parser.add_option("-c", "--control", dest="control", nargs=1, default=None,
help="bamfile to rank enhancer by")
parser.add_option("-s", "--stitch", dest="stitch", nargs=1, default='',
help="Enter a max linking distance for stitching. Default will determine optimal stitching parameter")
parser.add_option("-t", "--tss", dest="tss", nargs=1, default=0,
help="Enter a distance from TSS to exclude. 0 = no TSS exclusion")
parser.add_option("--mask", dest="mask", nargs=1, default=None,
help="Mask a set of regions from analysis. Provide a .bed or .gff of masking regions")
# RETRIEVING FLAGS
(options, args) = parser.parse_args()
if not options.input or not options.rankby or not options.out or not options.genome:
print('hi there')
parser.print_help()
exit()
# making the out folder if it doesn't exist
outFolder = utils.formatFolder(options.out, True)
# figuring out folder schema
gffFolder = utils.formatFolder(outFolder + 'gff/', True)
mappedFolder = utils.formatFolder(outFolder + 'mappedGFF/', True)
# GETTING INPUT FILE
if options.input.split('.')[-1] == 'bed':
# CONVERTING A BED TO GFF
inputGFFName = options.input.split('/')[-1][0:-4]
inputGFFFile = '%s%s.gff' % (gffFolder, inputGFFName)
utils.bedToGFF(options.input, inputGFFFile)
elif options.input.split('.')[-1] == 'gff':
# COPY THE INPUT GFF TO THE GFF FOLDER
inputGFFFile = options.input
os.system('cp %s %s' % (inputGFFFile, gffFolder))
else:
print('WARNING: INPUT FILE DOES NOT END IN .gff or .bed. ASSUMING .gff FILE FORMAT')
# COPY THE INPUT GFF TO THE GFF FOLDER
inputGFFFile = options.input
os.system('cp %s %s' % (inputGFFFile, gffFolder))
# GETTING THE LIST OF BAMFILES TO PROCESS
if options.control:
bamFileList = [options.rankby, options.control]
else:
bamFileList = [options.rankby]
if options.bams:
bamFileList += options.bams.split(',')
#bamFileList = utils.uniquify(bamFileList) # makes sad when you have the same control bam over and over again
# optional args
# Stitch parameter
if options.stitch == '':
stitchWindow = ''
else:
stitchWindow = int(options.stitch)
# tss options
tssWindow = int(options.tss)
if tssWindow != 0:
removeTSS = True
else:
removeTSS = False
# GETTING THE BOUND REGION FILE USED TO DEFINE ENHANCERS
print('USING %s AS THE INPUT GFF' % (inputGFFFile))
inputName = inputGFFFile.split('/')[-1].split('.')[0]
# GETTING THE GENOME
genome = options.genome
print('USING %s AS THE GENOME' % genome)
# GETTING THE CORRECT ANNOT FILE
cwd = os.getcwd()
genomeDict = {
'HG18': '%s/annotation/hg18_refseq.ucsc' % (cwd),
'MM9': '%s/annotation/mm9_refseq.ucsc' % (cwd),
'HG19': '%s/annotation/hg19_refseq.ucsc' % (cwd),
'MM8': '%s/annotation/mm8_refseq.ucsc' % (cwd),
'MM10': '%s/annotation/mm10_refseq.ucsc' % (cwd),
'RN4': '%s/annotation/rn4_refseq.ucsc' % (cwd),
}
annotFile = genomeDict[genome.upper()]
# MAKING THE START DICT
print('MAKING START DICT')
startDict = utils.makeStartDict(annotFile)
#GET CHROMS FOUND IN THE BAMS
print('GETTING CHROMS IN BAMFILES')
bamChromList = getBamChromList(bamFileList)
print("USING THE FOLLOWING CHROMS")
print(bamChromList)
#LOADING IN THE GFF AND FILTERING BY CHROM
print('LOADING AND FILTERING THE GFF')
inputGFF = filterGFF(inputGFFFile,bamChromList)
# LOADING IN THE BOUND REGION REFERENCE COLLECTION
print('LOADING IN GFF REGIONS')
referenceCollection = utils.gffToLocusCollection(inputGFF)
print('CHECKING REFERENCE COLLECTION:')
checkRefCollection(referenceCollection)
# MASKING REFERENCE COLLECTION
# see if there's a mask
if options.mask:
maskFile = options.mask
# if it's a bed file
if maskFile.split('.')[-1].upper() == 'BED':
maskGFF = utils.bedToGFF(maskFile)
elif maskFile.split('.')[-1].upper() == 'GFF':
maskGFF = utils.parseTable(maskFile, '\t')
else:
print("MASK MUST BE A .gff or .bed FILE")
sys.exit()
maskCollection = utils.gffToLocusCollection(maskGFF)
# now mask the reference loci
referenceLoci = referenceCollection.getLoci()
filteredLoci = [locus for locus in referenceLoci if len(maskCollection.getOverlap(locus, 'both')) == 0]
print("FILTERED OUT %s LOCI THAT WERE MASKED IN %s" % (len(referenceLoci) - len(filteredLoci), maskFile))
referenceCollection = utils.LocusCollection(filteredLoci, 50)
# NOW STITCH REGIONS
print('STITCHING REGIONS TOGETHER')
stitchedCollection, debugOutput, stitchWindow = regionStitching(referenceCollection, inputName, outFolder, stitchWindow, tssWindow, annotFile, removeTSS)
# NOW MAKE A STITCHED COLLECTION GFF
print('MAKING GFF FROM STITCHED COLLECTION')
stitchedGFF = utils.locusCollectionToGFF(stitchedCollection)
# making sure start/stop ordering are correct
for i in range(len(stitchedGFF)):
line = stitchedGFF[i]
start = int(line[3])
stop = int(line[4])
if start > stop:
line[3] = stop
line[4] = start
print(stitchWindow)
print(type(stitchWindow))
if not removeTSS:
stitchedGFFFile = '%s%s_%sKB_STITCHED.gff' % (gffFolder, inputName, str(stitchWindow / 1000))
stitchedGFFName = '%s_%sKB_STITCHED' % (inputName, str(stitchWindow / 1000))
debugOutFile = '%s%s_%sKB_STITCHED.debug' % (gffFolder, inputName, str(stitchWindow / 1000))
else:
stitchedGFFFile = '%s%s_%sKB_STITCHED_TSS_DISTAL.gff' % (gffFolder, inputName, str(stitchWindow / 1000))
stitchedGFFName = '%s_%sKB_STITCHED_TSS_DISTAL' % (inputName, str(stitchWindow / 1000))
debugOutFile = '%s%s_%sKB_STITCHED_TSS_DISTAL.debug' % (gffFolder, inputName, str(stitchWindow / 1000))
# WRITING DEBUG OUTPUT TO DISK
if debug:
print('WRITING DEBUG OUTPUT TO DISK AS %s' % (debugOutFile))
utils.unParseTable(debugOutput, debugOutFile, '\t')
# WRITE THE GFF TO DISK
print('WRITING STITCHED GFF TO DISK AS %s' % (stitchedGFFFile))
utils.unParseTable(stitchedGFF, stitchedGFFFile, '\t')
# SETTING UP THE OVERALL OUTPUT FILE
outputFile1 = outFolder + stitchedGFFName + '_ENHANCER_REGION_MAP.txt'
print('OUTPUT WILL BE WRITTEN TO %s' % (outputFile1))
# MAPPING TO THE NON STITCHED (ORIGINAL GFF)
# MAPPING TO THE STITCHED GFF
# Try to use the bamliquidator_path.py script on cluster; otherwise fail over to the local copy (in PATH), otherwise fail.
bamliquidator_path = 'bamliquidator_batch'
bamFileListUnique = list(bamFileList)
bamFileListUnique = utils.uniquify(bamFileListUnique)
#prevent redundant mapping
print("MAPPING TO THE FOLLOWING BAMS:")
print(bamFileListUnique)
for bamFile in bamFileListUnique:
bamFileName = bamFile.split('/')[-1]
# MAPPING TO THE STITCHED GFF
mappedOut1Folder = '%s%s_%s_MAPPED' % (mappedFolder, stitchedGFFName, bamFileName)
mappedOut1File = '%s%s_%s_MAPPED/matrix.txt' % (mappedFolder, stitchedGFFName, bamFileName)
if utils.checkOutput(mappedOut1File, 0.2, 0.2):
print("FOUND %s MAPPING DATA FOR BAM: %s" % (stitchedGFFFile, mappedOut1File))
else:
cmd1 = bamliquidator_path + " --sense . -e 200 --match_bamToGFF -r %s -o %s %s" % (stitchedGFFFile, mappedOut1Folder, bamFile)
print(cmd1)
os.system(cmd1)
if utils.checkOutput(mappedOut1File,0.2,5):
print("SUCCESSFULLY MAPPED TO %s FROM BAM: %s" % (stitchedGFFFile, bamFileName))
else:
print("ERROR: FAILED TO MAP %s FROM BAM: %s" % (stitchedGFFFile, bamFileName))
sys.exit()
print('BAM MAPPING COMPLETED NOW MAPPING DATA TO REGIONS')
# CALCULATE DENSITY BY REGION
# NEED TO FIX THIS FUNCTION TO ACCOUNT FOR DIFFERENT OUTPUTS OF LIQUIDATOR
mapCollection(stitchedCollection, referenceCollection, bamFileList, mappedFolder, outputFile1, refName=stitchedGFFName)
print('CALLING AND PLOTTING SUPER-ENHANCERS')
if options.control:
rankbyName = options.rankby.split('/')[-1]
controlName = options.control.split('/')[-1]
cmd = 'R --no-save %s %s %s %s < ROSE2_callSuper.R' % (outFolder, outputFile1, inputName, controlName)
else:
rankbyName = options.rankby.split('/')[-1]
controlName = 'NONE'
cmd = 'R --no-save %s %s %s %s < ROSE2_callSuper.R' % (outFolder, outputFile1, inputName, controlName)
print(cmd)
os.system(cmd)
# calling the gene mapper
time.sleep(20)
superTableFile = "%s_SuperEnhancers.table.txt" % (inputName)
if options.control:
cmd = "python ROSE2_geneMapper.py -g %s -r %s -c %s -i %s%s &" % (genome, options.rankby, options.control, outFolder, superTableFile)
else:
cmd = "python ROSE2_geneMapper.py -g %s -r %s -i %s%s &" % (genome, options.rankby, outFolder, superTableFile)
os.system(cmd)
stretchTableFile = "%s_StretchEnhancers.table.txt" % (inputName)
if options.control:
cmd = "python ROSE2_geneMapper.py -g %s -r %s -c %s -i %s%s &" % (genome, options.rankby, options.control, outFolder, stretchTableFile)
else:
cmd = "python ROSE2_geneMapper.py -g %s -r %s -i %s%s &" % (genome, options.rankby, outFolder, stretchTableFile)
os.system(cmd)
superStretchTableFile = "%s_SuperStretchEnhancers.table.txt" % (inputName)
if options.control:
cmd = "python ROSE2_geneMapper.py -g %s -r %s -c %s -i %s%s &" % (genome, options.rankby, options.control, outFolder, superStretchTableFile)
else:
cmd = "python ROSE2_geneMapper.py -g %s -r %s -i %s%s &" % (genome, options.rankby, outFolder, superStretchTableFile)
os.system(cmd)
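The time.sleep(20) above is a fixed grace period: the R script and the backgrounded gene-mapper commands are fired off with os.system, and the 20-second pause gives the R stage time to write its tables before they are read. A minimal sketch of that pattern, with placeholder command and file names (and a Unix shell assumed for the '&' backgrounding):

import os
import time

# Launch a slow command in the background, then sleep a fixed grace period
# before looking for its output. Command and file name are placeholders.
os.system('(sleep 5; touch results.txt) &')
time.sleep(20)  # crude synchronization: assume the tool finishes within 20s
if os.path.exists('results.txt'):
    print('output ready')
else:
    print('output missing; the fixed sleep was too short')

A more robust variant polls for the file with a timeout, as utils.checkOutput does earlier in this example.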
0
Example 69
Project: tp-libvirt Source File: virtual_disks_ceph.py
def run(test, params, env):
"""
Test rbd disk device.
1. Prepare test environment, destroy or suspend a VM.
2. Prepare disk image.
3. Edit disks xml and start the domain.
4. Perform test operation.
5. Recover test environment.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
virsh_dargs = {'debug': True, 'ignore_status': True}
def config_ceph():
"""
Write the configs to the file.
"""
src_host = disk_src_host.split()
src_port = disk_src_port.split()
conf_str = "mon_host = "
hosts = []
for host, port in zip(src_host, src_port):
hosts.append("%s:%s" % (host, port))
with open(disk_src_config, 'w') as f:
f.write(conf_str + ','.join(hosts) + '\n')
def create_pool():
"""
Define and start a pool.
"""
sp = libvirt_storage.StoragePool()
if create_by_xml:
p_xml = pool_xml.PoolXML(pool_type=pool_type)
p_xml.name = pool_name
s_xml = pool_xml.SourceXML()
s_xml.vg_name = disk_src_pool
source_host = []
for (host_name, host_port) in zip(
disk_src_host.split(), disk_src_port.split()):
source_host.append({'name': host_name,
'port': host_port})
s_xml.hosts = source_host
if auth_type:
s_xml.auth_type = auth_type
if auth_user:
s_xml.auth_username = auth_user
if auth_usage:
s_xml.secret_usage = auth_usage
p_xml.source = s_xml
logging.debug("Pool xml: %s", p_xml)
p_xml.xmltreefile.write()
ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
libvirt.check_exit_status(ret)
ret = virsh.pool_build(pool_name, **virsh_dargs)
libvirt.check_exit_status(ret)
ret = virsh.pool_start(pool_name, **virsh_dargs)
libvirt.check_exit_status(ret)
else:
auth_opt = ""
if client_name and client_key:
auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
% (auth_type, auth_user, auth_usage))
if not sp.define_rbd_pool(pool_name, mon_host,
disk_src_pool, extra=auth_opt):
raise error.TestFail("Failed to define storage pool")
if not sp.build_pool(pool_name):
raise error.TestFail("Failed to build storage pool")
if not sp.start_pool(pool_name):
raise error.TestFail("Failed to start storage pool")
# Check pool operation
ret = virsh.pool_refresh(pool_name, **virsh_dargs)
libvirt.check_exit_status(ret)
ret = virsh.pool_uuid(pool_name, **virsh_dargs)
libvirt.check_exit_status(ret)
# pool-info
pool_info = sp.pool_info(pool_name)
if pool_info["Autostart"] != 'no':
raise error.TestFail("Failed to check pool information")
# pool-autostart
if not sp.set_pool_autostart(pool_name):
raise error.TestFail("Failed to set pool autostart")
pool_info = sp.pool_info(pool_name)
if pool_info["Autostart"] != 'yes':
raise error.TestFail("Failed to check pool information")
# pool-autostart --disable
if not sp.set_pool_autostart(pool_name, "--disable"):
raise error.TestFail("Failed to set pool autostart")
# find-storage-pool-sources-as
ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
libvirt.check_result(ret, unsupported_msg)
def create_vol(vol_params):
"""
Create volume.
:param vol_params: Volume parameters dict.
"""
pvt = libvirt.PoolVolumeTest(test, params)
if create_by_xml:
pvt.pre_vol_by_xml(pool_name, **vol_params)
else:
pvt.pre_vol(vol_name, None, '2G', None, pool_name)
def check_vol(vol_params):
"""
Check volume information.
"""
pv = libvirt_storage.PoolVolume(pool_name)
# Supported operation
if vol_name not in pv.list_volumes():
raise error.TestFail("Volume %s doesn't exist" % vol_name)
ret = virsh.vol_dumpxml(vol_name, pool_name)
libvirt.check_exit_status(ret)
# vol-info
if not pv.volume_info(vol_name):
raise error.TestFail("Can't see volmue info")
# vol-key
ret = virsh.vol_key(vol_name, pool_name)
libvirt.check_exit_status(ret)
if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout:
raise error.TestFail("Volume key isn't correct")
# vol-path
ret = virsh.vol_path(vol_name, pool_name)
libvirt.check_exit_status(ret)
if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout:
raise error.TestFail("Volume path isn't correct")
# vol-pool
ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
libvirt.check_exit_status(ret)
if pool_name not in ret.stdout:
raise error.TestFail("Volume pool isn't correct")
# vol-name
ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
libvirt.check_exit_status(ret)
if vol_name not in ret.stdout:
raise error.TestFail("Volume name isn't correct")
# vol-resize
ret = virsh.vol_resize(vol_name, "2G", pool_name)
libvirt.check_exit_status(ret)
# Not supported operation
# vol-clone
ret = virsh.vol_clone(vol_name, "atest.vol", pool_name)
libvirt.check_result(ret, unsupported_msg)
# vol-create-from
volxml = vol_xml.VolXML()
vol_params.update({"name": "atest.vol"})
v_xml = volxml.new_vol(**vol_params)
v_xml.xmltreefile.write()
ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
libvirt.check_result(ret, unsupported_msg)
# vol-wipe
ret = virsh.vol_wipe(vol_name, pool_name)
libvirt.check_result(ret, unsupported_msg)
# vol-upload
ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
"--pool %s" % pool_name)
libvirt.check_result(ret, unsupported_msg)
# vol-download
ret = virsh.vol_download(vol_name, "atest.vol", "--pool %s" % pool_name)
libvirt.check_result(ret, unsupported_msg)
def check_qemu_cmd():
"""
Check qemu command line options.
"""
cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
if disk_src_name:
cmd += " | grep file=rbd:%s:" % disk_src_name
if auth_user and auth_key:
cmd += ('id=%s:auth_supported=cephx' % auth_user)
if disk_src_config:
cmd += " | grep 'conf=%s'" % disk_src_config
elif mon_host:
hosts = r'\:6789\;'.join(mon_host.split())
cmd += " | grep 'mon_host=%s'" % hosts
if driver_iothread:
cmd += " | grep iothread=iothread%s" % driver_iothread
# Run the command
process.run(cmd, shell=True)
def check_save_restore():
"""
Test save and restore operation
"""
save_file = os.path.join(test.tmpdir,
"%s.save" % vm_name)
ret = virsh.save(vm_name, save_file, **virsh_dargs)
libvirt.check_exit_status(ret)
ret = virsh.restore(save_file, **virsh_dargs)
libvirt.check_exit_status(ret)
if os.path.exists(save_file):
os.remove(save_file)
# Login to check vm status
vm.wait_for_login().close()
def check_snapshot(snap_option):
"""
Test snapshot operation.
"""
snap_name = "s1"
snap_mem = os.path.join(test.tmpdir, "rbd.mem")
snap_disk = os.path.join(test.tmpdir, "rbd.disk")
expected_fails = []
xml_snap_exp = ["disk name='vda' snapshot='external' type='file'"]
xml_dom_exp = ["source file='%s'" % snap_disk,
"backingStore type='network' index='1'",
"source protocol='rbd' name='%s'" % disk_src_name]
if snap_option.count("disk-only"):
options = ("%s --diskspec vda,file=%s --disk-only" %
(snap_name, snap_disk))
elif snap_option.count("disk-mem"):
options = ("%s --memspec file=%s --diskspec vda,file="
"%s" % (snap_name, snap_mem, snap_disk))
xml_snap_exp.append("memory snapshot='external' file='%s'"
% snap_mem)
else:
options = snap_name
error_msg = params.get("error_msg")
if error_msg:
expected_fails.append(error_msg)
ret = virsh.snapshot_create_as(vm_name, options)
if ret.exit_status:
libvirt.check_result(ret, expected_fails)
# check xml file.
if not ret.exit_status:
snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
debug=True).stdout.strip()
dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
# Delete snapshots.
libvirt.clean_up_snapshots(vm_name)
if os.path.exists(snap_mem):
os.remove(snap_mem)
if os.path.exists(snap_disk):
os.remove(snap_disk)
if not all([x in snap_xml for x in xml_snap_exp]):
raise error.TestFail("Failed to check snapshot xml")
if not all([x in dom_xml for x in xml_dom_exp]):
raise error.TestFail("Failed to check domain xml")
def check_blockcopy(target):
"""
Block copy operation test.
"""
blk_file = os.path.join(test.tmpdir, "blk.rbd")
if os.path.exists(blk_file):
os.remove(blk_file)
blk_mirror = ("mirror type='file' file='%s' "
"format='raw' job='copy'" % blk_file)
# Do blockcopy
ret = virsh.blockcopy(vm_name, target, blk_file)
if ret.exit_status:
error_msg = params.get("error_msg")
if not error_msg:
libvirt.check_exit_status(ret)
else:
libvirt.check_result(ret, [error_msg])
# Passed error check, return
return
dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
if not dom_xml.count(blk_mirror):
raise error.TestFail("Can't see block job in domain xml")
# Abort
ret = virsh.blockjob(vm_name, target, "--abort")
libvirt.check_exit_status(ret)
dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
if dom_xml.count(blk_mirror):
raise error.TestFail("Failed to abort block job")
if os.path.exists(blk_file):
os.remove(blk_file)
# Sleep for a while after abort operation.
time.sleep(5)
# Do blockcopy again
ret = virsh.blockcopy(vm_name, target, blk_file)
libvirt.check_exit_status(ret)
# Wait for complete
def wait_func():
ret = virsh.blockjob(vm_name, target, "--info")
return ret.stderr.count("Block Copy: [100 %]")
timeout = params.get("blockjob_timeout", 600)
utils_misc.wait_for(wait_func, int(timeout))
# Pivot
ret = virsh.blockjob(vm_name, target, "--pivot")
libvirt.check_exit_status(ret)
dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
if not dom_xml.count("source file='%s'" % blk_file):
raise error.TestFail("Failed to pivot block job")
# Remove the disk file.
if os.path.exists(blk_file):
os.remove(blk_file)
def check_in_vm(vm_obj, target, old_parts, read_only=False):
"""
Check mount/read/write disk in VM.
:param vm_obj: VM guest.
:param target: Disk dev in VM.
:param old_parts: Partition list collected before attaching the disk.
:param read_only: Whether the disk is expected to be read-only.
:return: True if the check succeeds.
"""
try:
session = vm_obj.wait_for_login()
new_parts = libvirt.get_parts_list(session)
added_parts = list(set(new_parts).difference(set(old_parts)))
logging.info("Added parts:%s", added_parts)
if len(added_parts) != 1:
logging.error("The number of new partitions is invalid in VM")
return False
added_part = None
if target.startswith("vd"):
if added_parts[0].startswith("vd"):
added_part = added_parts[0]
elif target.startswith("hd"):
if added_parts[0].startswith("sd"):
added_part = added_parts[0]
if not added_part:
logging.error("Cann't see added partition in VM")
return False
cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
" touch /mnt/testfile; umount /mnt)"
.format(added_part))
s, o = session.cmd_status_output(cmd, timeout=60)
session.close()
logging.info("Check disk operation in VM:\n, %s, %s", s, o)
# Read-only fs: check the error messages.
# The command may succeed, but read-only
# messages can be found in the command output.
if read_only:
if "Read-only file system" not in o:
return False
else:
return True
# Other errors
if s != 0:
return False
return True
except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
logging.error(str(e))
return False
mon_host = params.get("mon_host")
disk_src_name = params.get("disk_source_name")
disk_src_config = params.get("disk_source_config")
disk_src_host = params.get("disk_source_host")
disk_src_port = params.get("disk_source_port")
disk_src_pool = params.get("disk_source_pool")
disk_format = params.get("disk_format", "raw")
driver_iothread = params.get("driver_iothread")
pre_vm_state = params.get("pre_vm_state", "running")
snap_name = params.get("disk_snap_name")
attach_device = "yes" == params.get("attach_device", "no")
attach_disk = "yes" == params.get("attach_disk", "no")
test_save_restore = "yes" == params.get("test_save_restore", "no")
test_snapshot = "yes" == params.get("test_snapshot", "no")
test_blockcopy = "yes" == params.get("test_blockcopy", "no")
test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
test_vm_parts = "yes" == params.get("test_vm_parts", "no")
additional_guest = "yes" == params.get("additional_guest", "no")
create_snapshot = "yes" == params.get("create_snapshot", "no")
convert_image = "yes" == params.get("convert_image", "no")
create_volume = "yes" == params.get("create_volume", "no")
create_by_xml = "yes" == params.get("create_by_xml", "no")
client_key = params.get("client_key")
client_name = params.get("client_name")
auth_key = params.get("auth_key")
auth_user = params.get("auth_user")
auth_type = params.get("auth_type")
auth_usage = params.get("secret_usage")
pool_name = params.get("pool_name")
pool_type = params.get("pool_type")
vol_name = params.get("vol_name")
vol_cap = params.get("vol_cap")
vol_cap_unit = params.get("vol_cap_unit")
start_error_msg = params.get("start_error_msg")
attach_error_msg = params.get("attach_error_msg")
unsupported_msg = params.get("unsupported_msg")
# Start vm and get all partions in vm.
if vm.is_dead():
vm.start()
session = vm.wait_for_login()
old_parts = libvirt.get_parts_list(session)
session.close()
vm.destroy(gracefully=False)
if additional_guest:
guest_name = "%s_%s" % (vm_name, '1')
timeout = params.get("clone_timeout", 360)
utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
True, timeout=timeout,
ignore_status=False)
additional_vm = vm.clone(guest_name)
if pre_vm_state == "running":
virsh.start(guest_name)
# Back up xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
key_opt = ""
secret_uuid = None
key_file = os.path.join(test.tmpdir, "ceph.key")
img_file = os.path.join(test.tmpdir,
"%s_test.img" % vm_name)
try:
# Set domain state
libvirt.set_domain_state(vm, pre_vm_state)
# Install ceph-common package which include rbd command
if utils_misc.yum_install(["ceph-common"]):
if client_name and client_key:
with open(key_file, 'w') as f:
f.write("[%s]\n\tkey = %s\n" %
(client_name, client_key))
key_opt = "--keyring %s" % key_file
# Create secret xml
sec_xml = secret_xml.SecretXML("no", "no")
sec_xml.usage = auth_type
sec_xml.usage_name = auth_usage
sec_xml.xmltreefile.write()
logging.debug("Secret xml: %s", sec_xml)
ret = virsh.secret_define(sec_xml.xml)
libvirt.check_exit_status(ret)
secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
ret.stdout)[0].lstrip()
logging.debug("Secret uuid %s", secret_uuid)
if secret_uuid is None:
raise error.TestNAError("Failed to get secret uuid")
# Set secret value
auth_key = params.get("auth_key")
ret = virsh.secret_set_value(secret_uuid, auth_key,
**virsh_dargs)
libvirt.check_exit_status(ret)
# TODO - Delete the disk if it exists
#cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
# "{2}".format(mon_host, key_opt, disk_src_name))
#process.run(cmd, ignore_status=True, shell=True)
else:
raise error.TestNAError("Failed to install ceph-common")
if disk_src_config:
config_ceph()
disk_path = ("rbd:%s:mon_host=%s" %
(disk_src_name, mon_host))
if auth_user and auth_key:
disk_path += (":id=%s:key=%s" %
(auth_user, auth_key))
targetdev = params.get("disk_target", "vdb")
# To be compatible with create_disk_xml function,
# some parameters need to be updated.
params.update({
"type_name": params.get("disk_type", "network"),
"target_bus": params.get("disk_target_bus"),
"target_dev": targetdev,
"secret_uuid": secret_uuid,
"source_protocol": params.get("disk_source_protocol"),
"source_name": disk_src_name,
"source_host_name": disk_src_host,
"source_host_port": disk_src_port})
# Prepare disk image
if convert_image:
first_disk = vm.get_first_disk_devices()
blk_source = first_disk['source']
# Convert the image to remote storage
disk_cmd = ("rbd -m %s %s info %s || qemu-img convert"
" -O %s %s %s" % (mon_host, key_opt,
disk_src_name, disk_format,
blk_source, disk_path))
process.run(disk_cmd, ignore_status=False, shell=True)
elif create_volume:
vol_params = {"name": vol_name, "capacity": int(vol_cap),
"capacity_unit": vol_cap_unit, "format": "unknow"}
create_pool()
create_vol(vol_params)
check_vol(vol_params)
else:
# Create an local image and make FS on it.
disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
(disk_format, img_file, img_file))
process.run(disk_cmd, ignore_status=False, shell=True)
# Convert the image to remote storage
disk_cmd = ("rbd -m %s %s info %s || qemu-img convert -O"
" %s %s %s" % (mon_host, key_opt, disk_src_name,
disk_format, img_file, disk_path))
process.run(disk_cmd, ignore_status=False, shell=True)
# Create disk snapshot if needed.
if create_snapshot:
snap_cmd = ("rbd -m %s %s snap create %s@%s" %
(mon_host, key_opt, disk_src_name, snap_name))
process.run(snap_cmd, ignore_status=False, shell=True)
if attach_device:
if create_volume:
params.update({"type_name": "volume"})
# No need auth options for volume
if "auth_user" in params:
params.pop("auth_user")
if "auth_type" in params:
params.pop("auth_type")
if "secret_type" in params:
params.pop("secret_type")
if "secret_uuid" in params:
params.pop("secret_uuid")
if "secret_usage" in params:
params.pop("secret_usage")
xml_file = libvirt.create_disk_xml(params)
opts = params.get("attach_option", "")
ret = virsh.attach_device(vm_name, xml_file,
flagstr=opts, debug=True)
if attach_error_msg:
libvirt.check_result(ret, attach_error_msg)
else:
libvirt.check_exit_status(ret)
if additional_guest:
ret = virsh.attach_device(guest_name, xml_file,
"", debug=True)
libvirt.check_exit_status(ret)
elif attach_disk:
ret = virsh.attach_disk(vm_name, disk_path,
targetdev, **virsh_dargs)
libvirt.check_exit_status(ret)
elif not create_volume:
libvirt.set_vm_disk(vm, params)
if pre_vm_state == "transient":
logging.info("Creating %s...", vm_name)
vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
if vm.is_alive():
vm.destroy(gracefully=False)
vm.undefine()
if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
vmxml_backup.define()
raise error.TestFail("Cann't create the domain")
elif vm.is_dead():
vm.start()
# Wait for vm is running
vm.wait_for_login(timeout=600).close()
if additional_guest:
if additional_vm.is_dead():
additional_vm.start()
# Check qemu command line
if test_qemu_cmd:
check_qemu_cmd()
# Check partitions in vm
if test_vm_parts:
if not check_in_vm(vm, targetdev, old_parts,
read_only=create_snapshot):
raise error.TestFail("Failed to check vm partitions")
if additional_guest:
if not check_in_vm(additional_vm, targetdev, old_parts):
raise error.TestFail("Failed to check vm partitions")
# Save and restore operation
if test_save_restore:
check_save_restore()
if test_snapshot:
snap_option = params.get("snapshot_option", "")
check_snapshot(snap_option)
if test_blockcopy:
check_blockcopy(targetdev)
# Detach the device.
if attach_device and not attach_error_msg:
xml_file = libvirt.create_disk_xml(params)
ret = virsh.detach_device(vm_name, xml_file)
libvirt.check_exit_status(ret)
if additional_guest:
ret = virsh.detach_device(guest_name, xml_file)
libvirt.check_exit_status(ret)
elif attach_disk:
ret = virsh.detach_disk(vm_name, targetdev)
libvirt.check_exit_status(ret)
# Check disk in vm after detachment.
if (attach_device or attach_disk) and not attach_error_msg:
session = vm.wait_for_login()
new_parts = libvirt.get_parts_list(session)
if len(new_parts) != len(old_parts):
raise error.TestFail("Disk still exists in vm"
" after detachment")
session.close()
except virt_vm.VMStartError, details:
if start_error_msg in str(details):
pass
else:
raise error.TestFail("VM failed to start."
"Error: %s" % str(details))
finally:
# Delete snapshots.
snapshot_lists = virsh.snapshot_list(vm_name)
if len(snapshot_lists) > 0:
libvirt.clean_up_snapshots(vm_name, snapshot_lists)
for snap in snapshot_lists:
virsh.snapshot_delete(vm_name, snap, "--metadata")
# Recover VM.
if vm.is_alive():
vm.destroy(gracefully=False)
if additional_guest:
virsh.remove_domain(guest_name,
"--remove-all-storage",
ignore_status=True)
# Remove the snapshot.
if create_snapshot:
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
" purge {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, disk_src_name))
process.run(cmd, ignore_status=True, shell=True)
elif attach_device or attach_disk:
cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
"".format(mon_host, key_opt, disk_src_name))
process.run(cmd, ignore_status=True, shell=True)
# Delete tmp files.
if os.path.exists(key_file):
os.remove(key_file)
if os.path.exists(img_file):
os.remove(img_file)
# Clean up volume, pool
if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
virsh.vol_delete(vol_name, pool_name)
if pool_name and virsh.pool_state_dict().has_key(pool_name):
virsh.pool_destroy(pool_name, **virsh_dargs)
virsh.pool_undefine(pool_name, **virsh_dargs)
# Clean up secret
if secret_uuid:
virsh.secret_undefine(secret_uuid)
logging.info("Restoring vm...")
vmxml_backup.sync()
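Besides the fixed time.sleep(5) after aborting the block job, this test waits for completion with utils_misc.wait_for, a poll-until-true helper. A minimal standard-library stand-in for that helper (the real avocado utility takes more options):

import time

def wait_for(func, timeout, step=1.0):
    # Poll func() until it returns a truthy value or the timeout expires;
    # return the truthy value, or None on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = func()
        if result:
            return result
        time.sleep(step)  # back off between polls instead of busy-waiting
    return None

# Example: a fake job that completes after four polls.
state = {'progress': 0}

def job_done():
    state['progress'] += 25
    return state['progress'] >= 100

print(wait_for(job_done, timeout=10))

The sleep inside the loop is what keeps the poll cheap; without it the loop would spin at full CPU.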
0
Example 70
Project: spnet Source File: test.py
def destroy_db_and_test():
'''tests progressively building an spnet db starting from a blank
slate, adding papers, people, posts, topics, etc. and verifying
the expected results. NB: this is a destructive test, i.e.
it FLUSHES whatever is in the spnet database and fills it with
its own test data.'''
dbconn = connect.init_connection()
dbconn._conn.drop_database('spnet') # start test from a blank slate
rootColl = apptree.get_collections()
lorem = '''Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'''
jojo = core.Person(docData=dict(name='jojo', age=37))
assert jojo != None
assert jojo.force_reload(delay=1) is False # set timer
assert jojo.force_reload() is False # timer still waiting
time.sleep(2)
assert jojo.force_reload() # timer done
a1 = core.EmailAddress(docData=dict(address='[email protected]', current=True),
parent=jojo)
fred = core.Person(docData=dict(name='fred', age=56))
a2 = core.EmailAddress(docData=dict(address='[email protected]',
authenticated=False), parent=fred)
a3 = core.EmailAddress(docData=dict(address='[email protected]',
note='personal account'), parent=fred)
paper1 = core.ArxivPaperData('1302.4871', insertNew='findOrInsert').parent
paper1.update(dict(authors=[jojo._id]))
paper2 = core.ArxivPaperData('1205.6541', insertNew='findOrInsert').parent
paper2.update(dict(authors=[fred._id, jojo._id]))
assert paper1.arxiv.id == '1302.4871'
assert paper2.arxiv.id == '1205.6541'
jojoGplus = core.GplusPersonData(docData=dict(id=1234, displayName='Joseph Nye', image={'url':'http://www.nobelprize.org/nobel_prizes/physics/laureates/1921/einstein.jpg'}),
parent=jojo)
jojoGplus.update(dict(etag='oldversion'))
sig1 = core.SIG.find_or_insert('cosmology')
sig2 = core.SIG.find_or_insert('lambdaCDMmodel')
topicWords = incoming.get_topicIDs(['cosmology', 'astrophysics'],
1, datetime.utcnow(), 'test')
assert topicWords == ['cosmology', 'astrophysics']
astroSIG = core.SIG('astrophysics')
assert astroSIG.name == '#astrophysics'
assert astroSIG.origin == dict(source='test', id=1)
int1 = core.PaperInterest(docData=dict(author=jojo._id, topics=[sig1._id]),
parent=paper1)
assert core.Paper(paper1._id).interests == [int1]
assert core.Paper(paper1._id).get_interests() == {sig1._id:[jojo]}
assert core.Person(jojo._id).interests == [int1]
assert core.Person(jojo._id).topics == [sig1._id]
assert core.SIG(sig1._id).interests == [int1]
assert core.SIG(sig1._id).get_interests() == {paper1:[jojo]}
intAgain = core.PaperInterest((paper1._id, jojo._id))
assert intAgain == int1
try:
intAgain.remove_topic(sig2._id)
except KeyError:
pass
else:
raise AssertionError('failed to catch bad remove_topic()')
assert intAgain.remove_topic(sig1._id) is None
assert core.Paper(paper1._id).interests == []
# test creation via POST
paperLikes = rootColl['papers'].likes
sessioninfo.get_session.sessionDict = dict(person=fred)
int2 = paperLikes._POST(fred._id, sig2._id, '1',
parents=dict(paper=paper2))
assert int2.parent == paper2
assert int2.author == fred
assert int2.topics == [sig2]
assert core.Paper(paper2._id).interests == [int2]
assert core.Person(fred._id).interests == [int2]
assert core.Person(fred._id).topics == [sig2._id]
assert core.SIG(sig2._id).interests == [int2]
try:
paperLikes._POST(fred._id, 'this is not allowed', '1',
parents=dict(paper=paper2))
except KeyError:
pass
else:
raise AssertionError('failed to trap bad topic string')
# test removal via POST
assert paperLikes._POST(fred._id, sig2._id, '0',
parents=dict(paper=core.Paper(paper2._id))) == int2
assert core.Paper(paper2._id).interests == []
int3 = paperLikes._POST(fred._id, '#silicene', '1',
parents=dict(paper=paper2))
assert core.SIG('silicene').interests == [int3]
assert set(core.Person(fred._id).topics) == set([sig2._id, 'silicene'])
gplus2 = core.GplusPersonData(docData=dict(id=1234, displayName='Joseph Nye'),
insertNew='findOrInsert')
assert gplus2 == jojoGplus
gplus3 = core.GplusPersonData(docData=dict(id=5678, displayName='Fred Eiserling'),
insertNew='findOrInsert')
assert gplus3.parent.name == 'Fred Eiserling'
rec1 = core.Post(docData=dict(author=fred._id, citationType='recommend', id='1',
title='Why You Need to Read This Important Extension of the CDM Model',
text=lorem),
parent=paper1)
rec2 = core.Post(docData=dict(author=jojo._id, text='must read!',
citationType='mustread', id='2',
sigs=[sig1._id, sig2._id]),
parent=paper2._id)
assert set(core.Person(jojo._id).topics) == set([sig1._id, sig2._id])
post1 = core.Post(docData=dict(author=fred._id, text='interesting paper!',
id=98765, sigs=[sig1._id]), parent=paper1)
assert set(core.Person(fred._id).topics) == set([sig1._id, sig2._id, 'silicene'])
reply1 = core.Reply(docData=dict(author=jojo._id, text='I disagree with Fred.',
id=7890, replyTo=98765), parent=paper1)
issue1 = core.Issue(docData=dict(paper=paper1, title='The claims are garbage',
category='validity', author=jojo._id,
description='there is a major flaw in the first step of your proof'))
vote1 = core.IssueVote(docData=dict(person=jojo, rating='crucial',
status='open'),
parent=issue1)
assert core.Person(jojo._id).email == [a1]
assert core.Person(jojo._id).replies == [reply1]
jgp = core.GplusPersonData(1234)
assert jgp.parent == jojo
assert jgp.etag == 'oldversion'
assert len(rec1.parent.authors) == 1
assert rec1.parent.authors[0] == jojo
assert len(rec2.parent.authors) == 2
assert jojo in rec2.parent.authors
assert fred in rec2.parent.authors
assert len(rec2.parent.recommendations) == 1
assert len(jojo.recommendations) == 1
assert jojo.recommendations[0] == rec2
assert len(jojo.papers) == 2
assert len(fred.papers) == 1
assert len(paper2.authors[0].email) == 2
assert issue1.author == jojo
p = core.Paper(paper1._id)
assert len(p.issues) == 1
posts1 = p.get_all_posts()
assert len(posts1) == 1
assert posts1 == [post1]
assert posts1[0].text == 'interesting paper!'
assert list(posts1[0].get_replies()) == [reply1]
assert core.Post(98765).author == fred
assert core.Reply(7890).replyTo == post1
assert core.Reply(7890).parent == paper1
assert filter(lambda x:not x.is_rec(), core.Person(fred._id).posts) == [post1]
assert filter(lambda x:not x.is_rec(), core.SIG(sig1._id).posts) == [post1]
assert core.Post(98765).sigs == [sig1]
replyAgain = core.Reply(docData=dict(author=fred._id, text='interesting paper!',
id=7890, replyTo=98765), parent=paper1,
insertNew='findOrInsert')
assert replyAgain == reply1
assert core.Paper(paper1._id).replies == [reply1]
reply2 = core.Reply(docData=dict(author=jojo._id, text='This paper really made me think.',
id=7891, replyTo=98765), parent=paper1,
insertNew='findOrInsert')
assert core.Paper(paper1._id).replies == [reply1, reply2]
assert core.Paper(str(paper1._id)) == paper1, 'auto ID conversion failed'
assert p.issues[0] == issue1
assert len(p.issues[0].votes) == 1
assert len(rec2.sigs) == 2
assert rec2.sigs[0] == sig1
assert sig1.recommendations == [rec2]
rec1.array_append('sigs', sig2)
assert len(sig2.recommendations) == 2
assert core.Post(rec1.id).sigs == [sig2]
rec2.update(dict(text='totally fascinating!', score=27))
rec3 = core.Post(rec2.id)
assert rec3.score == 27
a4 = core.EmailAddress('[email protected]')
assert a4._parent_link == fred._id
assert a4.parent == fred
try:
p = core.Person('abcdefg')
except KeyError:
pass
else:
raise AssertionError('failed to trap bad personID')
try:
a = core.EmailAddress('[email protected]')
except KeyError:
pass
else:
raise AssertionError('failed to trap bad email')
try:
jojo = core.Person(docData=dict(name2='jojo', age=37))
except ValueError:
pass
else:
raise AssertionError('failed to trap Person w/o name')
fred.array_append('numbers', 17)
assert core.Person(fred._id).numbers == [17]
fred.array_append('numbers', 6)
assert core.Person(fred._id).numbers == [17, 6]
fred.array_del('numbers', 17)
assert core.Person(fred._id).numbers == [6]
a4.array_append('numbers', 17)
assert core.EmailAddress(a4.address).numbers == [17]
a4.array_append('numbers', 6)
assert core.EmailAddress(a4.address).numbers == [17, 6]
a4.array_del('numbers', 17)
assert core.EmailAddress(a4.address).numbers == [6]
rec3 = core.Post(docData=dict(author=fred._id, citationType='recommend',
text='I think this is a major breakthrough.',
sigs=[sig2._id], id=3456),
parent=paper2._id)
assert core.SIG(sig1._id).recommendations == [rec2]
assert len(core.SIG(sig2._id).recommendations) == 3
it = gplus.publicAccess.get_person_posts('107295654786633294692')
testPosts = list(gplus.publicAccess.find_or_insert_posts(it))
assert len(testPosts) > 0
nposts = len(core.Paper(paper1._id).posts)
nreplies = len(core.Paper(paper1._id).replies)
it = gplus.publicAccess.get_person_posts('107295654786633294692')
testPosts2 = list(gplus.publicAccess.find_or_insert_posts(it))
assert testPosts == testPosts2
assert nposts == len(core.Paper(paper1._id).posts)
assert nreplies == len(core.Paper(paper1._id).replies)
gpd = core.GplusPersonData('112634568601116338347',
insertNew='findOrInsert')
assert gpd.displayName == 'Meenakshi Roy'
gpd.update_subscriptions(dict(etag='foo', totalItems=1),
[dict(id='114744049040264263224')])
gps = gpd.subscriptions
assert gps.gplusPerson == gpd
mrID = gpd.parent._id
subscriptions = core.Person(mrID).subscriptions
assert len(subscriptions) == 0
gpd2 = core.GplusPersonData('114744049040264263224',
insertNew='findOrInsert')
time.sleep(2)
subscriptions = core.Person(mrID).subscriptions
assert len(subscriptions) == 1
assert subscriptions[0].author == gpd2.parent
cjlposts = gpd2.update_posts(999) # retrieve some recs
assert len(cjlposts) > 0 # got some
assert len(core.Person(mrID).received) > 0 # and they were delivered
assert len(core.Person(mrID).get_deliveries()) > 0 # and UI can retrieve them
recReply = core.Reply(docData=dict(author=jojo._id, id=78901, replyTo=3456,
text='Fred, thanks for your comments! Your insights are really helpful.'),
parent=paper2._id)
# make sure timestamps present on all recs
l = [r.published for r in core.Post.find_obj()]
l = [r.published for r in core.Reply.find_obj()]
assert recReply.replyTo == rec3
assert list(recReply.replyTo.get_replies()) == [recReply]
# pubmed eutils network server constantly failing now??
## pubmedDict = pubmed.get_pubmed_dict('23482246')
## with open('../pubmed/test1.pickle') as ifile:
## correctDict = pickle.load(ifile)
## assert pubmedDict == correctDict
## paper3 = core.PubmedPaperData('23482246', insertNew='findOrInsert').parent
## paper3.update(dict(authors=[fred._id]))
## ppd = core.PubmedPaperData('23139441', insertNew='findOrInsert')
## assert ppd.doi.upper() == '10.1016/J.MSEC.2012.05.020'
## assert paper3.pubmed.id == '23482246'
## assert paper3.title[:40] == correctDict['title'][:40]
s = 'aabbe'
t = doi.map_to_doi(s)
assert t == '10.1002/(SICI)1097-0258(19980815/30)17:15/16<1661::AID-SIM968>3.0.CO;2-2'
assert s == doi.map_to_shortdoi(t)
paper4 = core.DoiPaperData(DOI=t, insertNew='findOrInsert').parent
paper4.update(dict(authors=[fred._id]))
assert paper4.doi.id == s
assert paper4.doi.doi == t
assert paper4.doi.DOI == t.upper()
paper5 = core.DoiPaperData(s, insertNew='findOrInsert').parent
assert paper4 == paper5
assert rootColl['shortDOI']._GET(s) == paper4
txt = 'some text ' + paper4.doi.get_hashtag()
refs, topics, primary = incoming.get_citations_types_and_topics(txt,spnetworkOnly=False)
assert incoming.get_paper(primary,refs[primary][1]) == paper4
spnetPaper = core.DoiPaperData(DOI='10.3389/fncom.2012.00001',
insertNew='findOrInsert').parent
assert spnetPaper.title.lower() == 'open peer review by a selected-papers network'
txt = 'a long comment ' + spnetPaper.doi.get_doctag() + ', some more text'
refs, topics, primary = incoming.get_citations_types_and_topics(txt,spnetworkOnly=False)
assert incoming.get_paper(primary,refs[primary][1]) == spnetPaper
topics, subs = bulk.get_people_subs()
bulk.deliver_recs(topics, subs)
assert len(core.Person(jojo._id).received) == 4
assert len(core.Person(fred._id).received) == 2
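The time.sleep(2) calls in this test exist to let internal timers expire: force_reload(delay=1) arms a cooldown, and the assertions check that a reload is refused until the sleep has carried the test past it. A toy cooldown object reproducing just that observable behavior (the real spnet method works differently internally):

import time

class Reloader(object):
    def __init__(self):
        self._ready_at = 0.0

    def force_reload(self, delay=0):
        if delay:  # arm the timer
            self._ready_at = time.time() + delay
            return False
        if time.time() < self._ready_at:
            return False  # timer still waiting
        return True  # timer done

r = Reloader()
assert r.force_reload(delay=1) is False  # set timer
assert r.force_reload() is False         # timer still waiting
time.sleep(2)                            # sleep past the cooldown
assert r.force_reload()                  # timer done
print('cooldown expired as expected')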
0
Example 71
Project: SiCKRAGE Source File: __init__.py
def _parse_string(self, name):
if not name:
return
matches = []
bestResult = None
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
if not match:
continue
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'series_num' in named_groups and match.group('series_num'):
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
result.score += 1
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
result.score += 1
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
if 'air_date' in named_groups:
air_date = match.group('air_date')
try:
result.air_date = parser.parse(air_date, fuzzy=True).date()
result.score += 1
except Exception:
continue
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
if 'version' in named_groups:
# assigns version to anime file if detected using anime regex. Non-anime regex receives -1
version = match.group('version')
if version:
result.version = version
else:
result.version = 1
else:
result.version = -1
matches.append(result)
if len(matches):
# pick best match with highest score based on placement
bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = self.get_show(bestResult.series_name, self.tryIndexers)
# confirm passed in show object indexer id matches result show object indexer id
if show:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
bestResult.show = show
elif not show and self.showObj:
bestResult.show = self.showObj
# if this is a naming pattern test or result doesn't have a show object then return best result
if not bestResult.show or self.naming_pattern:
return bestResult
# get quality
bestResult.quality = Quality.nameQuality(name, bestResult.show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []
# if we have an air-by-date show then get the real season/episode numbers
if bestResult.is_air_by_date:
airdate = bestResult.air_date.toordinal()
dbData = [x['doc'] for x in
MainDB().db.get_many('tv_episodes', bestResult.show.indexerid, with_doc=True)
if x['doc']['indexer'] == bestResult.show.indexer and x['doc']['airdate'] == airdate]
season_number = None
episode_numbers = []
if dbData:
season_number = int(dbData[0]['season'])
episode_numbers = [int(dbData[0]['episode'])]
if not season_number or not len(episode_numbers):
try:
lINDEXER_API_PARMS = srIndexerApi(bestResult.show.indexer).api_params.copy()
if bestResult.show.lang:
lINDEXER_API_PARMS['language'] = bestResult.show.lang
t = srIndexerApi(bestResult.show.indexer).indexer(**lINDEXER_API_PARMS)
epObj = t[bestResult.show.indexerid].airedOn(bestResult.air_date)[0]
season_number = int(epObj["seasonnumber"])
episode_numbers = [int(epObj["episodenumber"])]
except indexer_episodenotfound:
sickrage.srCore.srLogger.warning(
"Unable to find episode with date " + bestResult.air_date + " for show " + bestResult.show.name + ", skipping")
episode_numbers = []
except indexer_error as e:
sickrage.srCore.srLogger.warning(
"Unable to contact " + srIndexerApi(bestResult.show.indexer).name + ": {}".format(
e))
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif bestResult.show.is_anime and len(bestResult.ab_episode_numbers):
scene_season = get_scene_exception_by_name(bestResult.series_name)[1]
for epAbsNo in bestResult.ab_episode_numbers:
a = epAbsNo
if bestResult.show.is_scene:
a = get_indexer_absolute_numbering(bestResult.show.indexerid,
bestResult.show.indexer, epAbsNo,
True, scene_season)
(s, e) = get_all_episodes_from_absolute_number(bestResult.show, [a])
new_absolute_numbers.append(a)
new_episode_numbers.extend(e)
new_season_numbers.append(s)
elif bestResult.season_number and len(bestResult.episode_numbers):
for epNo in bestResult.episode_numbers:
s = bestResult.season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
bestResult.season_number,
epNo)
if bestResult.show.is_anime:
a = get_absolute_number_from_season_and_episode(bestResult.show, s, e)
if a:
new_absolute_numbers.append(a)
new_episode_numbers.append(e)
new_season_numbers.append(s)
# need to do a quick sanity check here. It's possible that we now have episodes
# from more than one season (by tvdb numbering), and this is just too much
# for sickrage, so we'd need to flag it.
new_season_numbers = list(set(new_season_numbers)) # remove duplicates
if len(new_season_numbers) > 1:
raise InvalidNameException("Scene numbering results episodes from "
"seasons %s, (i.e. more than one) and "
"sickrage does not support this. "
"Sorry." % new_season_numbers)
# I guess it's possible that we'd have duplicate episodes too, so let's
# eliminate them
new_episode_numbers = list(set(new_episode_numbers))
new_episode_numbers.sort()
# maybe even duplicate absolute numbers so why not do them as well
new_absolute_numbers = list(set(new_absolute_numbers))
new_absolute_numbers.sort()
if len(new_absolute_numbers):
bestResult.ab_episode_numbers = new_absolute_numbers
if len(new_season_numbers) and len(new_episode_numbers):
bestResult.episode_numbers = new_episode_numbers
bestResult.season_number = new_season_numbers[0]
if bestResult.show.is_scene:
sickrage.srCore.srLogger.debug(
"Converted parsed result {} into {}".format(bestResult.original_name, bestResult))
# CPU sleep
time.sleep(0.02)
return bestResult
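Same CPU-sleep pattern as the earlier parser example, here with a hard-coded 0.02-second pause. One reason such a tiny sleep helps is that time.sleep releases the GIL, so other threads get scheduled between work items. An illustrative (non-benchmark) sketch:

import threading
import time

def worker(pause):
    for _ in range(50):
        sum(range(1000))      # stand-in for parsing work
        if pause:
            time.sleep(0.02)  # brief CPU sleep, as in the parser above

for pause in (False, True):
    start = time.time()
    threads = [threading.Thread(target=worker, args=(pause,)) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('pause=%s elapsed=%.2fs' % (pause, time.time() - start))

With the pause enabled the loop finishes later but leaves the CPU mostly idle for the rest of the process.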
0
Example 72
Project: OpenOPC Source File: __init__.py
def iread(self, tags=None, group=None, size=None, pause=0, source='hybrid', update=-1, timeout=5000, sync=False, include_error=False, rebuild=False):
"""Iterable version of read()"""
def add_items(tags):
names = list(tags)
names.insert(0,0)
errors = []
if self.trace: self.trace('Validate(%s)' % tags2trace(names))
try:
errors = opc_items.Validate(len(names)-1, names)
except:
pass
valid_tags = []
valid_values = []
client_handles = []
if not self._group_handles_tag.has_key(sub_group):
self._group_handles_tag[sub_group] = {}
n = 0
elif len(self._group_handles_tag[sub_group]) > 0:
n = max(self._group_handles_tag[sub_group]) + 1
else:
n = 0
for i, tag in enumerate(tags):
if errors[i] == 0:
valid_tags.append(tag)
client_handles.append(n)
self._group_handles_tag[sub_group][n] = tag
n += 1
elif include_error:
error_msgs[tag] = self._opc.GetErrorString(errors[i])
if self.trace and errors[i] != 0: self.trace('%s failed validation' % tag)
client_handles.insert(0,0)
valid_tags.insert(0,0)
server_handles = []
errors = []
if self.trace: self.trace('AddItems(%s)' % tags2trace(valid_tags))
try:
server_handles, errors = opc_items.AddItems(len(client_handles)-1, valid_tags, client_handles)
except:
pass
valid_tags_tmp = []
server_handles_tmp = []
valid_tags.pop(0)
if not self._group_server_handles.has_key(sub_group):
self._group_server_handles[sub_group] = {}
for i, tag in enumerate(valid_tags):
if errors[i] == 0:
valid_tags_tmp.append(tag)
server_handles_tmp.append(server_handles[i])
self._group_server_handles[sub_group][tag] = server_handles[i]
elif include_error:
error_msgs[tag] = self._opc.GetErrorString(errors[i])
valid_tags = valid_tags_tmp
server_handles = server_handles_tmp
return valid_tags, server_handles
def remove_items(tags):
if self.trace: self.trace('RemoveItems(%s)' % tags2trace(['']+tags))
server_handles = [self._group_server_handles[sub_group][tag] for tag in tags]
server_handles.insert(0,0)
errors = []
try:
errors = opc_items.Remove(len(server_handles)-1, server_handles)
except pythoncom.com_error, err:
error_msg = 'RemoveItems: %s' % self._get_error_str(err)
raise OPCError, error_msg
try:
self._update_tx_time()
pythoncom.CoInitialize()
if include_error:
sync = True
if sync:
update = -1
tags, single, valid = type_check(tags)
if not valid:
raise TypeError, "iread(): 'tags' parameter must be a string or a list of strings"
# Group exists
if self._groups.has_key(group) and not rebuild:
num_groups = self._groups[group]
data_source = SOURCE_CACHE
# Group non-existent
else:
if size:
# Break-up tags into groups of 'size' tags
tag_groups = [tags[i:i+size] for i in range(0, len(tags), size)]
else:
tag_groups = [tags]
num_groups = len(tag_groups)
data_source = SOURCE_DEVICE
results = []
for gid in range(num_groups):
if gid > 0 and pause > 0: time.sleep(pause/1000.0)
error_msgs = {}
opc_groups = self._opc.OPCGroups
opc_groups.DefaultGroupUpdateRate = update
# Anonymous group
if group == None:
try:
if self.trace: self.trace('AddGroup()')
opc_group = opc_groups.Add()
except pythoncom.com_error, err:
error_msg = 'AddGroup: %s' % self._get_error_str(err)
raise OPCError, error_msg
sub_group = group
new_group = True
else:
sub_group = '%s.%d' % (group, gid)
# Existing named group
try:
if self.trace: self.trace('GetOPCGroup(%s)' % sub_group)
opc_group = opc_groups.GetOPCGroup(sub_group)
new_group = False
# New named group
except:
try:
if self.trace: self.trace('AddGroup(%s)' % sub_group)
opc_group = opc_groups.Add(sub_group)
except pythoncom.com_error, err:
error_msg = 'AddGroup: %s' % self._get_error_str(err)
raise OPCError, error_msg
self._groups[str(group)] = len(tag_groups)
new_group = True
opc_items = opc_group.OPCItems
if new_group:
opc_group.IsSubscribed = 1
opc_group.IsActive = 1
if not sync:
if self.trace: self.trace('WithEvents(%s)' % opc_group.Name)
global current_client
current_client = self
self._group_hooks[opc_group.Name] = win32com.client.WithEvents(opc_group, GroupEvents)
tags = tag_groups[gid]
valid_tags, server_handles = add_items(tags)
self._group_tags[sub_group] = tags
self._group_valid_tags[sub_group] = valid_tags
# Rebuild existing group
elif rebuild:
tags = tag_groups[gid]
valid_tags = self._group_valid_tags[sub_group]
add_tags = [t for t in tags if t not in valid_tags]
del_tags = [t for t in valid_tags if t not in tags]
if len(add_tags) > 0:
valid_tags, server_handles = add_items(add_tags)
valid_tags = self._group_valid_tags[sub_group] + valid_tags
if len(del_tags) > 0:
remove_items(del_tags)
valid_tags = [t for t in valid_tags if t not in del_tags]
self._group_tags[sub_group] = tags
self._group_valid_tags[sub_group] = valid_tags
if source == 'hybrid': data_source = SOURCE_DEVICE
# Existing group
else:
tags = self._group_tags[sub_group]
valid_tags = self._group_valid_tags[sub_group]
if sync:
server_handles = [item.ServerHandle for item in opc_items]
tag_value = {}
tag_quality = {}
tag_time = {}
tag_error = {}
# Sync Read
if sync:
values = []
errors = []
qualities = []
timestamps= []
if len(valid_tags) > 0:
server_handles.insert(0,0)
if source != 'hybrid':
data_source = SOURCE_CACHE if source == 'cache' else SOURCE_DEVICE
if self.trace: self.trace('SyncRead(%s)' % data_source)
try:
values, errors, qualities, timestamps = opc_group.SyncRead(data_source, len(server_handles)-1, server_handles)
except pythoncom.com_error, err:
error_msg = 'SyncRead: %s' % self._get_error_str(err)
raise OPCError, error_msg
for i,tag in enumerate(valid_tags):
tag_value[tag] = values[i]
tag_quality[tag] = qualities[i]
tag_time[tag] = timestamps[i]
tag_error[tag] = errors[i]
# Async Read
else:
if len(valid_tags) > 0:
if self._tx_id >= 0xFFFF:
self._tx_id = 0
self._tx_id += 1
if source != 'hybrid':
data_source = SOURCE_CACHE if source == 'cache' else SOURCE_DEVICE
if self.trace: self.trace('AsyncRefresh(%s)' % data_source)
try:
opc_group.AsyncRefresh(data_source, self._tx_id)
except pythoncom.com_error, err:
error_msg = 'AsyncRefresh: %s' % self._get_error_str(err)
raise OPCError, error_msg
tx_id = 0
start = time.time() * 1000
while tx_id != self._tx_id:
now = time.time() * 1000
if now - start > timeout:
raise TimeoutError, 'Callback: Timeout waiting for data'
if self.callback_queue.empty():
pythoncom.PumpWaitingMessages()
else:
tx_id, handles, values, qualities, timestamps = self.callback_queue.get()
for i,h in enumerate(handles):
tag = self._group_handles_tag[sub_group][h]
tag_value[tag] = values[i]
tag_quality[tag] = qualities[i]
tag_time[tag] = timestamps[i]
for tag in tags:
if tag_value.has_key(tag):
if (not sync and len(valid_tags) > 0) or (sync and tag_error[tag] == 0):
value = tag_value[tag]
if type(value) == pywintypes.TimeType:
value = str(value)
quality = quality_str(tag_quality[tag])
timestamp = str(tag_time[tag])
else:
value = None
quality = 'Error'
timestamp = None
if include_error:
error_msgs[tag] = self._opc.GetErrorString(tag_error[tag]).strip('\r\n')
else:
value = None
quality = 'Error'
timestamp = None
if include_error and not error_msgs.has_key(tag):
error_msgs[tag] = ''
if single:
if include_error:
yield (value, quality, timestamp, error_msgs[tag])
else:
yield (value, quality, timestamp)
else:
if include_error:
yield (tag, value, quality, timestamp, error_msgs[tag])
else:
yield (tag, value, quality, timestamp)
if group == None:
try:
if not sync and self._group_hooks.has_key(opc_group.Name):
if self.trace: self.trace('CloseEvents(%s)' % opc_group.Name)
self._group_hooks[opc_group.Name].close()
if self.trace: self.trace('RemoveGroup(%s)' % opc_group.Name)
opc_groups.Remove(opc_group.Name)
except pythoncom.com_error, err:
error_msg = 'RemoveGroup: %s' % self._get_error_str(err)
raise OPCError, error_msg
except pythoncom.com_error, err:
error_msg = 'read: %s' % self._get_error_str(err)
raise OPCError, error_msg
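iread() sleeps in two places: time.sleep(pause/1000.0) paces the reads between successive tag groups (pause is given in milliseconds), and the async branch loops on a millisecond deadline while pumping COM messages until a callback arrives, much like the wait_for sketch shown after Example 69. A minimal sketch of the batch-pacing part, with hypothetical names:

import time

def read_in_batches(items, size, pause=0):
    # Yield items in batches of `size`, pausing `pause` milliseconds
    # between batches -- mirrors the group pacing in iread() above.
    batches = [items[i:i + size] for i in range(0, len(items), size)]
    for gid, batch in enumerate(batches):
        if gid > 0 and pause > 0:
            time.sleep(pause / 1000.0)  # pause is in ms; sleep takes seconds
        yield batch

for batch in read_in_batches(list(range(10)), size=4, pause=250):
    print(batch)

The gid > 0 and pause > 0 guards keep the first batch unthrottled, exactly as in the original loop.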
0
Example 73
Project: tp-libvirt Source File: iface_options.py
def run(test, params, env):
"""
Test interafce xml options.
1.Prepare test environment,destroy or suspend a VM.
2.Edit xml and start the domain.
3.Perform test operation.
4.Recover test environment.
5.Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
virsh_dargs = {'debug': True, 'ignore_status': False}
def create_iface_xml(iface_mac):
"""
Create interface xml file
"""
iface = Interface(type_name=iface_type)
source = ast.literal_eval(iface_source)
if source:
iface.source = source
iface.model = iface_model if iface_model else "virtio"
iface.mac_address = iface_mac
driver_dict = {}
driver_host = {}
driver_guest = {}
if iface_driver:
driver_dict = ast.literal_eval(iface_driver)
if iface_driver_host:
driver_host = ast.literal_eval(iface_driver_host)
if iface_driver_guest:
driver_guest = ast.literal_eval(iface_driver_guest)
iface.driver = iface.new_driver(driver_attr=driver_dict,
driver_host=driver_host,
driver_guest=driver_guest)
logging.debug("Create new interface xml: %s", iface)
return iface
def modify_iface_xml(update, status_error=False):
"""
Modify interface xml options
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
xml_devices = vmxml.devices
iface_index = xml_devices.index(
xml_devices.by_device_tag("interface")[0])
iface = xml_devices[iface_index]
if iface_model:
iface.model = iface_model
else:
del iface.model
if iface_type:
iface.type_name = iface_type
del iface.source
source = ast.literal_eval(iface_source)
if source:
net_ifs = utils_net.get_net_if(state="UP")
# Check whether the source device is valid;
# if it's not in the host interface list, fall back to
# the first active interface of the host
if (iface.type_name == "direct" and
source.has_key('dev') and
source['dev'] not in net_ifs):
logging.warn("Source device %s is not a interface"
" of host, reset to %s",
source['dev'], net_ifs[0])
source['dev'] = net_ifs[0]
iface.source = source
backend = ast.literal_eval(iface_backend)
if backend:
iface.backend = backend
driver_dict = {}
driver_host = {}
driver_guest = {}
if iface_driver:
driver_dict = ast.literal_eval(iface_driver)
if iface_driver_host:
driver_host = ast.literal_eval(iface_driver_host)
if iface_driver_guest:
driver_guest = ast.literal_eval(iface_driver_guest)
iface.driver = iface.new_driver(driver_attr=driver_dict,
driver_host=driver_host,
driver_guest=driver_guest)
if iface.address:
del iface.address
logging.debug("New interface xml file: %s", iface)
if unprivileged_user:
# Create disk image for unprivileged user
disk_index = xml_devices.index(
xml_devices.by_device_tag("disk")[0])
disk_xml = xml_devices[disk_index]
logging.debug("source: %s", disk_xml.source)
disk_source = disk_xml.source.attrs["file"]
cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
"".format(disk_source, dst_disk, unprivileged_user))
utils.run(cmd)
disk_xml.source = disk_xml.new_disk_source(
attrs={"file": dst_disk})
vmxml.devices = xml_devices
# Remove all channels to avoid permission problems
channels = vmxml.get_devices(device_type="channel")
for channel in channels:
vmxml.del_device(channel)
vmxml.xmltreefile.write()
logging.debug("New VM xml: %s", vmxml)
utils.run("chmod a+rw %s" % vmxml.xml)
virsh.define(vmxml.xml, **virsh_dargs)
# Try to modify interface xml by update-device or edit xml
elif update:
iface.xmltreefile.write()
ret = virsh.update_device(vm_name, iface.xml,
ignore_status=True)
libvirt.check_exit_status(ret, status_error)
else:
vmxml.devices = xml_devices
vmxml.xmltreefile.write()
vmxml.sync()
def check_offloads_option(if_name, driver_options, session=None):
"""
Check interface offloads by ethtool output
"""
offloads = {"csum": "tx-checksumming",
"gso": "generic-segmentation-offload",
"tso4": "tcp-segmentation-offload",
"tso6": "tx-tcp6-segmentation",
"ecn": "tx-tcp-ecn-segmentation",
"ufo": "udp-fragmentation-offload"}
if session:
ret, output = session.cmd_status_output("ethtool -k %s | head"
" -18" % if_name)
else:
out = utils.run("ethtool -k %s | head -18" % if_name)
ret, output = out.exit_status, out.stdout
if ret:
raise error.TestFail("ethtool return error code")
logging.debug("ethtool output: %s", output)
for offload in driver_options.keys():
if offloads.has_key(offload):
if (output.count(offloads[offload]) and
not output.count("%s: %s" % (
offloads[offload], driver_options[offload]))):
raise error.TestFail("offloads option %s: %s isn't"
" correct in ethtool output" %
(offloads[offload],
driver_options[offload]))
def run_xml_test(iface_mac):
"""
Test for interface options in vm xml
"""
# Get the interface object according to the mac address
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
iface_devices = vmxml.get_devices(device_type="interface")
iface = None
for iface_dev in iface_devices:
if iface_dev.mac_address == iface_mac:
iface = iface_dev
if not iface:
raise error.TestFail("Can't find interface with mac"
" '%s' in vm xml" % iface_mac)
driver_dict = {}
if iface_driver:
driver_dict = ast.literal_eval(iface_driver)
for driver_opt in driver_dict.keys():
if not driver_dict[driver_opt] == iface.driver.driver_attr[driver_opt]:
raise error.TestFail("Can't see driver option %s=%s in vm xml"
% (driver_opt, driver_dict[driver_opt]))
if iface_target:
if (not iface.target.has_key("dev") or
not iface.target["dev"].startswith(iface_target)):
raise error.TestFail("Can't see device target dev in vm xml")
# Check macvtap mode by ip link command
if iface_target == "macvtap" and iface.source.has_key("mode"):
cmd = "ip -d link show %s" % iface.target["dev"]
output = utils.run(cmd).stdout
logging.debug("ip link output: %s", output)
mode = iface.source["mode"]
if mode == "passthrough":
mode = "passthru"
if not output.count("macvtap mode %s" % mode):
raise error.TestFail("Failed to verify macvtap mode")
def run_cmdline_test(iface_mac):
"""
Test for qemu-kvm command line options
"""
cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
ret = utils.run(cmd)
logging.debug("Command line %s", ret.stdout)
if test_vhost_net:
if not ret.stdout.count("vhost=on") and not rm_vhost_driver:
raise error.TestFail("Can't see vhost options in"
" qemu-kvm command line")
if iface_model == "virtio":
model_option = "device virtio-net-pci"
else:
model_option = "device rtl8139"
iface_cmdline = re.findall(r"%s,(.+),mac=%s" %
(model_option, iface_mac), ret.stdout)
if not iface_cmdline:
raise error.TestFail("Can't see %s with mac %s in command"
" line" % (model_option, iface_mac))
cmd_opt = {}
for opt in iface_cmdline[0].split(','):
tmp = opt.rsplit("=")
cmd_opt[tmp[0]] = tmp[1]
logging.debug("Command line options %s", cmd_opt)
driver_dict = {}
# Test <driver> xml options.
if iface_driver:
iface_driver_dict = ast.literal_eval(iface_driver)
for driver_opt in iface_driver_dict.keys():
if driver_opt == "name":
continue
elif driver_opt == "txmode":
if iface_driver_dict["txmode"] == "iothread":
driver_dict["tx"] = "bh"
else:
driver_dict["tx"] = iface_driver_dict["txmode"]
elif driver_opt == "queues":
driver_dict["mq"] = "on"
driver_dict["vectors"] = str(int(
iface_driver_dict["queues"]) * 2 + 2)
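# e.g. "queues" of 4 in the driver XML maps to mq=on and
# vectors=10 (4 * 2 + 2) on the qemu-kvm command line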
else:
driver_dict[driver_opt] = iface_driver_dict[driver_opt]
# Test <driver><host/><driver> xml options.
if iface_driver_host:
driver_dict.update(ast.literal_eval(iface_driver_host))
# Test <driver><guest/><driver> xml options.
if iface_driver_guest:
driver_dict.update(ast.literal_eval(iface_driver_guest))
for driver_opt in driver_dict.keys():
if (not cmd_opt.has_key(driver_opt) or
not cmd_opt[driver_opt] == driver_dict[driver_opt]):
raise error.TestFail("Can't see option '%s=%s' in qemu-kvm "
" command line" %
(driver_opt, driver_dict[driver_opt]))
if test_backend:
guest_pid = ret.stdout.rsplit()[1]
cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
if utils.system(cmd, ignore_status=True):
raise error.TestFail("Guest process didn't open backend file"
" %s" % backend["tap"])
cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
if utils.system(cmd, ignore_status=True):
raise error.TestFail("Guest process didn't open backend file"
" %s" % backend["vhost"])
def get_guest_ip(session, mac):
"""
Wrapper function to get guest ip address
"""
utils_net.restart_guest_network(session, mac)
# Wait for the IP address to be ready
utils_misc.wait_for(
lambda: utils_net.get_guest_ip_addr(session, mac), 10)
return utils_net.get_guest_ip_addr(session, mac)
def check_user_network(session):
"""
Check user network ip address on guest
"""
vm_ips = []
vm_ips.append(get_guest_ip(session, iface_mac_old))
if attach_device:
vm_ips.append(get_guest_ip(session, iface_mac))
logging.debug("IP address on guest: %s", vm_ips)
if len(vm_ips) != len(set(vm_ips)):
raise error.TestFail("Duplicated IP address on guest. "
"Check bug: https://bugzilla.redhat."
"com/show_bug.cgi?id=1147238")
for vm_ip in vm_ips:
if vm_ip is None or not vm_ip.startswith("10.0.2."):
raise error.TestFail("Found wrong IP address"
" on guest")
# Check gateway address
gateway = utils_net.get_net_gateway(session.cmd_output)
if gateway != "10.0.2.2":
raise error.TestFail("The gateway on guest is not"
" right")
# Check dns server address
ns_list = utils_net.get_net_nameserver(session.cmd_output)
if "10.0.2.3" not in ns_list:
raise error.TestFail("The dns server can't be found"
" on guest")
def check_mcast_network(session):
"""
Check multicast ip address on guests
"""
username = params.get("username")
password = params.get("password")
src_addr = ast.literal_eval(iface_source)['address']
add_session = additional_vm.wait_for_serial_login(username=username,
password=password)
vms_sess_dict = {vm_name: session,
additional_vm.name: add_session}
# Check mcast address on host
cmd = "netstat -g | grep %s" % src_addr
if utils.run(cmd, ignore_status=True).exit_status:
raise error.TestFail("Can't find multicast ip address"
" on host")
vms_ip_dict = {}
# Get ip address on each guest
for vms in vms_sess_dict.keys():
vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
if not vm_ip:
raise error.TestFail("Can't get multicast ip"
" address on guest")
vms_ip_dict.update({vms: vm_ip})
if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
raise error.TestFail("Got duplicated multicast ip address")
logging.debug("Found ips on guest: %s", vms_ip_dict)
# Run omping server on host
if not utils_misc.yum_install(["omping"]):
raise error.TestError("Failed to install omping"
" on host")
cmd = ("iptables -F;omping -m %s %s" %
(src_addr, "192.168.122.1 %s" %
' '.join(vms_ip_dict.values())))
# Run a background job waiting for client connections
bgjob = utils.AsyncJob(cmd)
# Run omping client on guests
for vms in vms_sess_dict.keys():
# omping should be installed first
if not utils_misc.yum_install(["omping"], vms_sess_dict[vms]):
raise error.TestError("Failed to install omping"
" on guest")
cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
(src_addr, "192.168.122.1 %s" %
vms_ip_dict[vms]))
ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
logging.debug("omping ret: %s, output: %s", ret, output)
if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or
not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
raise error.TestFail("omping failed on guest")
# Kill the background job
bgjob.kill_func()
status_error = "yes" == params.get("status_error", "no")
start_error = "yes" == params.get("start_error", "no")
unprivileged_user = params.get("unprivileged_user")
# Interface specific attributes.
iface_type = params.get("iface_type", "network")
iface_source = params.get("iface_source", "{}")
iface_driver = params.get("iface_driver")
iface_model = params.get("iface_model", "virtio")
iface_target = params.get("iface_target")
iface_backend = params.get("iface_backend", "{}")
iface_driver_host = params.get("iface_driver_host")
iface_driver_guest = params.get("iface_driver_guest")
attach_device = params.get("attach_iface_device")
change_option = "yes" == params.get("change_iface_options", "no")
update_device = "yes" == params.get("update_iface_device", "no")
additional_guest = "yes" == params.get("additional_guest", "no")
serial_login = "yes" == params.get("serial_login", "no")
rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
test_option_cmd = "yes" == params.get(
"test_iface_option_cmd", "no")
test_option_xml = "yes" == params.get(
"test_iface_option_xml", "no")
test_vhost_net = "yes" == params.get(
"test_vhost_net", "no")
test_option_offloads = "yes" == params.get(
"test_option_offloads", "no")
test_iface_user = "yes" == params.get(
"test_iface_user", "no")
test_iface_mcast = "yes" == params.get(
"test_iface_mcast", "no")
test_libvirtd = "yes" == params.get("test_libvirtd", "no")
test_guest_ip = "yes" == params.get("test_guest_ip", "no")
test_backend = "yes" == params.get("test_backend", "no")
if iface_driver_host or iface_driver_guest or test_backend:
if not libvirt_version.version_compare(1, 2, 8):
raise error.TestNAError("Offloading/backend options not "
"supported in this libvirt version")
if iface_driver and "queues" in ast.literal_eval(iface_driver):
if not libvirt_version.version_compare(1, 0, 6):
raise error.TestNAError("Queues options not supported"
" in this libvirt version")
if unprivileged_user:
if not libvirt_version.version_compare(1, 1, 1):
raise error.TestNAError("qemu-bridge-helper not supported"
" on this host")
virsh_dargs["unprivileged_user"] = unprivileged_user
# Create unprivileged user if needed
cmd = ("grep {0} /etc/passwd || "
"useradd {0}".format(unprivileged_user))
utils.run(cmd)
# Need another disk image for unprivileged user to access
dst_disk = "/tmp/%s.img" % unprivileged_user
# Destroy VM first
if vm.is_alive():
vm.destroy(gracefully=False)
# Back up xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
# iface_mac will be updated if a new interface is attached
iface_mac = iface_mac_old
# Additional vm for test
additional_vm = None
libvirtd = utils_libvirtd.Libvirtd()
try:
# Build the xml and run test.
try:
# Prepare interface backend files
if test_backend:
if not os.path.exists("/dev/vhost-net"):
utils.run("modprobe vhost-net")
backend = ast.literal_eval(iface_backend)
backend_tap = "/dev/net/tun"
backend_vhost = "/dev/vhost-net"
if not backend:
backend["tap"] = backend_tap
backend["vhost"] = backend_vhost
if not start_error:
# Create backend files for normal test
if not os.path.exists(backend["tap"]):
os.rename(backend_tap, backend["tap"])
if not os.path.exists(backend["vhost"]):
os.rename(backend_vhost, backend["vhost"])
# Edit the interface xml.
if change_option:
modify_iface_xml(update=False)
if rm_vhost_driver:
# Check vhost driver.
kvm_version = os.uname()[2]
driver_path = ("/lib/modules/%s/kernel/drivers/vhost/"
"vhost_net.ko" % kvm_version)
driver_backup = driver_path + ".bak"
cmd = ("modprobe -r {0}; lsmod | "
"grep {0}".format("vhost_net"))
if not utils.system(cmd, ignore_status=True):
raise error.TestError("Failed to remove vhost_net driver")
# Move the vhost_net driver
if os.path.exists(driver_path):
os.rename(driver_path, driver_backup)
else:
# Load vhost_net driver by default
cmd = "modprobe vhost_net"
utils.system(cmd)
# Attach an interface when the vm is shut off
if attach_device == 'config':
iface_mac = utils_net.generate_mac_address_simple()
iface_xml_obj = create_iface_xml(iface_mac)
iface_xml_obj.xmltreefile.write()
ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
flagstr="--config",
ignore_status=True)
libvirt.check_exit_status(ret)
# Clone additional vm
if additional_guest:
guest_name = "%s_%s" % (vm_name, '1')
# Clone additional guest
timeout = params.get("clone_timeout", 360)
utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
True, timeout=timeout)
additional_vm = vm.clone(guest_name)
additional_vm.start()
# additional_vm.wait_for_login()
# Start the VM.
if unprivileged_user:
virsh.start(vm_name, **virsh_dargs)
cmd = ("su - %s -c 'virsh console %s'"
% (unprivileged_user, vm_name))
session = aexpect.ShellSession(cmd)
session.sendline()
remote.handle_prompts(session, params.get("username"),
params.get("password"), "[\#\$]", 30)
# Get ip address on guest
if not get_guest_ip(session, iface_mac):
raise error.TestError("Can't get ip address on guest")
else:
# Will raise VMStartError exception if start fails
vm.start()
if serial_login:
session = vm.wait_for_serial_login()
else:
session = vm.wait_for_login()
if start_error:
raise error.TestFail("VM started unexpectedly")
# Attach an interface when the vm is running
if attach_device == 'live':
iface_mac = utils_net.generate_mac_address_simple()
iface_xml_obj = create_iface_xml(iface_mac)
iface_xml_obj.xmltreefile.write()
ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
flagstr="--live",
ignore_status=True)
libvirt.check_exit_status(ret)
# Need to sleep here for the attachment to take effect
time.sleep(5)
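# A hedged alternative to the fixed five-second sleep above would be to poll
# until the guest actually sees the new interface (utils_misc.wait_for and
# utils_net.get_linux_ifname are both used elsewhere in this test):
# utils_misc.wait_for(
#     lambda: utils_net.get_linux_ifname(session, iface_mac), timeout=10)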
# Update interface options
if update_device:
modify_iface_xml(update=True, status_error=status_error)
# Run tests for qemu-kvm command line options
if test_option_cmd:
run_cmdline_test(iface_mac)
# Run tests for vm xml
if test_option_xml:
run_xml_test(iface_mac)
# Run tests for offloads options
if test_option_offloads:
if iface_driver_host:
ifname_guest = utils_net.get_linux_ifname(
session, iface_mac)
check_offloads_option(
ifname_guest, ast.literal_eval(
iface_driver_host), session)
if iface_driver_guest:
ifname_host = libvirt.get_ifname_host(vm_name,
iface_mac)
check_offloads_option(
ifname_host, ast.literal_eval(iface_driver_guest))
if test_iface_user:
# Test user type network
check_user_network(session)
if test_iface_mcast:
# Test mcast type network
check_mcast_network(session)
# Check guest ip address
if test_guest_ip:
if not get_guest_ip(session, iface_mac):
raise error.TestFail("Guest can't get a"
" valid ip address")
session.close()
# Restart libvirtd and guest, then test again
if test_libvirtd:
libvirtd.restart()
vm.destroy()
vm.start()
if test_option_xml:
run_xml_test(iface_mac)
# Finally, detach the hot/cold-plugged interface
if attach_device:
ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
flagstr="", ignore_status=True)
libvirt.check_exit_status(ret)
except virt_vm.VMStartError as e:
logging.info(str(e))
if not start_error:
raise error.TestFail('VM failed to start\n%s' % e)
finally:
# Recover VM.
logging.info("Restoring vm...")
# Restore interface backend files
if test_backend:
if not os.path.exists(backend_tap):
os.rename(backend["tap"], backend_tap)
if not os.path.exists(backend_vhost):
os.rename(backend["vhost"], backend_vhost)
if rm_vhost_driver:
# Restore vhost_net driver
if os.path.exists(driver_backup):
os.rename(driver_backup, driver_path)
if unprivileged_user:
virsh.remove_domain(vm_name, "--remove-all-storage",
**virsh_dargs)
if additional_vm:
virsh.remove_domain(additional_vm.name,
"--remove-all-storage")
# Kill all omping server process on host
utils.system("pidof omping && killall omping",
ignore_status=True)
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml_backup.sync()
0
Example 74
def main():
# login credentials
username = myBot.config.login.username
password = myBot.config.login.password
print("Welcome to Reddit Analysis Bot.")
print("Type \"quit\", \".quit\", or \'q\' to exit the program.")
login(username, password)
while True:
try:
# list of subreddits you want to analyze
drilldownList = raw_input("Enter the subreddits you wish to target.~/> ").split()
except NameError:
# python 3 support
drilldownList = input("Enter the subreddits you wish to target.~/> ").split()
# check to make sure each subreddit is valid
check_subreddits(drilldownList)
# iterate through the drilldownList to get data
for subreddit in drilldownList:
# check to see if a drilldown for this subreddit
# was already done
dbFile = "{0}.db".format(subreddit)
if(subreddit in ["quit", ".quit", 'q']):
print("Quitting...")
sys.exit(0)
elif(os.path.isfile("subreddits/{0}".format(dbFile))):
con = db.connect("subreddits/{0}".format(dbFile))
cur = con.cursor()
sub = (subreddit,)
cur.execute("SELECT users FROM drilldown WHERE overlaps=?", sub)
for element in cur:
userList = operator.getitem(element, 0)
try:
# format the data for Reddit
text = myBot.format_post(subreddit, userList)
except Exception as e:
myBot.add_msg(e)
logging.error("Failed to format post. " + str(e) + "\n\n")
continue
try:
while True:
try:
# submit the post for Reddit
post = myBot.submit_post(subreddit, text)
break
except (ConnectionResetError, HTTPError, timeout) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
myBot.add_msg("Waiting to try again...")
sleep(60)
continue
except (APIException, ClientException, Exception) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
raise SkipThis("Something went wrong. Skipping...")
except SkipThis:
logging.error(str(e) + "\n\n")
myBot.log_post(subreddit, text)
continue
if(post != None):
try:
try:
myBot.give_flair(post, subreddit)
except (APIException, ClientException, Exception) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
raise SkipThis("Couldn't flair post. Skipping...")
except SkipThis:
continue
con.close()
continue
else:
try:
while True:
# get the list of users
try:
userList = myBot.get_users(subreddit)
myBot.userList = []
break
except (InvalidSubreddit, RedirectException) as e:
myBot.add_msg(e)
logging.error("Invalid subreddit. Removing from list." + str(e) + "\n\n")
drilldownList.remove(subreddit)
raise SkipThis("Skipping invalid subreddit...")
except (APIException, ClientException, Exception) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
raise SkipThis("Couldn't get users. Skipping...")
except SkipThis:
continue
for user in userList:
myBot.log_info(user + ',')
myBot.log_info("\n\n")
try:
while True:
try:
# get the list of subreddits
subredditList = myBot.get_subs(userList)
myBot.subredditList = []
break
except (APIException, ClientException, OperationalError) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
raise SkipThis("Couldn't get overlapping subreddits. Skipping...")
except SkipThis:
continue
for sub in subredditList:
myBot.log_info(sub + ',')
myBot.log_info("\n\n")
try:
# get the list of tuples
subredditTuple = myBot.create_tuples(subreddit, subredditList)
for item in subredditTuple:
myBot.log_info(item)
myBot.log_info(',')
myBot.log_info("\n\n")
except Exception as e:
myBot.add_msg(e)
logging.error("Failed to create tuples. " + str(e) + "\n\n")
continue
try:
myBot.add_db(subreddit, subredditTuple, len(userList))
except Exception as e:
myBot.add_msg(e)
logging.error("Failed to add to database. " + str(e) + "\n\n")
continue
try:
# format the data for Reddit
text = myBot.format_post(subreddit, userList)
except Exception as e:
myBot.add_msg(e)
logging.error("Failed to format post. " + str(e) + "\n\n")
continue
try:
while True:
try:
# submit the post for Reddit
post = myBot.submit_post(subreddit, text)
break
except (ConnectionResetError, HTTPError, timeout) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
myBot.add_msg("Waiting to try again...")
sleep(60)
continue
except (APIException, ClientException, Exception) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
raise SkipThis("Couldn't submit post. Skipping...")
except SkipThis:
logging.error(str(e) + "\n\n")
myBot.log_post(subreddit, text)
continue
if(post != None):
try:
try:
print("Setting post's flair...")
myBot.give_flair(post, subreddit)
except ModeratorRequired as e:
myBot.add_msg(e)
logging.error("Failed to set flair. " + str(e) + '\n' + str(post.permalink) + "\n\n")
raise SkipThis("Need moderator privileges to set flair. Skipping...")
except (APIException, ClientException, Exception) as e:
myBot.add_msg(e)
logging.error(str(e) + "\n\n")
raise SkipThis("Couldn't set flair. Skipping...")
except SkipThis:
continue
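Both submit loops in this example retry transient network failures with a flat sleep(60) between attempts. A sketch of the same idea with exponential backoff, standard library only (the helper and the usage line are illustrative, not part of the bot):
import time

def retry_with_backoff(func, retries=5, base_delay=1.0):
    # Call func(); on failure sleep base_delay * 2**attempt, then try again.
    for attempt in range(retries):
        try:
            return func()
        except Exception as e:
            if attempt == retries - 1:
                raise
            delay = base_delay * (2 ** attempt)
            print("Attempt %d failed (%s); sleeping %.1fs" % (attempt + 1, e, delay))
            time.sleep(delay)

# e.g. post = retry_with_backoff(lambda: myBot.submit_post(subreddit, text))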
0
Example 75
Project: ray Source File: ec2.py
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print("ERROR: Must provide an identity file (-i) for ssh connections.", file=stderr)
sys.exit(1)
if opts.key_pair is None:
print("ERROR: Must provide a key pair name (-k) to use on instances.", file=stderr)
sys.exit(1)
user_data_content = None
print("Setting up security groups...")
master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
authorized_address = opts.authorized_address
if master_group.rules == []: # Group was just now created
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
master_group.authorize('tcp', 22, 22, authorized_address)
if slave_group.rules == []: # Group was just now created
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
slave_group.authorize('tcp', 22, 22, authorized_address)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print("ERROR: There are already instances running in group %s or %s" %
(master_group.name, slave_group.name), file=stderr)
sys.exit(1)
# Use the default Ubuntu AMI.
if opts.ami is None:
if opts.region == "us-east-1":
opts.ami = "ami-2d39803a"
elif opts.region == "us-west-1":
opts.ami = "ami-06116566"
elif opts.region == "us-west-2":
opts.ami = "ami-9abea4fb"
elif opts.region == "eu-west-1":
opts.ami = "ami-f95ef58a"
elif opts.region == "eu-central-1":
opts.ami = "ami-87564feb"
elif opts.region == "ap-northeast-1":
opts.ami = "ami-a21529cc"
elif opts.region == "ap-northeast-2":
opts.ami = "ami-09dc1267"
elif opts.region == "ap-southeast-1":
opts.ami = "ami-25c00c46"
elif opts.region == "ap-southeast-2":
opts.ami = "ami-6c14310f"
elif opts.region == "ap-south-1":
opts.ami = "ami-4a90fa25"
elif opts.region == "sa-east-1":
opts.ami = "ami-0fb83963"
else:
raise Exception("The specified region is unknown.")
# we use group ids to work around https://github.com/boto/boto/issues/350
additional_group_ids = []
if opts.additional_security_group:
additional_group_ids = [sg.id
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
print("Launching instances...")
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print("Could not find AMI " + opts.ami, file=stderr)
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.ascii_letters[i + 1]
block_map[name] = dev
# Launch slaves
if opts.spot_price is not None:
# Launch spot instances with the requested price
print("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_profile_name=opts.instance_profile_name)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print("Waiting for spot instances to be granted...")
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print("All %d slaves granted" % opts.slaves)
reservations = conn.get_all_reservations(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print("%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves))
except:
print("Canceling spot instance requests")
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print(("WARNING: %d instances are still running" % running), file=stderr)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
slave_nodes += slave_res.instances
print("Launched {s} slave{plural_s} in {z}, regid = {r}".format(
s=num_slaves_this_zone,
plural_s=('' if num_slaves_this_zone == 1 else 's'),
z=zone,
r=slave_res.id))
i += 1
# Launch or resume masters
if existing_masters:
print("Starting master...")
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(
key_name=opts.key_pair,
security_group_ids=[master_group.id] + additional_group_ids,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
master_nodes = master_res.instances
print("Launched master in %s, regid = %s" % (zone, master_res.id))
# This wait time corresponds to SPARK-4983
print("Waiting for AWS to propagate instance metadata...")
time.sleep(15)
# Give the instances descriptive names and set additional tags
additional_tags = {}
if opts.additional_tags.strip():
additional_tags = dict(
map(str.strip, tag.split(':', 1)) for tag in opts.additional_tags.split(',')
)
for master in master_nodes:
master.add_tags(
dict(additional_tags, Name='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
)
for slave in slave_nodes:
slave.add_tags(
dict(additional_tags, Name='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
)
# Return all the instances
return (master_nodes, slave_nodes)
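The spot-instance branch above polls conn.get_all_spot_instance_requests() every ten seconds until all requests turn active. The same wait-until pattern distilled into a reusable helper (a sketch; the predicate in the usage line is hypothetical):
import time

def wait_until(predicate, interval=10, timeout=600):
    # Poll predicate() every `interval` seconds; True once it holds,
    # False if `timeout` seconds elapse first.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g. wait_until(lambda: count_active(conn, my_req_ids) == opts.slaves)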
0
Example 76
Project: linked-jazz-name-directory Source File: mergeLOCandDBpedia.py
def main():
if not os.path.exists('data/loc_single'):
os.makedirs('data/loc_single')
dbData = open('data/jazzPeople.nt', 'r')
personNames = {}
personBirthDates = {}
personDeathDates = {}
nameCollisons = {}
matchesBothDate = []
matchesBothDateURIs = []
matchesSingleDate = []
foundCheckList = []
possibleLOC={}
allLOC = {}
for line in dbData:
quad = line.split()
if quad[1] == '<http://xmlns.com/foaf/0.1/name>':
name = ''
name = " ".join(quad[2:])
name = name[1:name[1:].find('@en')]
if len(name) < 5:
print name, line
name = name.replace('\\','')
if personNames.has_key(name) == False:
personNames[name] = quad[0]
else:
if personNames[name] != quad[0]:
if nameCollisons.has_key(name):
if quad[0] not in nameCollisons[name]:
nameCollisons[name].append(quad[0])
else:
nameCollisons[name]=[quad[0]]
print "1Name collision", name, line
print personNames[name], nameCollisons[name]
addNames = []
if name.find('"') != -1:
print name
name = name.split('"')[0].strip() + ' ' + name.split('"')[2].strip()
addNames.append(name)
#we also want to pull their name from the URL, because that is often the most common variant of their name
uri = quad[0]
name = formatName(quad[0].split('/resource/')[len(quad[0].split('/resource/'))-1])
name = name.replace('\\','')
addNames.append(name)
#print name
#remove any nickname and add that as well
if name.find('"') != -1:
print name
name = name.split('"')[0].strip() + ' ' + name.split('"')[2].strip()
addNames.append(name)
for aName in addNames:
#is this name already in the lookup:
print aName
if personNames.has_key(aName):
print "\t Name already in personNames"
#yes, is it the same URI as this one?
if personNames[aName] != quad[0]:
print "\t Name Has Different URI Attached"
#no, it is a new URI, is it already in the collision lookup?
if nameCollisons.has_key(aName):
print "\t Name already in collision"
#yes, is this URI already in it?
if quad[0] not in nameCollisons[aName]:
print "\t Different Name, adding to it"
#no, add it
nameCollisons[aName].append(quad[0])
else:
#no, add a new array to the collision with it
nameCollisons[aName] = [quad[0]]
print "\t Creating new collision record"
else:
print "\t not yet in personNames, adding it"
personNames[aName] = quad[0]
if quad[1] == '<http://dbpedia.org/ontology/deathDate>':
deathDate = ''
deathDate = " ".join(quad[2:])
deathDate = deathDate[1:deathDate[1:].find('-')+1]
if len(deathDate) != 4:
print "Error death date: ", line
else:
personDeathDates[quad[0]] = deathDate
#print deathDate
if quad[1] == '<http://dbpedia.org/ontology/birthDate>':
birthDate = ''
birthDate = " ".join(quad[2:])
birthDate = birthDate[1:birthDate[1:].find('-')+1]
if len(birthDate) != 4:
print "Error birth date: ", line
else:
personBirthDates[quad[0]] = birthDate
print len(personNames), len(personBirthDates), len(personDeathDates)
temp = open("db_tmp.txt","w")
for key, value in personNames.iteritems():
line = key + ' ' + value
if personBirthDates.has_key(value):
line = line + ' ' + personBirthDates[value]
if personDeathDates.has_key(value):
line = line + ' ' + personDeathDates[value]
temp.writelines(line + "\n")
for key, value in nameCollisons.iteritems():
for x in value:
line = key + ' ' + x
if personBirthDates.has_key(x):
line = line + ' ' + personBirthDates[x]
if personDeathDates.has_key(x):
line = line + ' ' + personDeathDates[x]
temp.writelines(line + "\n")
print line
locFile = open('data/personauthoritiesnames.nt.skos', 'r')
counter = 0
counterMatched = 0
print "building name list"
locDebug = open("loc_tmp.txt","w")
for line in locFile:
counter = counter+1
#if counter % 100000 == 0:
# print "processed " + str(counter / 100000) + "00k names"
if counter % 1000000 == 0:
print "processed ", counter / 1000000, "Million names!"
quad = line.split();
name = " ".join(quad[2:])
name = name[1:name[1:].find('@EN')]
name = name.replace('?','')
year = re.findall(r'\d{4}', name)
born = 0
died = 0
possibleNames = []
if len(year) != 0:
if len(year) == 1 and name[len(name)-1:] != '-':
if name.find(' b.') != -1:
born = year[0]
#print "Born : ",year[0]
elif name.find(' d.') != -1:
died = year[0]
#print "died : ",year[0]
elif name.find(' fl.') != -1:
born = year[0]
#print "born(flourished) : ",year[0]
elif name.find('jin shi') != -1:
born = year[0]
#print "born(third stage) : ",year[0]
elif name.find('ju ren') != -1:
born = year[0]
#print "born(second stage) : ",year[0]
elif len(re.findall(r'\d{3}\-', name)) != 0:
year = re.findall(r'\d{3}\-', name)
born = year[0][0:3]
#print "born : ", year[0][0:3]
#now get the death year
died = re.findall(r'\d{4}', name)[0]
elif len(re.findall(r'\-\d{4}', name)) != 0:
died = re.findall(r'\-\d{4}', name)[0][1:]
elif name.find(' ca. ') != -1 or name.find(' ca ') != -1:
born = year[0]
#print "born(ca) : ",year[0]
elif name.find(' b ') != -1:
born = year[0]
#print "Born : ",year[0]
elif name.find(' d ') != -1:
died = year[0]
#print "died : ",year[0]
elif name.find(' born ') != -1:
born = year[0]
#print "Born : ",year[0]
elif name.find(' died ') != -1:
died = year[0]
#print "died : ",year[0]
else:
#print name, "\n"
#print "error: cannot figure out this date, update the regex"
#we have hit like 90% of the cases here, now just straight up weird stuff, so just grab the date
born = year[0]
#print len(year)
elif len(year) == 1 and name[len(name)-1:] == '-':
born = year[0]
elif len(year) == 2:
born = year[0]
died = year[1]
elif len(year) == 3:
#they are doing "1999 or 2000 - blah blah blah" take first and last
born = year[0]
died = year[2]
elif len(year) == 4:
#they are doing "1999 or 2000 - blah blah blah" take first and last
born = year[0]
died = year[3]
else:
print name, "Could not process date \n"
sys.exit()
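# Worked examples of the year rules above (illustrative names):
#   "Davis, Miles, 1926-1991" -> born 1926, died 1991 (two four-digit years)
#   "Marsalis, Wynton, 1961-" -> born 1961 (single year, trailing dash)
#   "Smith, Bessie, d. 1937" -> died 1937 (' d.' marker)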
#print name, born, died
#else:
#these people would have lived < 0 bce - 999 AD, we currently do not care about them.
#if len(re.findall(r'\d{3}', name)) != 0:
#print name
#personDates[quad[0]] = [born,died]
#now process the name part
#chop off the rest where a number is detected to get rid of any date
if re.search(r'\d{1}',name) != None:
name = name[0:name.find(re.search(r'\d{1}',name).group())]
name=name.strip()
#now chop off anything past the second comma, it is not name material afterwards; entries with 3 commas are mostly "sir" and "duke of earl" etc, which we don't care about
if len(re.findall(',', name)) == 2 or len(re.findall(',', name)) == 3:
name = name.split(',')[0] + ', ' + name.split(',')[1]
#print name, '|', newname
if name.find('\"') != -1:
name = name.replace("\\",'')
if len(re.findall(',', name)) == 1:
if name.find('(') == -1:
#there is no parenthetical name
newname = name.split(',')
newname = newname[1] + ' ' + newname[0]
#print name, '|', newname
possibleNames.append(newname.strip())
#we want to add that name, but also add a version without a middle initial, if one is present
if len(newname.split()) == 3 and (newname.split()[1][len(newname.split()[1])-1] == '.' or len(newname.split()[1]) == 1):
newname = newname.split()[0] + ' ' + newname.split()[2]
#print "\t" + newname
possibleNames.append(newname.strip())
#we also want to add a name where, if they only have an initial for the first part and a full middle name, we drop the first initial
if len(newname.split()) == 3 and len(newname.split()[1]) > 2 and (newname.split()[0][len(newname.split()[0])-1] == '.' or len(newname.split()[1]) == 1):
newname = newname.split()[1] + ' ' + newname.split()[2]
#print "\t" + newname
possibleNames.append(newname.strip())
else:
#they have parentheses in their name, meaning the long form of the name is contained in the parentheses
newname = name.split(',')
newname = newname[1] + ' ' + newname[0]
#cut out the stuff before the parenthesis
newname = newname[newname.find('(')+1:]
newname = newname.replace(')','')
#print name, '|', newname
possibleNames.append(newname.strip())
#now also cut out the middle initial if it is there and add that version
if len(newname.split()) == 3 and (newname.split()[1][len(newname.split()[1])-1] == '.' or len(newname.split()[1]) == 1):
newname = newname.split()[0] + ' ' + newname.split()[2]
#print "\t" + newname
possibleNames.append(newname.strip())
else:
#so here we are... the depths of the quirks
if name.find('(') != -1:
#if the very first thing is an initial, it is likely an abbreviated name and the full name is in the parentheses
if len(name.split()[0])==2:
if name.split()[0][1] == '.':
newname = name.split('(')[1]
newname = newname.replace(')','')
possibleNames.append(newname.strip())
#print name, '|', newname
#if len(name.split()[len(name.split())-1])==2:
# if name.split()[len(name.split())-1][1] == '.':
# print name, '|'
else:
#this will be stuff like P-King (Musician), or Shyne (Rapper), stuff we are interested in, nicknames, so cut out the descriptor
newname = name.split('(')[0].strip()
#TODO: if we really care to take this further here is a spot where we will lose some names
#the quirks get very specific and would need a lot more rules
#print name, '|', newname
possibleNames.append(newname.strip())
else:
#print name, '|'
newname = name.strip()
#single names here, add them in
possibleNames.append(newname.strip())
#print possibleNames
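# Worked examples of the name rules above (illustrative):
#   "Coltrane, John W." -> "John W. Coltrane", "John Coltrane"
#   "Gillespie, Dizzy (John Birks)" -> "John Birks Gillespie"
#   "Shyne (Rapper)" -> "Shyne"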
#skip logic:
if int(born) != 0 and int(born) < 1875:
continue
for aPossible in possibleNames:
if personNames.has_key(aPossible):
#we have a match (!)
#add all the Ids we are going to check into a list
useURIs = []
#the main one
useURIs.append(personNames[aPossible])
#check for collision names, names that are the same but reflect different URIs
if nameCollisons.has_key(aPossible):
for collison in nameCollisons[aPossible]:
useURIs.append(collison)
for useURI in useURIs:
locDebug.writelines(aPossible + ' ' + str(born) + ' ' + str(died) + "\n")
if allLOC.has_key(aPossible):
#it is in here already, see if it has this URI
if quad[0] not in allLOC[aPossible]:
allLOC[aPossible].append(quad[0])
else:
allLOC[aPossible] = [quad[0]]
didMatched = False
if personBirthDates.has_key(useURI) and personDeathDates.has_key(useURI):
if int(born) != 0 and int(died) != 0 and int(personBirthDates[useURI]) != 0 and int(personDeathDates[useURI]) != 0:
if (int(personBirthDates[useURI]) == int(born)) and (int(died) == int(personDeathDates[useURI])):
if [useURI, quad[0]] not in matchesBothDate:
didMatched=True
counterMatched = counterMatched + 1
matchesBothDate.append([useURI, quad[0]])
foundCheckList.append(useURI)
matchesBothDateURIs.append(useURI)
#print aPossible, quad[0], born, died
#print aPossible, useURI, personBirthDates[useURI], personDeathDates[useURI]
continue
#see if birth years match
if personBirthDates.has_key(useURI):
if int(personBirthDates[useURI]) == int(born) and int(personBirthDates[useURI]) != 0 and int(born) != 0:
if [useURI, quad[0]] not in matchesSingleDate:
#print personNames[aPossible], '=', quad[0]
didMatched=True
counterMatched = counterMatched + 1
matchesSingleDate.append([useURI, quad[0]])
foundCheckList.append(useURI)
#print aPossible, quad[0], born, "born match"
#print aPossible, useURI, personBirthDates[useURI]
continue
#does it have a death date match?
if personDeathDates.has_key(useURI):
if int(personDeathDates[useURI]) == int(died) and int(personDeathDates[useURI]) != 0 and int(died) != 0:
if [useURI, quad[0]] not in matchesSingleDate:
#print personNames[aPossible], '=', quad[0]
matchesSingleDate.append([useURI, quad[0]])
didMatched=True
counterMatched = counterMatched + 1
foundCheckList.append(useURI)
#print aPossible, quad[0], died, "death match"
#print aPossible, useURI, personDeathDates[useURI]
continue
#we are now going to remove any matches from matchesSingleDate where there is a perfect date match already
temp = []
for aSingleDateMatch in matchesSingleDate:
if aSingleDateMatch[0] not in matchesBothDateURIs:
temp.append(aSingleDateMatch)
else:
for x in matchesBothDate:
if x[0] == aSingleDateMatch[0]:
print "Attempted Dupe", aSingleDateMatch
print "With", x
matchesSingleDate = list(temp)
matchedSingle = []
matchedMany = []
matchedNone = []
for key, value in personNames.iteritems():
if value not in foundCheckList:
#print "Not matched " + value + ' ' + key
if allLOC.has_key(key):
if len(allLOC[key]) == 1:
#print "\tOnly one possible LOC match:" + allLOC[key][0]
matchedSingle.append([value,allLOC[key][0]])
else:
#print "\t 1+ possible LOC match:", allLOC[key]
matchedMany.append([value,allLOC[key]])
else:
matchedNone.append(value)
print " \ncuemCollision***\n"
for key, value in nameCollisons.iteritems():
for x in value:
if x not in foundCheckList:
#print "Not matched " + x + ' ' + key
if allLOC.has_key(key):
if len(allLOC[key]) == 1:
#print "\tOnly one possible LOC match:" + allLOC[key][0]
matchedSingle.append([x,allLOC[key][0]])
else:
#print "\t 1+ possible LOC match:", allLOC[key]
matchedMany.append([x,allLOC[key]])
else:
matchedNone.append(x)
#for key, value in possibleLOC.iteritems():
#if len(value) == 1:
#if value not in matches:
# matches.append(value)
#print key, '=', value
#make sure there are no duplicates, as in same DB to LOC records in the singles
tempCopy = []
for aSingle in matchedSingle:
add = True
for anotherSingle in tempCopy:
if aSingle[0] == anotherSingle[0] and aSingle[1] == anotherSingle[1]:
add = False
if add:
tempCopy.append(aSingle)
matchedSingle = list(tempCopy)
#now we are going to go through the singles and pull out anyone that has been added twice
#this can happen for common names born in the same year, move them to the 1->many list
matchedSingleCheck = []
matchedSingleDupes = []
for aSingle in matchedSingle:
if aSingle[0] not in matchedSingleCheck:
matchedSingleCheck.append(aSingle[0])
else:
print "Dupe in singles found:", aSingle
matchedSingleDupes.append(aSingle[0])
singleDupes = {}
tempCopy = []
print len(matchedSingle)
for aSingle in matchedSingle:
if aSingle[0] in matchedSingleDupes:
if singleDupes.has_key(aSingle[0]):
singleDupes[aSingle[0]].append(aSingle[1])
else:
singleDupes[aSingle[0]] = [aSingle[1]]
else:
tempCopy.append(aSingle)
matchedSingle = list(tempCopy)
print len(matchedSingle)
print singleDupes
#add them to the matchedmany list
for key, value in singleDupes.iteritems():
matchedMany.append([key,value])
#we now need to do the same for matchesSingleDate; they could have matched one date correctly, but still have matched other people
matchesSingleDateCheck = []
matchesSingleDateDupes = []
for aSingle in matchesSingleDate:
if aSingle[0] not in matchesSingleDateCheck:
matchesSingleDateCheck.append(aSingle[0])
else:
print "Dupe in single date found:", aSingle
matchesSingleDateDupes.append(aSingle[0])
singleDateDupes = {}
tempCopy = []
print len(matchesSingleDate)
for aSingle in matchesSingleDate:
if aSingle[0] in matchesSingleDateDupes:
if singleDateDupes.has_key(aSingle[0]):
singleDateDupes[aSingle[0]].append(aSingle[1])
else:
singleDateDupes[aSingle[0]] = [aSingle[1]]
else:
tempCopy.append(aSingle)
matchesSingleDate = list(tempCopy)
print len(matchesSingleDate)
#add them to the matchedmany list
for key, value in singleDateDupes.iteritems():
matchedMany.append([key,value])
print singleDateDupes
#TODO: This part needs to be fixed so the call to the LOC site is synchronous and waits for the file to be ready...
machtedSingleJazz = []
machtedSingleNoJazz = []
machtedSingleNoJazzLOC = []
for x in matchedSingle:
url = x[1]
id = formatName(url.split('/names/')[len(url.split('/names/'))-1])
foundJazz = False
if os.path.exists('data/loc_single/' + id + '.nt') == False:
os.system('wget --output-document="data/loc_single/' + id + '.nt" "http://id.loc.gov/authorities/names/' + id + '.nt"')
#sleep as a stop-gap fix; see the TODO above
time.sleep( 1.5 )
if os.path.exists('data/loc_single/' + id + '.nt'):
f = open('data/loc_single/' + id + '.nt', 'r')
for line in f:
line = line.lower()
if line.find('jazz') != -1 or line.find('music') != -1 or line.find('blues') != -1 or line.find('jazz') != -1 or line.find('vocal') != -1:
print line
foundJazz = True
f.close()
else:
print 'data/loc_single/' + id + '.nt does not exist'
if id in machtedSingleNoJazzLOC:
foundJazz = False
print "Dupe detected trying to assign" ,x
if foundJazz:
machtedSingleJazz.append(x)
machtedSingleNoJazzLOC.append(id)
else:
machtedSingleNoJazz.append(x)
print len(matchesBothDate), " BothDate Matches", len(matchesSingleDate), " Single Date Matches", len(matchedSingle), "Single LOC", len(matchedMany), "Multiple LOC matches", len(matchedNone), "No Matches"
#print len(matches)+ len(matchedSingle)+len(matchedMany) , " matched out of Total of about ", len(personNames)
print len(matchedSingle) , " = " , len(machtedSingleJazz) , " keyword found and ", len(machtedSingleNoJazz), " no keyword found"
#make the sameas files
allLines=[]
temp = open("data/sameAs_perfect.nt","w")
for value in matchesBothDate:
line = value[0] + ' <http://www.w3.org/2002/07/owl#sameAs> ' + value[1] + " . \n";
if line not in allLines:
temp.writelines(line)
allLines.append(line)
temp = open("data/sameAs_high.nt","w")
for value in matchesSingleDate:
line = value[0] + ' <http://www.w3.org/2002/07/owl#sameAs> ' + value[1] + " . \n";
if line not in allLines:
temp.writelines(line)
allLines.append(line)
temp = open("data/sameAs_medium.nt","w")
for value in machtedSingleJazz:
line = value[0] + ' <http://www.w3.org/2002/07/owl#sameAs> ' + value[1] + " . \n";
if line not in allLines:
temp.writelines(line)
allLines.append(line)
temp = open("data/sameAs_low.nt","w")
for value in machtedSingleNoJazz:
line = value[0] + ' <http://www.w3.org/2002/07/owl#sameAs> ' + value[1] + " . \n";
if line not in allLines:
temp.writelines(line)
allLines.append(line)
temp = open("data/sameAs_many.nt","w")
for value in matchedMany:
for x in value[1]:
temp.writelines(value[0] + ' <http://www.w3.org/2004/02/skos/core#closeMatch> ' + x + " . \n")
temp = open("data/sameAs_none.nt","w")
for value in matchedNone:
temp.writelines(value + ' <http://www.w3.org/2002/07/owl#sameAs> ' + '<none>' + " . \n")
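The single-match pass above shells out to wget for each LOC record and sleeps 1.5 seconds between requests as a crude rate limit on id.loc.gov. A sketch of the same throttled fetch with urllib instead of wget (Python 2 as in the script; the pause and URL pattern come from the code above, the error handling is an assumption):
import time
import urllib

def fetch_loc_record(loc_id, dest_dir='data/loc_single'):
    # Download one LOC authority record, then pause so the server isn't hammered.
    url = 'http://id.loc.gov/authorities/names/%s.nt' % loc_id
    try:
        urllib.urlretrieve(url, '%s/%s.nt' % (dest_dir, loc_id))
    except IOError as e:
        print 'failed to fetch %s: %s' % (url, e)
    time.sleep(1.5)  # same 1.5-second throttle the script uses between wget calls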
0
Example 77
Project: plugin.video.streamondemand Source File: xbmctools.py
def play_video(item,desdefavoritos=False,desdedescargados=False,desderrordescargas=False,strmfile=False):
from core import servertools
logger.info("streamondemand.platformcode.xbmctools play_video")
#logger.info(item.tostring('\n'))
try:
item.server = item.server.lower()
except:
item.server = ""
if item.server=="":
item.server="directo"
view = False
# Open the selection dialog
opciones = []
default_action = config.get_setting("default_action")
logger.info("default_action="+default_action)
# If not in normal mode, don't show the dialog because it hangs XBMC
muestra_dialogo = (config.get_setting("player_mode")=="0" and not strmfile)
# Extract the video URLs; if the video can't be played, this reports why
video_urls,puedes,motivo = servertools.resolve_video_urls_for_playing(item.server,item.url,item.password,muestra_dialogo)
# If the video can be played, present the options
if puedes:
for video_url in video_urls:
opciones.append(config.get_localized_string(30151) + " " + video_url[0])
if item.server=="local":
opciones.append(config.get_localized_string(30164))
else:
opcion = config.get_localized_string(30153)
opciones.append(opcion) # "Download"
if item.channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Remove from favorites"
else:
opciones.append(config.get_localized_string(30155)) # "Add to favorites"
if not strmfile:
opciones.append(config.get_localized_string(30161)) # "Add to Library"
if item.channel!="descargas":
opciones.append(config.get_localized_string(30157)) # "Add to download list"
else:
if item.category=="errores":
opciones.append(config.get_localized_string(30159)) # "Delete download permanently"
opciones.append(config.get_localized_string(30160)) # "Move back to download list"
else:
opciones.append(config.get_localized_string(30156)) # "Remove from download list"
if config.get_setting("jdownloader_enabled")=="true":
opciones.append(config.get_localized_string(30158)) # "Send to JDownloader"
if default_action=="3":
seleccion = len(opciones)-1
# Search for trailers on YouTube
if not item.channel in ["Trailer","ecarteleratrailers"]:
opciones.append(config.get_localized_string(30162)) # "Search for Trailer"
# If the video can't be played, inform the user
else:
if item.server!="":
advertencia = xbmcgui.Dialog()
if "<br/>" in motivo:
resultado = advertencia.ok("Non è possibile guardare il video perché...", motivo.split("<br/>")[0],
motivo.split("<br/>")[1], item.url)
else:
resultado = advertencia.ok("Non è possibile guardare il video perché...", motivo, item.url)
else:
resultado = advertencia.ok("Non è possibile guardare il video perché...", "Il server che lo ospita non è",
"ancora supportato da streamondemand", item.url)
if item.channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Remove from favorites"
if item.channel=="descargas":
if item.category=="errores":
opciones.append(config.get_localized_string(30159)) # "Delete download permanently"
else:
opciones.append(config.get_localized_string(30156)) # "Remove from download list"
if len(opciones)==0:
return
# If the default action is "Ask", ask
if default_action=="0": # and server!="torrent":
dia = xbmcgui.Dialog()
seleccion = dia.select(config.get_localized_string(30163), opciones) # "Choose an option"
#dia.close()
'''
elif default_action=="0" and server=="torrent":
advertencia = xbmcgui.Dialog()
logger.info("video_urls[0]="+str(video_urls[0][1]))
if puedes and ('"status":"COMPLETED"' in video_urls[0][1] or '"percent_done":100' in video_urls[0][1]):
listo = "y está listo para ver"
else:
listo = "y se está descargando"
resultado = advertencia.ok( "Torrent" , "El torrent ha sido añadido a la lista" , listo )
seleccion=-1
'''
elif default_action=="1":
seleccion = 0
elif default_action=="2":
seleccion = len(video_urls)-1
elif default_action=="3":
seleccion = seleccion
else:
seleccion=0
logger.info("seleccion=%d" % seleccion)
logger.info("seleccion=%s" % opciones[seleccion])
# Nothing was selected, most likely because the user hit ESC
if seleccion==-1:
#To avoid the "One or more items failed" error when cancelling the selection from a strm file
listitem = xbmcgui.ListItem( item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail)
xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),False,listitem) # JUR Added
#if config.get_setting("subtitulo") == "true":
# config.set_setting("subtitulo", "false")
return
if opciones[seleccion]==config.get_localized_string(30158): # "Send to JDownloader"
#d = {"web": url}urllib.urlencode(d)
from core import scrapertools
if item.subtitle!="":
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail + " " + item.subtitle)
else:
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail)
return
if opciones[seleccion]==config.get_localized_string(30158).replace("jDownloader","pyLoad"): # "Send to pyLoad"
logger.info("Enviando a pyload...")
if item.show!="":
package_name = item.show
else:
package_name = "streamondemand"
from core import pyload_client
pyload_client.download(url=item.url,package_name=package_name)
return
elif opciones[seleccion]==config.get_localized_string(30164): # Delete file from downloads
# "extra" holds the filename in favorites
os.remove( item.url )
xbmc.executebuiltin( "Container.Refresh" )
return
# One of the videos was chosen
elif seleccion < len(video_urls):
mediaurl = video_urls[seleccion][1]
if len(video_urls[seleccion])>3:
wait_time = video_urls[seleccion][2]
item.subtitle = video_urls[seleccion][3]
elif len(video_urls[seleccion])>2:
wait_time = video_urls[seleccion][2]
else:
wait_time = 0
view = True
# Download
elif opciones[seleccion]==config.get_localized_string(30153): # "Download"
download_title = item.fulltitle
if item.hasContentDetails=="true":
download_title = item.contentTitle
# The highest-quality video is the last one
mediaurl = video_urls[len(video_urls)-1][1]
from core import downloadtools
keyboard = xbmc.Keyboard(download_title)
keyboard.doModal()
if (keyboard.isConfirmed()):
download_title = keyboard.getText()
devuelve = downloadtools.downloadbest(video_urls,download_title)
if devuelve==0:
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok("Download", "Scaricato con successo")
elif devuelve==-1:
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok("Download", "Download interrotto")
else:
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok("Download", "Errore nel download")
return
elif opciones[seleccion]==config.get_localized_string(30154): #"Remove from favorites"
from channels import favoritos
# "extra" holds the filename in favorites
favoritos.deletebookmark(urllib.unquote_plus( item.extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , item.title , config.get_localized_string(30105)) # 'Removed from favorites'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30159): #"Delete download permanently"
from channels import descargas
descargas.delete_error_bookmark(urllib.unquote_plus( item.extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Removed from the list'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30160): #"Move back to download list":
from channels import descargas
descargas.mover_descarga_error_a_pendiente(urllib.unquote_plus( item.extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30107)) # 'Moved back to the download list'
return
elif opciones[seleccion]==config.get_localized_string(30155): #"Add to favorites":
from channels import favoritos
from core import downloadtools
download_title = item.fulltitle
download_thumbnail = item.thumbnail
download_plot = item.plot
if item.hasContentDetails=="true":
download_title = item.contentTitle
download_thumbnail = item.contentThumbnail
download_plot = item.contentPlot
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(download_title)+" ["+item.channel+"]")
keyboard.doModal()
if keyboard.isConfirmed():
title = keyboard.getText()
favoritos.savebookmark(titulo=title,url=item.url,thumbnail=download_thumbnail,server=item.server,plot=download_plot,fulltitle=title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30108)) # 'added to favorites'
return
elif opciones[seleccion]==config.get_localized_string(30156): #"Remove from download list":
# The category is the filename in the download list
from channels import descargas
descargas.deletebookmark((urllib.unquote_plus( item.extra )))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Removed from the download list'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30157): #"Add to download list":
from core import downloadtools
download_title = item.fulltitle
download_thumbnail = item.thumbnail
download_plot = item.plot
if item.hasContentDetails=="true":
download_title = item.contentTitle
download_thumbnail = item.contentThumbnail
download_plot = item.contentPlot
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(download_title))
keyboard.doModal()
if keyboard.isConfirmed():
download_title = keyboard.getText()
from channels import descargas
descargas.savebookmark(titulo=download_title,url=item.url,thumbnail=download_thumbnail,server=item.server,plot=download_plot,fulltitle=download_title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , download_title , config.get_localized_string(30109)) # 'added to the download list'
return
elif opciones[seleccion] == config.get_localized_string(30161): # "Add to Library": # Library
titulo = item.fulltitle
if titulo == "":
titulo = item.title
#library.savelibrary(titulo,item.url,item.thumbnail,item.server,item.plot,canal=item.channel,category=item.category,Serie=item.show)
# TODO: movies ONLY?
#logger.debug(item.tostring('\n'))
new_item = item.clone(title=titulo, action="play_from_library", category="Cine",
fulltitle=item.fulltitle, channel=item.channel)
#logger.debug(new_item.tostring('\n'))
insertados, sobreescritos, fallidos = library.save_library_movie(new_item)
advertencia = xbmcgui.Dialog()
if fallidos == 0:
advertencia.ok(config.get_localized_string(30131), titulo,
config.get_localized_string(30135)) # 'added to the library'
return
elif opciones[seleccion]==config.get_localized_string(30162): #"Search Trailer":
config.set_setting("subtitulo", "false")
xbmc.executebuiltin("XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="trailertools", action="buscartrailer", contextual=True).tourl()))
return
# If there is no mediaurl, the video is not there :)
logger.info("streamondemand.platformcode.xbmctools mediaurl="+mediaurl)
if mediaurl=="":
if server == "unknown":
alertUnsopportedServer()
else:
alertnodisponibleserver(item.server)
return
# If there is a wait time (as on megaupload), enforce it now
if wait_time>0:
continuar = handle_wait(wait_time,server,"Cargando vídeo...")
if not continuar:
return
# Get Library data (only for strm files that are in the library)
if strmfile:
xlistitem = getLibraryInfo(mediaurl)
else:
play_title = item.fulltitle
play_thumbnail = item.thumbnail
play_plot = item.plot
if item.hasContentDetails=="true":
play_title = item.contentTitle
play_thumbnail = item.contentThumbnail
play_plot = item.contentPlot
try:
xlistitem = xbmcgui.ListItem( play_title, iconImage="DefaultVideo.png", thumbnailImage=play_thumbnail, path=mediaurl)
except:
xlistitem = xbmcgui.ListItem( play_title, iconImage="DefaultVideo.png", thumbnailImage=play_thumbnail)
xlistitem.setInfo( "video", { "Title": play_title, "Plot" : play_plot , "Studio" : item.channel , "Genre" : item.category } )
#set_infoLabels(listitem,plot) # Change introduced by super_berny to add infoLabels to the ListItem
# Launch the player
if strmfile and not item.from_biblioteca: # For a strm file no explicit play is needed
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem)
if item.subtitle != "":
xbmc.sleep(2000)
xbmc.Player().setSubtitles(item.subtitle)
#Moved here from the "torrent" connector
elif item.server=="torrent":
#Available options for playing torrents
torrent_options = []
torrent_options.append(["Client (necessario libtorrent)"])
torrent_options.append(["Client interno MCT (necessario libtorrent)"])
#External plugins; others can be added
if xbmc.getCondVisibility('System.HasAddon("plugin.video.xbmctorrent")'):
torrent_options.append(["Plugin esterno: xbmctorrent","plugin://plugin.video.xbmctorrent/play/%s"])
if xbmc.getCondVisibility('System.HasAddon("plugin.video.pulsar")'):
torrent_options.append(["Plugin esterno: pulsar","plugin://plugin.video.pulsar/play?uri=%s"])
if xbmc.getCondVisibility('System.HasAddon("plugin.video.quasar")'):
torrent_options.append(["Plugin esterno: quasar","plugin://plugin.video.quasar/play?uri=%s"])
if xbmc.getCondVisibility('System.HasAddon("plugin.video.stream")'):
torrent_options.append(["Plugin esterno: stream","plugin://plugin.video.stream/play/%s"])
if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrenter")'):
torrent_options.append(["Plugin esterno: torrenter","plugin://plugin.video.torrenter/?action=playSTRM&url=%s"])
if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrentin")'):
torrent_options.append(["Plugin esterno: torrentin","plugin://plugin.video.torrentin/?uri=%s&image="])
if len(torrent_options)>1:
seleccion = xbmcgui.Dialog().select("Aprire torrent con...", [opcion[0] for opcion in torrent_options])
else:
seleccion = 0
#External plugins
if seleccion > 1:
mediaurl = urllib.quote_plus(item.url)
xbmc.executebuiltin( "PlayMedia(" + torrent_options[seleccion][1] % mediaurl +")" )
if seleccion ==1:
from platformcode import mct
mct.play( mediaurl, xbmcgui.ListItem("", iconImage=item.thumbnail, thumbnailImage=item.thumbnail), subtitle=item.subtitle )
#Built-in player (libtorrent)
if seleccion == 0:
import time
videourl = None
played = False
#Import the client
from btserver import Client
#Start the client:
c = Client(url=mediaurl, is_playing_fnc= xbmc.Player().isPlaying, wait_time=None, timeout=5, temp_path =os.path.join(config.get_data_path(),"torrent") )
#Show the progress dialog
progreso = xbmcgui.DialogProgress()
progreso.create( "streamondemand - Torrent" , "Avviando...")
#While the dialog has not been cancelled and the client is not closed
while not progreso.iscanceled() and not c.closed:
try:
#Get the torrent status
s = c.status
#Build the three lines with the torrent info
txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \
(s.progress_file, s.file_size, s.str_state, s._download_rate)
txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trackers: %d' % \
(s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, s.trackers)
txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \
(s.trk_peers,s.dht_peers, s.pex_peers, s.lsd_peers)
progreso.update(s.buffer,txt, txt2, txt3)
time.sleep(1)
#If the buffer is full and playback has not started yet, start it
if s.buffer == 100 and not played:
#Close the progress dialog
progreso.close()
#Get the torrent's playlist
videourl = c.get_play_list()
#Start the player
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
playlist.add( videourl, xlistitem )
xbmcPlayer = xbmc.Player()
xbmcPlayer.play(playlist)
#Mark it as played so it is not started again
played = True
#And wait for the player to close
while xbmc.Player().isPlaying():
time.sleep(1)
#Once it is closed, show the dialog again
progreso.create( "streamondemand - Torrent" , "Avviando...")
except:
import traceback
logger.info(traceback.format_exc())
break
progreso.update(100,"Terminato, elimina dati"," "," ")
#Stop the client
if not c.closed:
c.stop()
#And close the progress dialog
progreso.close()
return
else:
logger.info("player_mode="+config.get_setting("player_mode"))
logger.info("mediaurl="+mediaurl)
if config.get_setting("player_mode")=="3" or "megacrypter.com" in mediaurl:
import download_and_play
download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") )
return
elif config.get_setting("player_mode")=="0" or (config.get_setting("player_mode")=="3" and mediaurl.startswith("rtmp")):
# Add the listitem to a playlist
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
playlist.add( mediaurl, xlistitem )
# Play
playersettings = config.get_setting('player_type')
logger.info("streamondemand.platformcode.xbmctools playersettings="+playersettings)
if config.get_system_platform()=="xbox":
player_type = xbmc.PLAYER_CORE_AUTO
if playersettings == "0":
player_type = xbmc.PLAYER_CORE_AUTO
logger.info("streamondemand.platformcode.xbmctools PLAYER_CORE_AUTO")
elif playersettings == "1":
player_type = xbmc.PLAYER_CORE_MPLAYER
logger.info("streamondemand.platformcode.xbmctools PLAYER_CORE_MPLAYER")
elif playersettings == "2":
player_type = xbmc.PLAYER_CORE_DVDPLAYER
logger.info("streamondemand.platformcode.xbmctools PLAYER_CORE_DVDPLAYER")
xbmcPlayer = xbmc.Player( player_type )
else:
xbmcPlayer = xbmc.Player()
xbmcPlayer.play(playlist)
if item.channel=="cuevana" and item.subtitle!="":
logger.info("subtitulo="+subtitle)
if item.subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
logger.info("streamondemand.platformcode.xbmctools Con subtitulos")
setSubtitles()
elif config.get_setting("player_mode")=="1":
logger.info("mediaurl :"+ mediaurl)
logger.info("Tras setResolvedUrl")
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl))
elif config.get_setting("player_mode")=="2":
xbmc.executebuiltin( "PlayMedia("+mediaurl+")" )
if item.subtitle!="" and view:
logger.info("Subtítulos externos: "+item.subtitle)
xbmc.Player().setSubtitles(item.subtitle)
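The torrent branch above boils down to one time.sleep pattern: poll a status object once per second, act when a threshold is reached, and keep sleeping while the player runs. Below is a minimal, generic sketch of that loop; poll_until and the state dict are illustrative names, not part of streamondemand:

import time

def poll_until(condition, timeout=30, interval=1):
    """Return True as soon as condition() is truthy, False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(interval)  # yield the CPU between checks
    return False

# Example: give up after 2 seconds if the flag never flips.
state = {"ready": False}
print(poll_until(lambda: state["ready"], timeout=2, interval=0.5))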
Example 78
Project: virtmgr Source File: views.py
def index(request, host_id, vname):
if not request.user.is_authenticated():
return HttpResponseRedirect('/')
kvm_host = Host.objects.get(user=request.user.id, id=host_id)
def add_error(msg, type_err):
error_msg = Log(host_id=host_id,
type=type_err,
message=msg,
user_id=request.user.id
)
error_msg.save()
def get_vms():
try:
vname = {}
for id in conn.listDomainsID():
id = int(id)
dom = conn.lookupByID(id)
vname[dom.name()] = dom.info()[0]
for id in conn.listDefinedDomains():
dom = conn.lookupByName(id)
vname[dom.name()] = dom.info()[0]
return vname
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_storages():
try:
storages = []
for name in conn.listStoragePools():
storages.append(name)
for name in conn.listDefinedStoragePools():
storages.append(name)
return storages
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def vm_conn():
try:
flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
auth = [flags, creds, None]
uri = 'qemu+tcp://' + kvm_host.ipaddr + '/system'
conn = libvirt.openAuth(uri, auth, 0)
return conn
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_dom(vname):
try:
dom = conn.lookupByName(vname)
return dom
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
if not kvm_host.login or not kvm_host.passwd:
def creds(credentials, user_data):
for credential in credentials:
if credential[0] == libvirt.VIR_CRED_AUTHNAME:
credential[4] = request.session['login_kvm']
if len(credential[4]) == 0:
credential[4] = credential[3]
elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
credential[4] = request.session['passwd_kvm']
else:
return -1
return 0
else:
def creds(credentials, user_data):
for credential in credentials:
if credential[0] == libvirt.VIR_CRED_AUTHNAME:
credential[4] = kvm_host.login
if len(credential[4]) == 0:
credential[4] = credential[3]
elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
credential[4] = kvm_host.passwd
else:
return -1
return 0
def get_vm_active():
try:
state = dom.isActive()
return state
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_uuid():
try:
xml = dom.XMLDesc(0)
uuid = util.get_xml_path(xml, "/domain/uuid")
return uuid
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_xml():
try:
xml = dom.XMLDesc(0)
xml_spl = xml.split('\n')
return xml_spl
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_mem():
try:
xml = dom.XMLDesc(0)
mem = util.get_xml_path(xml, "/domain/currentMemory")
mem = int(mem) * 1024
return mem
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_core():
try:
xml = dom.XMLDesc(0)
cpu = util.get_xml_path(xml, "/domain/vcpu")
return cpu
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_vnc():
try:
xml = dom.XMLDesc(0)
vnc = util.get_xml_path(xml, "/domain/devices/graphics/@port")
return vnc
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_hdd():
try:
xml = dom.XMLDesc(0)
hdd_path = util.get_xml_path(xml, "/domain/devices/disk[1]/source/@file")
hdd_fmt = util.get_xml_path(xml, "/domain/devices/disk[1]/driver/@type")
#image = re.sub('\/.*\/', '', hdd_path)
size = dom.blockInfo(hdd_path, 0)[0]
#return image, size, hdd_fmt
return hdd_path, size, hdd_fmt
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_cdrom():
try:
xml = dom.XMLDesc(0)
cdr_path = util.get_xml_path(xml, "/domain/devices/disk[2]/source/@file")
if cdr_path:
#image = re.sub('\/.*\/', '', cdr_path)
size = dom.blockInfo(cdr_path, 0)[0]
#return image, cdr_path, size
return cdr_path, cdr_path, size
else:
return cdr_path
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_boot_menu():
try:
xml = dom.XMLDesc(0)
boot_menu = util.get_xml_path(xml, "/domain/os/bootmenu/@enable")
return boot_menu
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_arch():
try:
xml = dom.XMLDesc(0)
arch = util.get_xml_path(xml, "/domain/os/type/@arch")
return arch
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_nic():
try:
xml = dom.XMLDesc(0)
mac = util.get_xml_path(xml, "/domain/devices/interface/mac/@address")
nic = util.get_xml_path(xml, "/domain/devices/interface/source/@network")
if nic is None:
nic = util.get_xml_path(xml, "/domain/devices/interface/source/@bridge")
return mac, nic
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def mnt_iso_on(vol):
try:
for storage in storages:
stg = conn.storagePoolLookupByName(storage)
for img in stg.listVolumes():
if vol == img:
vl = stg.storageVolLookupByName(vol)
xml = """<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<target dev='hdc' bus='ide'/>
<source file='%s'/>
<readonly/>
</disk>""" % vl.path()
dom.attachDevice(xml)
xmldom = dom.XMLDesc(0)
conn.defineXML(xmldom)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def mnt_iso_off(vol):
try:
for storage in storages:
stg = conn.storagePoolLookupByName(storage)
for img in stg.listVolumes():
if vol == img:
vl = stg.storageVolLookupByName(vol)
xml = dom.XMLDesc(0)
iso = "<disk type='file' device='cdrom'>\n <driver name='qemu' type='raw'/>\n <source file='%s'/>" % vl.path()
xmldom = xml.replace("<disk type='file' device='cdrom'>\n <driver name='qemu' type='raw'/>", iso)
conn.defineXML(xmldom)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def umnt_iso_on():
try:
xml = """<disk type='file' device='cdrom'>
<driver name="qemu" type='raw'/>
<target dev='hdc' bus='ide'/>
<readonly/>
</disk>"""
dom.attachDevice(xml)
xmldom = dom.XMLDesc(0)
conn.defineXML(xmldom)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def umnt_iso_off():
try:
xml = dom.XMLDesc(0)
cdrom = get_vm_cdrom()[1]
xmldom = xml.replace("<source file='%s'/>\n" % cdrom,"")
conn.defineXML(xmldom)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def find_all_iso():
try:
iso = []
for storage in storages:
stg = conn.storagePoolLookupByName(storage)
stg.refresh(0)
for img in stg.listVolumes():
if re.findall(".iso", img) or re.findall(".ISO", img):
iso.append(img)
return iso
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_autostart():
try:
return dom.autostart()
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def page_refresh():
try:
return HttpResponseRedirect('/vm/' + host_id + '/' + vname + '/' )
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_vm_state():
try:
return dom.info()[0]
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def vm_cpu_usage():
try:
nbcore = conn.getInfo()[2]
cpu_use_ago = dom.info()[4]
time.sleep(1)
cpu_use_now = dom.info()[4]
diff_usage = cpu_use_now - cpu_use_ago
cpu_usage = 100 * diff_usage / (1 * nbcore * 10**9L)
return cpu_usage
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_memusage():
try:
allmem = conn.getInfo()[1] * 1048576
dom_mem = dom.info()[1] * 1024
percent = (dom_mem * 100) / allmem
return allmem, percent
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_all_core():
try:
allcore = conn.getInfo()[2]
return allcore
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def vm_create_snapshot():
try:
xml = """<domainsnapshot>\n
<name>%d</name>\n
<state>shutoff</state>\n
<creationTime>%d</creationTime>\n""" % (time.time(), time.time())
xml += dom.XMLDesc(0)
xml += """<active>0</active>\n
</domainsnapshot>"""
dom.snapshotCreateXML(xml, 0)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
def get_snapshot_num():
try:
return dom.snapshotNum(0)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
conn = vm_conn()
errors = []
if conn is None:
return HttpResponseRedirect('/overview/' + host_id + '/')
all_vm = get_vms()
dom = get_dom(vname)
active = get_vm_active()
state = get_vm_state()
uuid = get_vm_uuid()
memory = get_vm_mem()
core = get_vm_core()
autostart = get_vm_autostart()
vnc_port = get_vm_vnc()
hdd = get_vm_hdd()
boot_menu = get_vm_boot_menu()
vm_arch = get_vm_arch()
vm_nic = get_vm_nic()
cdrom = get_vm_cdrom()
storages = get_storages()
isos = find_all_iso()
all_core = get_all_core()
cpu_usage = vm_cpu_usage()
mem_usage = get_memusage()
num_snapshot = get_snapshot_num()
vm_xml = get_vm_xml()
# Post form html
if request.method == 'POST':
if request.POST.get('suspend',''):
try:
dom.suspend()
msg = _('Suspend VM: ')
msg = msg + vname
add_error(msg, 'user')
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
msg = _('Error: VM already suspended')
errors.append(msg)
if request.POST.get('resume',''):
try:
dom.resume()
msg = _('Resume VM: ')
msg = msg + vname
add_error(msg, 'user')
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
msg = _('Error: VM already resumed')
errors.append(msg)
if request.POST.get('start',''):
try:
dom.create()
msg = _('Start VM: ')
msg = msg + vname
add_error(msg, 'user')
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
msg = _('Error: VM already started')
errors.append(msg)
if request.POST.get('shutdown',''):
try:
dom.shutdown()
msg = _('Shutdown VM: ')
msg = msg + vname
add_error(msg, 'user')
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
msg = _('Error: VM already shut down')
errors.append(msg)
if request.POST.get('destroy',''):
try:
dom.destroy()
msg = _('Force shutdown VM: ')
msg = msg + vname
add_error(msg, 'user')
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
msg = _('Error: VM already shut down')
errors.append(msg)
if request.POST.get('snapshot',''):
try:
msg = _('Create snapshot for VM: ')
msg = msg + vname
add_error(msg, 'user')
vm_create_snapshot()
message = _('Successfully created snapshot')
return render_to_response('vm.html', locals())
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
msg = _('Error: create snapshot')
errors.append(msg)
if request.POST.get('auto_on',''):
try:
msg = _('Enable autostart for VM: ')
msg = msg + vname
add_error(msg, 'user')
dom.setAutostart(1)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
if request.POST.get('auto_off',''):
try:
msg = _('Disable autostart for VM: ')
msg = msg + vname
add_error(msg, 'user')
dom.setAutostart(0)
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
if request.POST.get('disconnect',''):
iso = request.POST.get('iso_img','')
if state == 1:
umnt_iso_on()
else:
umnt_iso_off()
if request.POST.get('connect',''):
iso = request.POST.get('iso_img','')
if state == 1:
mnt_iso_on(iso)
else:
mnt_iso_off(iso)
if request.POST.get('undefine',''):
try:
dom.undefine()
msg = _('Undefine VM: ')
msg = msg + vname
add_error(msg, 'user')
return HttpResponseRedirect('/overview/%s/' % (host_id))
except libvirt.libvirtError as e:
add_error(e, 'libvirt')
return "error"
if not errors:
return HttpResponseRedirect('/vm/%s/%s/' % (host_id, vname))
else:
return render_to_response('vm.html', locals())
conn.close()
return render_to_response('vm.html', locals())
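The vm_cpu_usage() helper above measures load by reading the domain's cumulative CPU time, sleeping one second, and differencing the two readings. Here is a minimal sketch of that sampling technique, assuming only some monotonically increasing counter; sample_rate and the os.times() example are illustrative, not part of virtmgr:

import os
import time

def sample_rate(read_counter, interval=1.0):
    """Difference a cumulative counter across a sleep to estimate a rate."""
    before = read_counter()
    time.sleep(interval)  # the sleep defines the measurement window
    after = read_counter()
    return (after - before) / interval

# Example: approximate this process's own CPU usage as a fraction of one core.
print(sample_rate(lambda: sum(os.times()[:2])))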
Example 79
Project: script.trakt Source File: service.py
def onPlayBackStarted(self):
xbmc.sleep(1000)
self.type = None
self.id = None
# take the user start scrobble offset into account
scrobbleStartOffset = kodiUtilities.getSettingAsInt('scrobble_start_offset')*60
if scrobbleStartOffset > 0:
waitFor = 10
waitedFor = 0
# check every 10 seconds whether we can abort or proceed
while not xbmc.abortRequested and scrobbleStartOffset > waitedFor:
waitedFor += waitFor
time.sleep(waitFor)
if not self.isPlayingVideo():
logger.debug('[traktPlayer] Playback stopped before reaching the scrobble offset')
return
# only do anything if we're playing a video
if self.isPlayingVideo():
# get item data from json rpc
logger.debug("[traktPlayer] onPlayBackStarted() - Doing Player.GetItem kodiJsonRequest")
result = kodiUtilities.kodiJsonRequest({'jsonrpc': '2.0', 'method': 'Player.GetItem', 'params': {'playerid': 1}, 'id': 1})
if result:
logger.debug("[traktPlayer] onPlayBackStarted() - %s" % result)
# check for exclusion
_filename = None
try:
_filename = self.getPlayingFile()
except:
logger.debug("[traktPlayer] onPlayBackStarted() - Exception trying to get playing filename, player suddenly stopped.")
return
if kodiUtilities.checkExclusion(_filename):
logger.debug("[traktPlayer] onPlayBackStarted() - '%s' is in exclusion settings, ignoring." % _filename)
return
self.type = result['item']['type']
data = {'action': 'started'}
if (kodiUtilities.getSettingAsBool('scrobble_mythtv_pvr')):
logger.debug('[traktPlayer] Setting is enabled to try scrobbling mythtv pvr recording, if necessary.')
# check type of item
if 'id' not in result['item']:
# do a deeper check to see if we have enough data to perform scrobbles
logger.debug("[traktPlayer] onPlayBackStarted() - Started playing a non-library file, checking available data.")
season = xbmc.getInfoLabel('VideoPlayer.Season')
episode = xbmc.getInfoLabel('VideoPlayer.Episode')
showtitle = xbmc.getInfoLabel('VideoPlayer.TVShowTitle')
year = xbmc.getInfoLabel('VideoPlayer.Year')
video_ids = xbmcgui.Window(10000).getProperty('script.trakt.ids')
if video_ids:
data['video_ids'] = json.loads(video_ids)
logger.debug("[traktPlayer] info - ids: %s, showtitle: %s, Year: %s, Season: %s, Episode: %s" % (video_ids, showtitle, year, season, episode))
if season and episode and (showtitle or video_ids):
# we have season, episode and either a show title or video_ids, can scrobble this as an episode
self.type = 'episode'
data['type'] = 'episode'
data['season'] = int(season)
data['episode'] = int(episode)
data['showtitle'] = showtitle
data['title'] = xbmc.getInfoLabel('VideoPlayer.Title')
if year.isdigit():
data['year'] = int(year)
logger.debug("[traktPlayer] onPlayBackStarted() - Playing a non-library 'episode' - %s - S%02dE%02d - %s." % (data['showtitle'], data['season'], data['episode'], data['title']))
elif (year or video_ids) and not season and not showtitle:
# we have a year or video_id and no season/showtitle info, enough for a movie
self.type = 'movie'
data['type'] = 'movie'
if year.isdigit():
data['year'] = int(year)
data['title'] = xbmc.getInfoLabel('VideoPlayer.Title')
logger.debug("[traktPlayer] onPlayBackStarted() - Playing a non-library 'movie' - %s (%s)." % (data['title'], data.get('year', 'NaN')))
elif showtitle:
title, season, episode = utilities.regex_tvshow(showtitle)
data['type'] = 'episode'
data['season'] = season
data['episode'] = episode
data['title'] = data['showtitle'] = title
logger.debug("[traktPlayer] onPlayBackStarted() - Title: %s, showtitle: %s, season: %d, episode: %d" % (title, showtitle, season, episode))
else:
logger.debug("[traktPlayer] onPlayBackStarted() - Non-library file, not enough data for scrobbling, skipping.")
return
elif self.type == 'episode' or self.type == 'movie':
# get library id
self.id = result['item']['id']
data['id'] = self.id
data['type'] = self.type
if self.type == 'episode':
logger.debug("[traktPlayer] onPlayBackStarted() - Doing multi-part episode check.")
result = kodiUtilities.kodiJsonRequest({'jsonrpc': '2.0', 'method': 'VideoLibrary.GetEpisodeDetails', 'params': {'episodeid': self.id, 'properties': ['tvshowid', 'season', 'episode', 'file']}, 'id': 1})
if result:
logger.debug("[traktPlayer] onPlayBackStarted() - %s" % result)
tvshowid = int(result['episodedetails']['tvshowid'])
season = int(result['episodedetails']['season'])
currentfile = result['episodedetails']['file']
result = kodiUtilities.kodiJsonRequest({'jsonrpc': '2.0', 'method': 'VideoLibrary.GetEpisodes', 'params': {'tvshowid': tvshowid, 'season': season, 'properties': ['episode', 'file'], 'sort': {'method': 'episode'}}, 'id': 1})
if result:
logger.debug("[traktPlayer] onPlayBackStarted() - %s" % result)
# make sure episodes array exists in results
if 'episodes' in result:
multi = []
for i in range(result['limits']['start'], result['limits']['total']):
if currentfile == result['episodes'][i]['file']:
multi.append(result['episodes'][i]['episodeid'])
if len(multi) > 1:
data['multi_episode_data'] = multi
data['multi_episode_count'] = len(multi)
logger.debug("[traktPlayer] onPlayBackStarted() - This episode is part of a multi-part episode.")
else:
logger.debug("[traktPlayer] onPlayBackStarted() - This is a single episode.")
elif (kodiUtilities.getSettingAsBool('scrobble_mythtv_pvr') and self.type == 'unknown' and result['item']['label']):
# If we have label/id but no show type, then this might be a PVR recording.
# DEBUG INFO: This code is useful when trying to figure out what info is available. Many of the fields
# that you'd expect (TVShowTitle, episode, season, etc) are always blank. In Kodi v15, we got the show
# and episode name in the VideoPlayer label. In v16, that's gone, but the Player.Filename infolabel
# is populated with several interesting things. If these things change in future versions, uncommenting
# this code will hopefully provide some useful info in the debug log.
#logger.debug("[traktPlayer] onPlayBackStarted() - TEMP Checking all videoplayer infolabels.")
#for il in ['VideoPlayer.Time','VideoPlayer.TimeRemaining','VideoPlayer.TimeSpeed','VideoPlayer.Duration','VideoPlayer.Title','VideoPlayer.TVShowTitle','VideoPlayer.Season','VideoPlayer.Episode','VideoPlayer.Genre','VideoPlayer.Director','VideoPlayer.Country','VideoPlayer.Year','VideoPlayer.Rating','VideoPlayer.UserRating','VideoPlayer.Votes','VideoPlayer.RatingAndVotes','VideoPlayer.mpaa','VideoPlayer.IMDBNumber','VideoPlayer.EpisodeName','VideoPlayer.PlaylistPosition','VideoPlayer.PlaylistLength','VideoPlayer.Cast','VideoPlayer.CastAndRole','VideoPlayer.Album','VideoPlayer.Artist','VideoPlayer.Studio','VideoPlayer.Writer','VideoPlayer.Tagline','VideoPlayer.PlotOutline','VideoPlayer.Plot','VideoPlayer.LastPlayed','VideoPlayer.PlayCount','VideoPlayer.VideoCodec','VideoPlayer.VideoResolution','VideoPlayer.VideoAspect','VideoPlayer.AudioCodec','VideoPlayer.AudioChannels','VideoPlayer.AudioLanguage','VideoPlayer.SubtitlesLanguage','VideoPlayer.StereoscopicMode','VideoPlayer.EndTime','VideoPlayer.NextTitle','VideoPlayer.NextGenre','VideoPlayer.NextPlot','VideoPlayer.NextPlotOutline','VideoPlayer.NextStartTime','VideoPlayer.NextEndTime','VideoPlayer.NextDuration','VideoPlayer.ChannelName','VideoPlayer.ChannelNumber','VideoPlayer.SubChannelNumber','VideoPlayer.ChannelNumberLabel','VideoPlayer.ChannelGroup','VideoPlayer.ParentalRating','Player.FinishTime','Player.FinishTime(format)','Player.Chapter','Player.ChapterCount','Player.Time','Player.Time(format)','Player.TimeRemaining','Player.TimeRemaining(format)','Player.Duration','Player.Duration(format)','Player.SeekTime','Player.SeekOffset','Player.SeekOffset(format)','Player.SeekStepSize','Player.ProgressCache','Player.Folderpath','Player.Filenameandpath','Player.StartTime','Player.StartTime(format)','Player.Title','Player.Filename']:
# logger.debug("[traktPlayer] TEMP %s : %s" % (il, xbmc.getInfoLabel(il)))
#for k,v in result.iteritems():
# logger.debug("[traktPlayer] onPlayBackStarted() - result - %s : %s" % (k,v))
#for k,v in result['item'].iteritems():
# logger.debug("[traktPlayer] onPlayBackStarted() - result.item - %s : %s" % (k,v))
# As of Kodi v16 with the MythTV PVR addon, the only way I could find to get the TV show and episode
# info is from the Player.Filename infolabel. It shows up like this:
# ShowName [sXXeYY ](year) EpisodeName, channel, PVRFileName
# The season and episode info may or may not be present. For example:
# Elementary s04e10 (2016) Alma Matters, TV (WWMT-HD), 20160129_030000.pvr
# DC's Legends of Tomorrow (2016) Pilot, Part 2, TV (CW W MI), 20160129_010000.pvr
foundLabel = xbmc.getInfoLabel('Player.Filename')
logger.debug("[traktPlayer] onPlayBackStarted() - Found unknown video type with label: %s. Might be a PVR episode, searching Trakt for it." % foundLabel)
splitLabel = foundLabel.rsplit(", ", 2)
logger.debug("[traktPlayer] onPlayBackStarted() - Post-split of label: %s " % splitLabel)
if len(splitLabel) != 3:
logger.debug("[traktPlayer] onPlayBackStarted() - Label doesn't have the ShowName sXXeYY (year) EpisodeName, channel, PVRFileName format that was expected. Giving up.")
return
foundShowAndEpInfo = splitLabel[0]
logger.debug("[traktPlayer] onPlayBackStarted() - show plus episode info: %s" % foundShowAndEpInfo)
splitShowAndEpInfo = re.split(' (s\d\de\d\d)? ?\((\d\d\d\d)\) ',foundShowAndEpInfo, 1)
logger.debug("[traktPlayer] onPlayBackStarted() - Post-split of show plus episode info: %s " % splitShowAndEpInfo)
if len(splitShowAndEpInfo) != 4:
logger.debug("[traktPlayer] onPlayBackStarted() - Show plus episode info doesn't have the ShowName sXXeYY (year) EpisodeName format that was expected. Giving up.")
return
foundShowName = splitShowAndEpInfo[0]
logger.debug("[traktPlayer] onPlayBackStarted() - using show name: %s" % foundShowName)
foundEpisodeName = splitShowAndEpInfo[3]
logger.debug("[traktPlayer] onPlayBackStarted() - using episode name: %s" % foundEpisodeName)
foundEpisodeYear = splitShowAndEpInfo[2]
logger.debug("[traktPlayer] onPlayBackStarted() - using episode year: %s" % foundEpisodeYear)
epYear = None
try:
epYear = int(foundEpisodeYear)
except ValueError:
epYear = None
logger.debug("[traktPlayer] onPlayBackStarted() - verified episode year: %s" % epYear)
# All right, now we have the show name, episode name, and (maybe) episode year. All good, but useless for
# scrobbling since Trakt only understands IDs, not names.
data['video_ids'] = None
data['season'] = None
data['episode'] = None
data['episodeTitle'] = None
# First thing to try, a text query to the Trakt DB looking for this episode. Note
# that we can't search for show and episode together, because the Trakt function gets confused and returns nothing.
newResp = globals.traktapi.getTextQuery(foundEpisodeName, "episode", epYear)
if not newResp:
logger.debug("[traktPlayer] onPlayBackStarted() - Empty Response from getTextQuery, giving up")
else:
logger.debug("[traktPlayer] onPlayBackStarted() - Got Response from getTextQuery: %s" % str(newResp))
# We got something back. See if one of the returned values is for the show we're looking for. Often it's
# not, but since there's no way to tell the search which show we want, this is all we can do.
rightResp = None
for thisResp in newResp:
compareShowName = thisResp.show.title
logger.debug("[traktPlayer] onPlayBackStarted() - comparing show name: %s" % compareShowName)
if thisResp.show.title == foundShowName:
logger.debug("[traktPlayer] onPlayBackStarted() - found the right show, using this response")
rightResp = thisResp
break
if rightResp is None:
logger.debug("[traktPlayer] onPlayBackStarted() - Failed to find matching episode/show via text search.")
else:
# OK, now we have a episode object to work with.
self.type = 'episode'
data['type'] = 'episode'
# You'd think we could just use the episode key that Trakt just returned to us, but the scrobbler
# function (see scrobber.py) only understands the show key plus season/episode values.
showKeys = { }
for eachKey in rightResp.show.keys:
showKeys[eachKey[0]] = eachKey[1]
data['video_ids'] = showKeys
# For some reason, the Trakt search call returns the season and episode as an array in the pk field.
# You'd think individual episode and season fields would be better, but whatever.
data['season'] = rightResp.pk[0]
data['episode'] = rightResp.pk[1]
# At this point if we haven't found the episode data yet, the episode-title-text-search method
# didn't work.
if (not data['season']):
# This text query API is basically the same as searching on the website. Works with alternative
# titles, unlike the scrobble function. Though we can't use the episode year since that would only
# match the show if we're dealing with season 1.
logger.debug("[traktPlayer] onPlayBackStarted() - Searching for show title via getTextQuery: %s" % foundShowName)
newResp = globals.traktapi.getTextQuery(foundShowName, "show", None)
if not newResp:
logger.debug("[traktPlayer] onPlayBackStarted() - Empty Show Response from getTextQuery, falling back on episode text query")
else:
logger.debug("[traktPlayer] onPlayBackStarted() - Got Show Response from getTextQuery: %s" % str(newResp))
# We got something back. Have to assume the first show found is the right one; if there's more than
# one, there's no way to know which to use. Pull the ids from the show data, and store 'em for scrobbling.
showKeys = { }
for eachKey in newResp[0].keys:
showKeys[eachKey[0]] = eachKey[1]
data['video_ids'] = showKeys
# Now to find the episode. There's no search function to look for an episode within a show, but
# we can get all the episodes and look for the title.
while (not data['season']):
logger.debug("[traktPlayer] onPlayBackStarted() - Querying for all seasons/episodes of this show")
epQueryResp = globals.traktapi.getShowWithAllEpisodesList(data['video_ids']['trakt'])
if not epQueryResp:
# Nothing returned. Giving up.
logger.debug("[traktPlayer] onPlayBackStarted() - No response received")
break
else:
# Got the list back. Go through each season.
logger.debug("[traktPlayer] onPlayBackStarted() - Got response with seasons: %s" % str(epQueryResp))
for eachSeason in epQueryResp:
# For each season, check each episode.
logger.debug("[traktPlayer] onPlayBackStarted() - Processing season: %s" % str(eachSeason))
for eachEpisodeNumber in eachSeason.episodes:
thisEpTitle = None
# Get the title. The try block is here in case the title doesn't exist for some entries.
try:
thisEpTitle = eachSeason.episodes[eachEpisodeNumber].title
except:
thisEpTitle = None
logger.debug("[traktPlayer] onPlayBackStarted() - Checking episode number %d with title %s" % (eachEpisodeNumber, thisEpTitle))
if (foundEpisodeName == thisEpTitle):
# Found it! Save the data. The scrobbler wants season and episode number. Which for some
# reason is stored as a pair in the first item in the keys array.
data['season'] = eachSeason.episodes[eachEpisodeNumber].keys[0][0]
data['episode'] = eachSeason.episodes[eachEpisodeNumber].keys[0][1]
# Title too, just for the heck of it. Though it's not actually used.
data['episodeTitle'] = thisEpTitle
break
# If we already found our data, no need to go through the rest of the seasons.
if (data['season']):
break
# Now we've done all we can.
if (data['season']):
# OK, that's everything. Data should be all set for scrobbling.
logger.debug("[traktPlayer] onPlayBackStarted() - Playing a non-library 'episode' : show trakt key %s, season: %d, episode: %d" % (data['video_ids'], data['season'], data['episode']))
else:
# Still no data? Too bad, have to give up.
logger.debug("[traktPlayer] onPlayBackStarted() - Did our best, but couldn't get info for this show and episode. Skipping.")
return
else:
logger.debug("[traktPlayer] onPlayBackStarted() - Video type '%s' unrecognized, skipping." % self.type)
return
pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
plSize = len(pl)
if plSize > 1:
pos = pl.getposition()
if self.plIndex is not None:
logger.debug("[traktPlayer] onPlayBackStarted() - User manually skipped to next (or previous) video, forcing playback ended event.")
self.onPlayBackEnded()
self.plIndex = pos
logger.debug("[traktPlayer] onPlayBackStarted() - Playlist contains %d item(s), and is currently on item %d" % (plSize, (pos + 1)))
self._playing = True
# send dispatch
self.action(data)
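The scrobble-offset loop at the top of onPlayBackStarted() waits a configurable total time but sleeps in 10-second slices so it can bail out early when playback stops or Kodi aborts. A small sketch of that chunked wait, assuming a caller-supplied abort check; wait_with_abort is an illustrative name, not part of script.trakt:

import time

def wait_with_abort(total_seconds, should_abort, slice_seconds=10):
    """Sleep up to total_seconds, waking every slice to re-check an abort flag."""
    waited = 0
    while waited < total_seconds:
        if should_abort():
            return False  # caller asked us to stop early
        step = min(slice_seconds, total_seconds - waited)
        time.sleep(step)
        waited += step
    return True

# Example: a 3-second wait checked once per second, never aborted.
print(wait_with_abort(3, lambda: False, slice_seconds=1))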
Example 80
Project: tp-qemu Source File: openflow_test.py
@error.context_aware
def run(test, params, env):
"""
Test Step:
1. Boot up two virtual machines
2. Set openflow rules
3. Run ping test, nc(tcp, udp) test, check whether openflow rules take
effect.
Params:
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def run_tcpdump_bg(session, addresses, dump_protocol):
"""
Run tcpdump in the background; tcpdump will exit once it catches
a packet matching the rules.
"""
tcpdump_cmd = "killall -9 tcpdump; "
tcpdump_cmd += "tcpdump -iany -n -v %s and 'src %s and dst %s' -c 1 &"
session.cmd_output_safe(tcpdump_cmd % (dump_protocol,
addresses[0], addresses[1]))
if not utils_misc.wait_for(lambda: tcpdump_is_alive(session),
30, 0, 1, "Waiting tcpdump start..."):
raise error.TestNAError("Error, can not run tcpdump")
def dump_catch_data(session, dump_log, catch_reg):
"""
Search data from dump_log
"""
dump_info = session.cmd_output("cat %s" % dump_log)
if re.findall(catch_reg, dump_info, re.I):
return True
return False
def tcpdump_is_alive(session):
"""
Check whether tcpdump is alive
"""
if session.cmd_status("pidof tcpdump"):
return False
return True
def tcpdump_catch_packet_test(session, drop_flow=False):
"""
Check whether tcpdump catches packets matching the rules; once it
catches a matching packet tcpdump will exit.
When drop_flow is 'True', tcpdump should not catch any packets.
"""
packet_receive = not tcpdump_is_alive(session)
if packet_receive == drop_flow:
err_msg = "Error, flow %s" % (drop_flow and "was" or "wasn't")
err_msg += " dropped, tcpdump "
err_msg += "%s " % (packet_receive and "can" or "can not")
err_msg += "receive the packets"
raise error.TestError(err_msg)
logging.info("Correct, flow %s dropped, tcpdump %s receive the packet"
% ((drop_flow and "was" or "was not"),
(packet_receive and "can" or "can not")))
def arp_entry_clean(entry=None):
"""
Clean arp catch in guest
"""
if not entry:
arp_clean_cmd = "arp -n | awk '/^[1-2]/{print \"arp -d \" $1}'|sh"
else:
arp_clean_cmd = "arp -d %s" % entry
for session in sessions:
session.cmd_output_safe(arp_clean_cmd)
def check_arp_info(session, entry, vm, match_mac=None):
arp_info = session.cmd_output("arp -n")
arp_entries = [_ for _ in arp_info.splitlines() if re.match(entry, _)]
match_string = match_mac or "incomplete"
if not arp_entries:
raise error.TestError("Can not find arp entry "
"in %s: %s" % (vm.name, arp_info))
if not re.findall(match_string, arp_entries[0], re.I):
raise error.TestFail("Can not find the mac address"
" %s of %s in arp"
" entry %s" % (mac, vm.name, arp_entries[0]))
def ping_test(session, dst, drop_flow=False):
"""
Ping test, check icmp
"""
ping_status, ping_output = utils_test.ping(dest=dst, count=10,
timeout=20, session=session)
# when drop_flow is true, ping should fail (return non-zero)
# when drop_flow is false, ping should succeed
packets_lost = 100
if ping_status and not drop_flow:
raise error.TestError("Ping should success when not drop_icmp")
elif not ping_status:
packets_lost = utils_test.get_loss_ratio(ping_output)
if drop_flow and packets_lost != 100:
raise error.TestError("When drop_icmp, ping shouldn't works")
if not drop_flow and packets_lost == 100:
raise error.TestError("When not drop_icmp, ping should works")
info_msg = "Correct, icmp flow %s dropped, ping '%s', "
info_msg += "packets lost rate is: '%s'"
logging.info(info_msg % ((drop_flow and "was" or "was not"),
(ping_status and "failed" or "success"),
packets_lost))
def run_ping_bg(vm, dst):
"""
Run ping in background
"""
ping_cmd = "ping %s" % dst
session = vm.wait_for_login()
logging.info("Ping %s in background" % dst)
session.sendline(ping_cmd)
return session
def check_bg_ping(session):
ping_pattern = r"\d+ bytes from \d+.\d+.\d+.\d+:"
ping_pattern += r" icmp_seq=\d+ ttl=\d+ time=.*? ms"
ping_failed_pattern = r"From .*? icmp_seq=\d+ Destination"
ping_failed_pattern += r" Host Unreachable"
try:
out = session.read_until_output_matches([ping_pattern,
ping_failed_pattern])
if re.search(ping_failed_pattern, out[1]):
return False, out[1]
else:
return True, out[1]
except Exception, msg:
return False, msg
def file_transfer(sessions, addresses, timeout):
prepare_cmd = "dd if=/dev/zero of=/tmp/copy_file count=1024 bs=1M"
md5_cmd = "md5sum /tmp/copy_file"
port = params.get("shell_port")
prompt = params.get("shell_prompt")
username = params.get("username")
password = params.get("password")
sessions[0].cmd(prepare_cmd, timeout=timeout)
ori_md5 = sessions[0].cmd_output(md5_cmd)
scp_cmd = ("scp -v -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -r "
"-P %s /tmp/copy_file %s@\[%s\]:/tmp/copy_file" %
(port, username, addresses[1]))
sessions[0].sendline(scp_cmd)
remote.handle_prompts(sessions[0], username, password, prompt, 600)
new_md5 = sessions[1].cmd_output(md5_cmd)
for session in sessions:
session.cmd("rm -f /tmp/copy_file")
if new_md5 != ori_md5:
raise error.TestFail("Md5 value changed after file transfer, "
"original is %s and the new file"
" is: %s" % (ori_md5, new_md5))
def nc_connect_test(sessions, addresses, drop_flow=False, nc_port="8899",
udp_model=False):
"""
Nc connect test, check tcp and udp
"""
nc_log = "/tmp/nc_log"
server_cmd = "nc -l %s"
client_cmd = "echo client | nc %s %s"
if udp_model:
server_cmd += " -u -w 3"
client_cmd += " -u -w 3"
server_cmd += " > %s &"
client_cmd += " &"
try:
sessions[1].cmd_output_safe(server_cmd % (nc_port, nc_log))
sessions[0].cmd_output_safe(client_cmd % (addresses[1], nc_port))
nc_protocol = udp_model and "UDP" or "TCP"
nc_connect = False
if utils_misc.wait_for(
lambda: dump_catch_data(sessions[1], nc_log, "client"),
10, 0, 2, text="Wait '%s' connect" % nc_protocol):
nc_connect = True
if nc_connect == drop_flow:
err_msg = "Error, '%s' " % nc_protocol
err_msg += "flow %s " % (drop_flow and "was" or "was not")
err_msg += "dropped, nc connect should"
err_msg += " '%s'" % (nc_connect and "failed" or "success")
raise error.TestError(err_msg)
logging.info("Correct, '%s' flow %s dropped, and nc connect %s" %
(nc_protocol, (drop_flow and "was" or "was not"),
(nc_connect and "success" or "failed")))
finally:
for session in sessions:
session.cmd_output_safe("killall nc || killall ncat")
session.cmd("%s %s" % (clean_cmd, nc_log),
ignore_all_errors=True)
def acl_rules_check(acl_rules, flow_options):
flow_options = re.sub("action=", "actions=", flow_options)
if "arp" in flow_options:
flow_options = re.sub("nw_src=", "arp_spa=", flow_options)
flow_options = re.sub("nw_dst=", "arp_tpa=", flow_options)
acl_options = re.split(",", flow_options)
for line in acl_rules.splitlines():
rule = [_.lower() for _ in re.split("[ ,]", line) if _]
item_in_rule = 0
for acl_item in acl_options:
if acl_item.lower() in rule:
item_in_rule += 1
if item_in_rule == len(acl_options):
return True
return False
def remove_plus_items(open_flow_rules):
plus_items = ["duration", "n_packets", "n_bytes", "idle_age", "hard_age"]
for plus_item in plus_items:
open_flow_rules = re.sub("%s=.*?," % plus_item, "",
open_flow_rules)
return open_flow_rules
timeout = int(params.get("login_timeout", '360'))
prepare_timeout = int(params.get("prepare_timeout", '360'))
clean_cmd = params.get("clean_cmd", "rm -f")
sessions = []
addresses = []
vms = []
bg_ping_session = None
error.context("Init boot the vms")
for vm_name in params.get("vms", "vm1 vm2").split():
vms.append(env.get_vm(vm_name))
for vm in vms:
vm.verify_alive()
sessions.append(vm.wait_for_login(timeout=timeout))
addresses.append(vm.get_address())
# set openflow rules:
br_name = params.get("netdst", "ovs0")
f_protocol = params.get("flow", "arp")
f_base_options = "%s,nw_src=%s,nw_dst=%s" % (f_protocol, addresses[0],
addresses[1])
for session in sessions:
session.cmd("service iptables stop; iptables -F",
ignore_all_errors=True)
try:
for drop_flow in [True, False]:
if drop_flow:
f_command = "add-flow"
f_options = f_base_options + ",action=drop"
drop_icmp = eval(params.get("drop_icmp", 'True'))
drop_tcp = eval(params.get("drop_tcp", 'True'))
drop_udp = eval(params.get("drop_udp", 'True'))
else:
f_command = "mod-flows"
f_options = f_base_options + ",action=normal"
drop_icmp = False
drop_tcp = False
drop_udp = False
error.base_context("Test prepare")
error.context("Do %s %s on %s" % (f_command, f_options, br_name))
utils_net.openflow_manager(br_name, f_command, f_options)
acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
if not acl_rules_check(acl_rules, f_options):
raise error.TestFail("Can not find the rules from"
" ovs-ofctl: %s" % acl_rules)
error.context("Run tcpdump in guest %s" % vms[1].name, logging.info)
run_tcpdump_bg(sessions[1], addresses, f_protocol)
if drop_flow or f_protocol != "arp":
error.context("Clean arp cache in both guest", logging.info)
arp_entry_clean(addresses[1])
error.base_context("Exec '%s' flow '%s' test" %
(f_protocol, drop_flow and "drop" or "normal"))
if drop_flow:
error.context("Ping test form vm1 to vm2", logging.info)
ping_test(sessions[0], addresses[1], drop_icmp)
if params.get("run_file_transfer") == "yes":
error.context("Transfer file form vm1 to vm2", logging.info)
file_transfer(sessions, addresses, prepare_timeout)
else:
error.context("Ping test form vm1 to vm2 in background",
logging.info)
bg_ping_session = run_ping_bg(vms[0], addresses[1])
if f_protocol == 'arp' and drop_flow:
error.context("Check arp inside %s" % vms[0].name, logging.info)
check_arp_info(sessions[0], addresses[1], vms[0])
elif f_protocol == 'arp' or params.get("check_arp") == "yes":
time.sleep(2)
error.context("Check arp inside guests.", logging.info)
for index, address in enumerate(addresses):
sess_index = (index + 1) % 2
mac = vms[index].virtnet.get_mac_address(0)
check_arp_info(sessions[sess_index], address, vms[index],
mac)
error.context("Run nc connect test via tcp", logging.info)
nc_connect_test(sessions, addresses, drop_tcp)
error.context("Run nc connect test via udp", logging.info)
nc_connect_test(sessions, addresses, drop_udp, udp_model=True)
error.context("Check tcpdump data catch", logging.info)
tcpdump_catch_packet_test(sessions[1], drop_flow)
finally:
openflow_rules_ori = utils_net.openflow_manager(br_name,
"dump-flows").stdout
openflow_rules_ori = remove_plus_items(openflow_rules_ori)
utils_net.openflow_manager(br_name, "del-flows", f_protocol)
openflow_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
openflow_rules = remove_plus_items(openflow_rules)
removed_rule = list(set(openflow_rules_ori.splitlines()) -
set(openflow_rules.splitlines()))
if f_protocol == "tcp":
error.context("Run nc connect test via tcp", logging.info)
nc_connect_test(sessions, addresses)
elif f_protocol == "udp":
error.context("Run nc connect test via udp", logging.info)
nc_connect_test(sessions, addresses, udp_model=True)
for session in sessions:
session.close()
failed_msg = []
if (not removed_rule or
not acl_rules_check(removed_rule[0], f_options)):
failed_msg.append("Failed to delete %s" % f_options)
if bg_ping_session:
bg_ping_ok = check_bg_ping(bg_ping_session)
bg_ping_session.close()
if not bg_ping_ok[0]:
failed_msg.append("There is something wrong happen in "
"background ping: %s" % bg_ping_ok[1])
if failed_msg:
raise error.TestFail(failed_msg)
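Two sleeps do the timing work in the test above: utils_misc.wait_for() polls until tcpdump is alive, and time.sleep(2) lets guest ARP tables settle before they are read. When the operation can simply be repeated, the same idea is often written as a retry loop with a fixed delay between attempts; a sketch under that assumption, where retry and flaky are illustrative names, not part of tp-qemu:

import time

def retry(action, attempts=3, delay=2):
    """Call action(); on exception, sleep `delay` seconds and try again."""
    last_exc = None
    for _ in range(attempts):
        try:
            return action()
        except Exception as exc:
            last_exc = exc
            time.sleep(delay)  # let the system settle before retrying
    raise last_exc

# Example: flaky() fails once, then succeeds on the retry.
calls = {"n": 0}
def flaky():
    calls["n"] += 1
    if calls["n"] < 2:
        raise RuntimeError("not ready yet")
    return "ok"
print(retry(flaky, attempts=3, delay=0.1))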
Example 81
Project: LTLMoP Source File: BugControllerHandler.py
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
q_gBundle = [[],[]] # goal bundle
q_overlap = [[],[]] # overlapping points with robot range
pose = self.pose_handler.getPose() # Find our current configuration
#Plot the robot on the map in figure 1
if self.PLOT == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return False
# Check if Vicon has cut out
if math.isnan(pose[2]):
print "no vicon pose"
print "WARNING: No Vicon data! Pausing."
#self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
###This part runs when the robot enters a new region; otherwise, the original map is used.
if not self.previous_current_reg == current_reg:
#print 'getting into bug algorithm'
#clean up the previous self.map_work
self.map_work = Polygon.Polygon()
# NOTE: Information about region geometry can be found in self.proj.rfi.regions
# create polygon list for regions other than the current_reg and the next_reg
self.map_work += self.map[self.proj.rfi.regions[current_reg].name]
self.map_work += self.map[self.proj.rfi.regions[next_reg].name]
# building current polygon and destination polygon
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
#set velocity to zero before finding the transFace
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
print "Current reg is " + str(self.proj.rfi.regions[current_reg].name.lower())
print "Next reg is "+ str(self.proj.rfi.regions[next_reg].name.lower())
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
q_gBundle = q_gBundle.transpose()
# Find the closest face to the current position
max_magsq = 1000000
for tf in q_gBundle:
magsq = (tf[0] - pose[0])**2 + (tf[1] - pose[1])**2
if magsq < max_magsq:
connection = 0
tf = tf+(tf-asarray(self.currentRegionPoly.center()))/norm(tf-asarray(self.currentRegionPoly.center()))*2.1*self.PioneerLengthHalf
if not self.nextRegionPoly.covers(PolyShapes.Circle(self.PioneerLengthHalf*2,(tf[0],tf[1]))):
tf = tf-(tf-asarray(self.currentRegionPoly.center()))/norm(tf-asarray(self.currentRegionPoly.center()))*4.2*self.PioneerLengthHalf
if self.nextRegionPoly.covers(PolyShapes.Circle(self.PioneerLengthHalf*2,(tf[0],tf[1]))):
connection = 1
else:
connection = 1
if connection == 1:
pt1 = tf
max_magsq = magsq
transFace = 1
self.q_g[0] = pt1[0]
self.q_g[1] = pt1[1]
else:
sample = False
while not sample:
self.q_g[0],self.q_g[1] = self.nextRegionPoly.sample(random.random)
robo = PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
if not bool(robo - self.nextRegionPoly):
sample = True
"""
# Push the goal point to somewhere inside the next region to ensure the robot will get there.(CHECK!!)
self.q_g = self.q_g+(self.q_g-asarray(self.currentRegionPoly.center()))/norm(self.q_g-asarray(self.currentRegionPoly.center()))*3*self.PioneerLengthHalf
if not self.nextRegionPoly.isInside(self.q_g[0],self.q_g[1]):
self.q_g = self.q_g-(self.q_g-asarray(self.currentRegionPoly.center()))/norm(self.q_g-asarray(self.currentRegionPoly.center()))*6*self.PioneerLengthHalf
"""
#plot exiting point
if self.PLOT_EXIT == True:
plt.figure(self.overlap_figure)
plt.clf()
plt.plot(q_gBundle[:,0],q_gBundle[:,1],'ko' )
plt.plot(self.q_g[0],self.q_g[1],'ro')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
##################################################
#######check whether obstacle is detected#########
##################################################
#Update pose,update self.robot, self.realRobot orientation
self.robot.shift(pose[0]-self.prev_pose[0],pose[1]-self.prev_pose[1])
self.realRobot.shift(pose[0]-self.prev_pose[0],pose[1]-self.prev_pose[1])
self.robot.rotate(pose[2]-self.prev_pose[2],pose[0],pose[1])
self.realRobot.rotate(pose[2]-self.prev_pose[2],pose[0],pose[1])
self.prev_pose = pose
############################
########### STEP 1##########
############################
##Check whether obsRange overlaps with an obstacle or the boundary (overlap returns the part of the robot not covered by the region)
# for real Pioneer robot
if self.system == 1:
# motion controller is not in boundary following mode
if self.boundary_following == False:
if self.robocomm.getReceiveObs() == False:
overlap = self.robot - ( self.map_work)
else:
overlap = self.robot - ( self.map_work - self.robocomm.getObsPoly())
else: #TRUE
# use a robot with full range all around it
Robot = PolyShapes.Circle(self.obsRange,(pose[0],pose[1]))
Robot.shift(self.shift*cos(pose[2]),self.shift*sin(pose[2]))
if self.robocomm.getReceiveObs() == False:
overlap = Robot - ( self.map_work)
else:
overlap = Robot - ( self.map_work - self.robocomm.getObsPoly())
# for ODE
else:
if self.boundary_following == False:
overlap = self.robot - (self.map_work)
else:#TRUE
overlap = self.robot - (self.map_work)
if self.boundary_following == False:
if bool(overlap): ## overlap of obstacles
#print "There MAYBE overlap~~ check connection to goal"
# check whether the real robot or and path to goal overlap with the obstacle
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
path = PolyUtils.convexHull(self.realRobot + QGoalPoly)
if self.system == 1:
if self.robocomm.getReceiveObs() == False:
pathOverlap = path - ( self.map_work)
else:
pathOverlap = path - ( self.map_work - self.robocomm.getObsPoly())
else:
pathOverlap = path - ( self.map_work)
if bool(pathOverlap): # there is overlapping, go into bounding following mode
#print "There IS overlap"
self.q_hit = mat([pose[0],pose[1]]).T
self.boundary_following = True
#Generate m-line polygon
QHitPoly = PolyShapes.Circle(self.PioneerLengthHalf/4,(pose[0],pose[1]))
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf/4,(self.q_g[0],self.q_g[1]))
self.m_line = PolyUtils.convexHull(QHitPoly + QGoalPoly)
#plot the first overlap
if self.PLOT_M_LINE == True:
plt.figure(self.overlap_figure)
plt.clf()
self.plotPoly(QHitPoly,'k')
self.plotPoly(QGoalPoly,'k')
self.plotPoly(overlap,'g')
self.plotPoly(self.m_line,'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
else: ##head towards the q_goal
if self.system == 1:
if self.robocomm.getReceiveObs() == False: # wait for obstacles from Pioneer
vx = 0
vy = 0
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
#print "no obstacles 2-ODE true"
else: ##head towards the q_goal
if self.system == 1:
if self.robocomm.getReceiveObs() == False: # wait for obstacles from Pioneer
vx = 0
vy = 0
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
#print "no obstacles 1-ODE true"
if self.boundary_following == True:
self.q_hit_count += 1
# finding the point to go normal to (closest overlapping point)
j = 0
recheck = 0
while not bool(overlap):
# cannot see the obstacle; based on the location of the previous point, enlarge the robot's range on the left or on the right
j += 1
# finding whether the previous obstacle point is on the left side or the right side of the robot
# angle = angle of the previous point from the x-axis of the field
# omega = angle of the current Pioneer orientation from the x-axis of the field
# cc = difference between angle and omega ( < pi = previous point on the left of robot, else on the right of robot)
x = self.prev_follow[0] -pose[0]
y = self.prev_follow[1] -pose[1]
angle = atan(y/x)
# convert angle to 2pi
if x > 0 and y > 0:
angle = angle
elif x < 0 and y > 0:
angle = pi + angle
elif x <0 and y < 0:
angle = pi + angle
else:
angle = 2*pi + angle
# convert pose to 2pi
if pose[2] < 0:
omega = (2*pi + pose[2])
else:
omega = pose[2]
if omega > angle:
cc = 2*pi - (omega - angle)
else:
cc = angle - omega
# on the left
#if angle - omega > 0 and angle - omega < pi:
if cc < pi:
#print "on the left, angle: "+ str(angle) + " omega: "+ str(omega)+ " angle-omega: "+ str(angle-omega)
Robot = PolyShapes.Rectangle(self.range*2*j,self.range*2*j)
Robot.shift(pose[0]-self.range*j*2,pose[1]-self.range*j)
Robot.rotate(pose[2]-pi/2,pose[0],pose[1])
# on the right
else:
#print "on the right, angle: "+ str(angle) + " omega: "+ str(omega)+ " angle-omega: "+ str(angle-omega)
Robot = PolyShapes.Rectangle(self.range*2*j,self.range*2*j)
Robot.shift(pose[0],pose[1]-self.range*j)
Robot.rotate(pose[2]-pi/2,pose[0],pose[1])
if self.system == 1:
overlap = Robot - ( self.map_work - self.robocomm.getObsPoly())
else:
overlap = Robot - ( self.map_work)
#self.plotPoly(Robot, 'm',2)
# treated as dynamic obstacles; the robot can go straight to the goal point
if j >= 2:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
overlap = None
self.overlap = overlap
self.q_hit_count = 0
self.boundary_following = False
self.m_line = None
self.drive_handler.setVelocity(vx,vy, pose[2])
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ###0.05
departed = not self.currentRegionPoly.overlaps(self.realRobot)
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
arrived = self.nextRegionPoly.covers(self.realRobot)
return arrived
##extra box plotting in figure 1#
if self.PLOT_OVERLAP == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
self.plotPoly(overlap,'g',3)
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
# find the closest point on the obstacle to the robot
overlap_len = len(overlap)
for j in range(overlap_len):
BoundPolyPoints = asarray(overlap[j])
for i in range(len(BoundPolyPoints)-1):
bundle_x = (BoundPolyPoints[i,0] +BoundPolyPoints[1+i,0])/2 #mid-point coordinate x
bundle_y = (BoundPolyPoints[i,1] +BoundPolyPoints[1+i,1])/2 #mid-point coordinate y
q_overlap = hstack((q_overlap,vstack((bundle_x,bundle_y))))
bundle_x = (BoundPolyPoints[len(BoundPolyPoints)-1,0] +BoundPolyPoints[0,0])/2 #mid-point coordinate x
bundle_y = (BoundPolyPoints[len(BoundPolyPoints)-1,1] +BoundPolyPoints[0,1])/2 #mid-point coordinate y
q_overlap = hstack((q_overlap,vstack((bundle_x,bundle_y))))
q_overlap = q_overlap.transpose()
pt = self.closest_pt([pose[0],pose[1]], vstack((q_overlap,asarray(PolyUtils.pointList(overlap)))))
self.prev_follow = pt
#calculate the vector to follow the obstacle
normal = mat([pose[0],pose[1]] - pt)
#find the distance from the closest point
distance = norm(normal)
velocity = normal * self.trans_matrix
vx = (velocity/norm(velocity)/3)[0,0]
vy = (velocity/norm(velocity)/3)[0,1]
# push or pull the robot towards the obstacle depending on whether the robot is close or far from the obstacle.
turn = pi/4*(distance-0.5*self.obsRange)/(self.obsRange) ### change to 0.6 from 0.5 for more allowance in following
corr_matrix = mat([[cos(turn),-sin(turn)],[sin(turn),cos(turn)]])
v = corr_matrix*mat([[vx],[vy]])
vx = v[0,0]
vy = v[1,0]
##plotting overlap on figure 2
if self.PLOT_OVERLAP == True:
plt.figure(self.overlap_figure)
plt.clf()
self.plotPoly(self.m_line,'b');
self.plotPoly(overlap,'r');
plt.plot(pt[0],pt[1],'ro')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
## conditions under which the loop will end
#for 11111
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ####0.05
departed = not self.currentRegionPoly.overlaps(self.realRobot)
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
arrived = self.nextRegionPoly.covers(self.realRobot)
#for 33333
reachMLine= self.m_line.overlaps(RobotPoly)
# 1.reached the next region
if arrived:
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
print "arriving at the next region. Exit boundary following mode"
vx = 0
vy = 0
"""
# 2.q_hit is reencountered
elif norm(self.q_hit-mat([pose[0],pose[1]]).T) < 0.05 and self.q_hit_count > self.q_hit_Thres:
print "reencounter q_hit. cannot reach q_goal"
vx = 0
vy = 0
"""
# 3.m-line reencountered
elif reachMLine:
#print >>sys.__stdout__, "m-line overlaps RoboPoly, m-line" + str(norm(self.q_g-self.q_hit)-2*self.obsRange) + " distance: " + str(norm(self.q_g-mat([pose[0],pose[1]]).T))
if norm(self.q_g-mat([pose[0],pose[1]]).T) < norm(self.q_g-self.q_hit)-2*self.obsRange:
#print "m-line overlaps RoboPoly, m-line" + str(norm(self.q_g-self.q_hit)-2*self.obsRange) + " distance: " + str(norm(self.q_g-mat([pose[0],pose[1]]).T))
#print "leaving boundary following mode"
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
leaving = False
# turn the robot till it is facing the goal
while not leaving:
x = self.q_g[0] -self.pose_handler.getPose()[0]
y = self.q_g[1] -self.pose_handler.getPose()[1]
angle = atan(y/x)
if x > 0 and y > 0:
angle = angle
elif x < 0 and y > 0:
angle = pi + angle
elif x <0 and y < 0:
angle = pi + angle
else:
angle = 2*pi + angle
if self.pose_handler.getPose()[2] < 0:
omega = (2*pi + self.pose_handler.getPose()[2])
#print >>sys.__stdout__,"omega<0: "+ str(omega)
else:
omega = self.pose_handler.getPose()[2]
#print >>sys.__stdout__,"omega: "+ str(omega)
if omega > angle:
cc = 2*pi - (omega - angle)
else:
cc = angle - omega
# angle(goal point orientation) on the left of omega(robot orientation)
#if angle - omega > 0 and angle - omega < pi:
if cc < pi:
#print>>sys.__stdout__, "turn left"
vx,vy = self.turnLeft(cc)
# on the right
else:
#print>>sys.__stdout__, "turn right"
vx, vy = self.turnRight(2*pi-cc)
#print>>sys.__stdout__, "omega: "+ str(omega) + " angle: "+ str(angle) + " (omega-angle): " + str(omega-angle)
self.drive_handler.setVelocity(vx,vy, self.pose_handler.getPose()[2])
if omega - angle < pi/6 and omega - angle > -pi/6:
leaving = True
#Check whether the robot can leave now (the robot has to be closer to the goal than when it is at q_hit to leave)
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
path = PolyUtils.convexHull(self.realRobot + QGoalPoly)
if self.system == 1:
if self.robocomm.getReceiveObs() == False:
pathOverlap = path - ( self.map_work)
else:
pathOverlap = path - ( self.map_work - self.robocomm.getObsPoly())
else:
pathOverlap = path - ( self.map_work)
if not bool(pathOverlap):
#print "There is NO MORE obstacles in front for now."
# check if the robot is closer to the goal compared with q_hit
if norm(self.q_hit-mat(self.q_g).T) > norm(mat([pose[0],pose[1]]).T-mat(self.q_g).T) :
#print "The robot is closer than the leaving point. The robot can leave"
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
lala = 1
#print "not leaving bug algorithm. difference(-farther) =" + str(norm(self.q_hit-mat(self.q_g).T) - norm(mat([pose[0],pose[1]]).T-mat(self.q_g).T))
"""
# Pass this desired velocity on to the drive handler
# Check if there are obstacles within 0.35m of the robot, if so, stop the robot
if self.system == 1:
if self.robocomm.getSTOP() == True:
vx = 0
vy = 0
"""
#vx = 0
#vy = 0
self.overlap = overlap
self.drive_handler.setVelocity(vx,vy, pose[2])
# Set the current region as the previous current region(for checking whether the robot has arrived at the next region)
self.previous_current_reg = current_reg
# check whether robot has arrived at the next region
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ###0.05
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
departed = not self.currentRegionPoly.overlaps(self.realRobot)
arrived = self.nextRegionPoly.covers(self.realRobot)
if arrived:
self.q_hit_count = 0
self.boundary_following = False
self.m_line = None
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
print "WARNING: Left current region but not in expected destination region"
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
#print "I think I'm in " + r.name
#print pose
break
self.last_warning = time.time()
return arrived
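The handler above normalizes atan(y/x) into [0, 2*pi) with quadrant-by-quadrant checks, in two separate places. As a hedged aside (this helper is not part of the project), the same normalization can be written more compactly with atan2 and the modulo operator:

from math import atan2, pi

def heading_gap(pose, target):
    # Hypothetical helper; a minimal sketch of the normalization above.
    # Bearing of the target from the robot, mapped into [0, 2*pi).
    angle = atan2(target[1] - pose[1], target[0] - pose[0]) % (2 * pi)
    # Robot heading mapped into [0, 2*pi).
    omega = pose[2] % (2 * pi)
    # Counter-clockwise gap from heading to bearing; a value below pi
    # means the target lies on the robot's left.
    cc = (angle - omega) % (2 * pi)
    return cc

The original code then turns left when cc < pi and right otherwise, turning by cc or 2*pi - cc respectively.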
0
Example 82
Project: AZOrange Source File: competitiveWorkflow.py
def getStatistics(dataset, runningDir, resultsFile, mlList=[ml for ml in MLMETHODS if AZOC.MLMETHODS[ml]["useByDefault"]], queueType = "NoSGE", verbose = 0, getAllModels = False, callBack = None):
"""
runningDir (An existing dir for creating one job dir per fold)
|
+---- status (The overall status: "started", "finished" or the progress "1/10", "2/10", ...)
|
+---- fold_1
|
+---- fold_2
|
.
.
.
The run will be monitored by this method.
Whenever an MLMethod fails, the respective fold job is restarted
"""
if dataset.domain.classVar.varType == orange.VarTypes.Discrete:
responseType = "Classification"
else:
responseType = "Regression"
#Create the Train and test sets
DataIdxs = dataUtilities.SeedDataSampler(dataset, AZOC.QSARNEXTFOLDS )
#Check data in advance so that it will not, by chance, fail at the last fold!
#for foldN in range(AZOC.QSARNEXTFOLDS):
#trainData = dataset.select(DataIdxs,foldN,negate=1)
#checkTrainData(trainData)
jobs = {}
thisDir = os.getcwd()
os.chdir(runningDir)
#PID = os.getpid()
#print "Started getStatistics in Process with PID: "+str(PID)
#os.system('echo "'+str(PID)+'" > '+os.path.join(runningDir,"PID"))
os.system('echo "started" > '+os.path.join(runningDir,"status"))
# Start all Fold jobs
stepsDone = 0
nTotalSteps = AZOC.QSARNEXTFOLDS
for fold in range(AZOC.QSARNEXTFOLDS):
job = str(fold)
print "Starting job for fold ",job
trainData = dataset.select(DataIdxs,fold,negate=1)
jobs[job] = {"job":job,"path":os.path.join(runningDir, "fold_"+job), "running":False, "failed":False, "finished":False}
# Uncomment next 3 lines for running in finished jobs dirs
#st, jID = commands.getstatusoutput("cat "+os.path.join(runningDir, "fold_"+job,"jID"))
#jobs[job]["jID"] = jID
#continue
os.system("rm -rf "+jobs[job]["path"])
os.system("mkdir -p "+jobs[job]["path"])
trainData.save(os.path.join(jobs[job]["path"],"trainData.tab"))
file_h = open(os.path.join(jobs[job]["path"],"run.sh"),"w")
file_h.write("#!/bin/tcsh\n")
file_h.write("source "+os.path.join(os.environ["AZORANGEHOME"], "templateProfile") + "\n")
file_h.write("python "+os.path.join(jobs[job]["path"],"QsubScript.py")+"\n")
file_h.close()
file_h = open(os.path.join(jobs[job]["path"],"QsubScript.py"),"w")
file_h.write("import os\n")
file_h.write("from AZutilities import dataUtilities\n")
file_h.write("from AZutilities import competitiveWorkflow\n")
file_h.write("data = dataUtilities.DataTable('"+os.path.join(jobs[job]["path"],"trainData.tab")+"')\n")
file_h.write('os.system(\'echo "running" > '+os.path.join(jobs[job]["path"],"status")+' \')\n')
file_h.write("models = competitiveWorkflow.getModel(data, mlList="+str(mlList)+", savePath = '"+os.path.join(jobs[job]["path"],"results.pkl")+"', queueType = '"+queueType+"', getAllModels = "+str(getAllModels)+")\n")
file_h.write("nModelsSaved = 0\n")
file_h.write("for model in models:\n")
file_h.write(" if not models[model] is None:\n")
file_h.write(" models[model].write('"+os.path.join(jobs[job]["path"],"model")+"'+'_'+model)\n")
file_h.write(' nModelsSaved += 1\n')
file_h.write('if nModelsSaved == len([m for m in models if not models[m] is None ]):\n')
file_h.write(' os.system(\'echo "finished" > '+os.path.join(jobs[job]["path"],"status")+' \')\n')
file_h.write('else:\n')
file_h.write(' os.system(\'echo "failed" > '+os.path.join(jobs[job]["path"],"status")+' \')\n')
file_h.close()
os.chdir(os.path.join(jobs[job]["path"]))
if queueType == "NoSGE": # Serial mode
status, out = commands.getstatusoutput("tcsh " + os.path.join(jobs[job]["path"],"run.sh"))
if status:
print "ERROR on Job "+str(job)+" (will be restarted latter)"
print out
else:
statusFile = os.path.join(jobs[job]["path"],"status")
if os.path.isfile(statusFile):
st, status = commands.getstatusoutput("cat "+statusFile)
else:
print "ERROR: Missing status file"
status = None
if not status:
print "ERROR! job "+job+" has no status!"
jobs[job]["failed"] = True
elif status == "failed":
print "Job "+job+" failed to build all models"
jobs[job]["failed"] = True
elif status == "finished":
jobs[job]["finished"] = True
if not isJobProgressingOK(jobs[job]):
print "Job "+job+" failed to build one or more models in getMLStatistics"
jobs[job]["failed"] = True
jobs[job]["finished"] = False
if jobs[job]["failed"]:
print "Job "+job+" FAILED"
else:
print "Finished Job "+str(job)+" with success"
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
else:
cmd = "qsub -cwd -q batch.q" + AZOC.SGE_QSUB_ARCH_OPTION_CURRENT + os.path.join(jobs[job]["path"],"run.sh")
status, out = commands.getstatusoutput(cmd)
if status:
print "ERROR on Job "+str(job)+" (will be skipped)"
print out
#raise Exception("ERROR starting job for folder "+str(job))
# Your job 955801 ("template_run.sh") has been submitted
jID = out.strip().split(" ")[2]
print " jID: ",jID
os.system('echo "'+jID+'" > '+os.path.join(jobs[job]["path"], "jID"))
jobs[job]["running"] = True
jobs[job]["jID"] = jID
os.chdir(runningDir)
os.chdir(thisDir)
finished = []
if queueType == "NoSGE":
failed = []
#Report failed Jobs
for job in jobs:
if jobs[job]["finished"]:
finished.append(job)
for job in jobs:
if jobs[job]["failed"]:
failed.append(job)
print "Successful finished Jobs: ",finished
print "Failed Jobs: ",failed
else: # Monitor SGE jobs until all are finished
#Monitor Fold jobs
updateJobsStatus(jobs)
for job in jobs:
if jobs[job]["finished"]:
finished.append(job)
print "Jobs already finished: ",finished
os.system(' echo "'+str(len(finished))+'/'+str(AZOC.QSARNEXTFOLDS)+'" > '+os.path.join(runningDir,"status"))
while len(finished) < AZOC.QSARNEXTFOLDS:
print ".",
sys.stdout.flush()
updateJobsStatus(jobs)
for job in jobs:
if jobs[job]["finished"] and job not in finished:
finished.append(job)
if callBack:
stepsDone += 1
if not callBack((100*stepsDone)/nTotalSteps): return None
print time.asctime()+": Finished job "+str(job)
os.system(' echo "'+str(len(finished))+'/'+str(AZOC.QSARNEXTFOLDS)+'" > '+os.path.join(runningDir,"status"))
for job in [j for j in jobs if jobs[j]["failed"]]:
jobs[job] = restartJob(jobs[job])
time.sleep(5)
print "All fold jobs finished!"
# Gather the results
print "Gathering results..."
#Var for saving each fold's result
results = {}
exp_pred = {}
nTrainEx = {}
nTestEx = {}
# Var for saving the statistics results
statistics = {}
mlMethods = [ml for ml in AZOC.MLMETHODS] + ["Consensus"]
sortedJobs = [job for job in jobs]
sortedJobs.sort(cmp = lambda x,y:int(x)>int(y) and 1 or -1)
# Place for storing the selected models results
results["selectedML"] = []
exp_pred["selectedML"] = []
nTrainEx["selectedML"] = []
nTestEx["selectedML"] = []
foldSelectedML = []
for ml in mlMethods: # Loop over each MLMethod
try:
#Var for saving each fold's result
results[ml] = []
exp_pred[ml] = []
nTrainEx[ml] = []
nTestEx[ml] = []
logTxt = ""
for job in sortedJobs: #loop over each fold
modelPath = os.path.join(jobs[job]["path"], "model_"+ml)
if not os.path.isdir(modelPath):
if getAllModels: print "MLMethod "+ml+" not available in fold "+job
continue
resFile = os.path.join(jobs[job]["path"], "results.pkl")
statFile_h = open(resFile)
foldStat = pickle.load(statFile_h)
statFile_h.close()
#load model
model = AZBaseClasses.modelRead(modelPath)
#Test the model
testData = dataset.select(DataIdxs,int(job))
nTrainEx[ml].append(model.NTrainEx)
nTestEx[ml].append(len(testData))
if foldStat[ml]["selected"]:
foldSelectedML.append(ml)
nTrainEx["selectedML"].append(model.NTrainEx)
nTestEx["selectedML"].append(len(testData))
if responseType == "Classification":
results[ml].append((evalUtilities.getClassificationAccuracy(testData, model), evalUtilities.getConfMat(testData, model) ) )
if foldStat[ml]["selected"]:
results["selectedML"].append(results[ml][-1])
else:
local_exp_pred = []
for ex in testData:
local_exp_pred.append((ex.getclass(), model(ex)))
results[ml].append((evalUtilities.calcRMSE(local_exp_pred), evalUtilities.calcRsqrt(local_exp_pred) ) )
#Save the experimental value and corresponding predicted value
exp_pred[ml] += local_exp_pred
if foldStat[ml]["selected"]:
results["selectedML"].append(results[ml][-1])
exp_pred["selectedML"]+= local_exp_pred
res = createStatObj(results[ml], exp_pred[ml], nTrainEx[ml], nTestEx[ml],responseType, len(sortedJobs), logTxt)
if not res:
raise Exception("No results available!")
if getAllModels:
statistics[ml] = copy.deepcopy(res)
writeResults(statistics, resultsFile)
print " OK",ml
except:
print "Error on MLmethod "+ml+". It will be skipped"
ml = "selectedML"
res = createStatObj(results[ml], exp_pred[ml], nTrainEx[ml], nTestEx[ml],responseType, len(sortedJobs), logTxt, foldSelectedML)
if not res:
raise Exception("No results available!")
statistics[ml] = copy.deepcopy(res)
writeResults(statistics, resultsFile)
os.system(' echo "finished" > '+os.path.join(runningDir,"status"))
return statistics
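getStatistics above drives two polling patterns: in serial (NoSGE) mode it runs folds inline, while in SGE mode it re-checks job status and calls time.sleep(5) between passes, restarting failed folds. A minimal generic sketch of that monitor loop, assuming hypothetical check_status and restart helpers:

import time

def monitor_jobs(jobs, check_status, restart, poll_seconds=5):
    # Poll until every fold job reports finished; restart failed jobs
    # and sleep between passes so the scheduler is not hammered.
    finished = set()
    while len(finished) < len(jobs):
        for name, job in jobs.items():
            if name in finished:
                continue
            status = check_status(job)
            if status == 'finished':
                finished.add(name)
            elif status == 'failed':
                restart(job)
        time.sleep(poll_seconds)
    return finished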
0
Example 83
Project: wercker-cli Source File: create.py
@login_required
def create(path='.', valid_token=None):
if not valid_token:
raise ValueError("A valid token is required!")
term = get_term()
if get_value(VALUE_PROJECT_ID, print_warnings=False):
puts("A .wercker file was found.")
run_create = prompt.yn(
"Are you sure you want to run `wercker create`?",
default="n")
if run_create is False:
puts("Aborting.")
return
else:
puts("")
if project_link(
valid_token=valid_token,
puts_result=False,
auto_link=False
):
puts("A matching application was found on wercker.")
use_link = prompt.yn("Do you want to run 'wercker link' instead of\
`wercker create`?")
puts("")
if use_link is True:
project_link(valid_token=valid_token)
return
path = find_git_root(path)
if path:
options = get_remote_options(path)
heroku_options = filter_heroku_sources(options)
else:
options = []
heroku_options = []
if not path:
return False
puts('''About to create an application on wercker.
This consists of the following steps:
1. Configure application
2. Setup keys
3. Add a deploy target ({heroku_options} heroku targets detected)
4. Trigger initial build'''.format(
wercker_url=get_value(VALUE_WERCKER_URL),
heroku_options=len(heroku_options))
)
if not path:
puts(
term.red("Error:") +
" Could not find a repository." +
" wercker create requires a git repository. Create/clone a\
repository first."
)
return
options = [o for o in options if o not in heroku_options]
options = [o for o in options if o.priority > 1]
count = len(options)
puts('''
Step ''' + term.white('1') + '''. Configure application
-------------
''')
puts(
"%s repository location(s) found...\n"
% term.bold(str(count))
)
url = pick_url(options)
url = convert_to_url(url)
source = get_preferred_source_type(url)
puts("\n%s repository detected..." % source)
puts("Selected repository url is %s\n" % url)
client = Client()
code, profile = client.get_profile(valid_token)
source_type = get_source_type(url)
if source_type == SOURCE_BITBUCKET:
if profile.get('hasBitbucketToken', False) is False:
puts("No Bitbucket account linked with your profile. Wercker uses\
this connection to link up some events for your repository on Bitbucket to our\
service.")
provider_url = get_value(
VALUE_WERCKER_URL
) + '/provider/add/cli/bitbucket'
puts("Launching {url} to start linking.".format(
url=provider_url
))
from time import sleep
sleep(5)
import webbrowser
webbrowser.open(provider_url)
raw_input("Press enter to continue...")
elif source_type == SOURCE_GITHUB:
if profile.get('hasGithubToken', False) is False:
puts("No GitHub account linked with your profile. Wercker uses\
this connection to link up some events for your repository on GitHub to our\
service.")
provider_url = get_value(
VALUE_WERCKER_URL
) + '/provider/add/cli/github'
puts("Launching {url} to start linking.".format(
url=provider_url
))
from time import sleep
sleep(5)
import webbrowser
webbrowser.open(provider_url)
raw_input("Press enter to continue...")
username = get_username(url)
project = get_project(url)
puts('''
Step {t.white}2{t.normal}.
-------------
In order to clone the repository on wercker, an ssh key is needed. A new/unique
key can be generated for each repository. There 3 ways of using ssh keys on
wercker:
{t.green}1. Automatically add a deploy key [recommended]{t.normal}
2. Use the checkout key, wercker uses for public projects.
3. Let wercker generate a key, but allow add it manually to github/bitbucket.
(needed when using git submodules)
For more information on this see: http://etc...
'''.format(t=term))
key_method = None
while(True):
result = prompt.get_value_with_default(
"Options:",
'1'
)
valid_values = [str(i + 1) for i in range(3)]
if result in valid_values:
key_method = valid_values.index(result)
break
else:
puts(term.red("warning: ") + " invalid build selected.")
checkout_key_id = None
checkout_key_publicKey = None
if(key_method != 1):
puts('''Retrieving a new ssh-key.''')
status, response = client.create_checkout_key()
puts("done.")
if status == 200:
checkout_key_id = response['id']
checkout_key_publicKey = response['publicKey']
if key_method == 0:
puts('Adding deploy key to repository:')
status, response = client.link_checkout_key(valid_token,
checkout_key_id,
username,
project,
source_type)
if status != 200:
puts(term.red("Error:") +
" uanble to add key to repository.")
sys.exit(1)
elif key_method == 2:
profile_username = profile.get('username')
status, response = client.get_profile_detailed(
valid_token,
profile_username)
username = response[source_type + 'Username']
url = None
if source_type == SOURCE_GITHUB:
url = "https://github.com/settings/ssh"
elif source_type == SOURCE_BITBUCKET:
url = "http://bitbucket.org/account/user/{username}/\
ssh-keys/"
if status == 200:
formatted_key = "\n".join(
textwrap.wrap(checkout_key_publicKey))
puts('''Please add the following public key:
{publicKey}
You can add the key here: {url}\n'''.format(publicKey=formatted_key,
url=url.format(
username=username)))
raw_input("Press enter to continue...")
else:
puts(term.red("Error:") +
" unable to load wercker profile information.")
sys.exit(1)
else:
puts(term.red("Error:") + 'unable to retrieve an ssh key.')
sys.exit(1)
puts("Creating a new application")
status, response = client.create_project(
valid_token,
username,
project,
source,
checkout_key_id,
)
if response['success']:
puts("done.\n")
set_value(VALUE_PROJECT_ID, response['data']['id'])
puts("In the root of this repository a .wercker file has been created\
which enables the link between the source code and wercker.\n")
site_url = None
if source_type == SOURCE_GITHUB:
site_url = "https://github.com/" + \
username + \
"/" + \
project
elif source_type == SOURCE_BITBUCKET:
site_url = "https://bitbucket.org/" + \
username + \
"/" + \
project
puts('''
Step ''' + term.white('3') + '''.
-------------
''')
target_options = heroku_options
nr_targets = len(target_options)
puts("%s automatic supported target(s) found." % str(nr_targets))
if nr_targets:
target_add(valid_token=valid_token)
puts('''
Step ''' + term.white('4') + '''.
-------------
''')
project_build(valid_token=valid_token)
puts('''
Done.
-------------
You are all set up for using wercker. You can trigger new builds by
committing and pushing your latest changes.
Happy coding!''')
else:
puts(
term.red("Error: ") +
"Unable to create project. \n\nResponse: %s\n" %
(response.get('errorMessage'))
)
puts('''
Note: only repositories that the wercker user has permissions on can be added.
This is because some event hooks for wercker need to be registered on the
repository. If you want to test a public repository and don't have permissions
on it: fork it. You can add the forked repository to wercker''')
0
Example 84
Project: tp-libvirt Source File: virsh_snapshot_create_as.py
def run(test, params, env):
"""
Test snapshot-create-as command
Make sure that the clean repo can be used, because qemu-guest-agent needs
to be installed in the guest
The command creates a snapshot (disk and RAM) from arguments, including
the following points
* virsh snapshot-create-as --print-xml --diskspec --name --description
* virsh snapshot-create-as --print-xml with multi --diskspec
* virsh snapshot-create-as --print-xml --memspec
* virsh snapshot-create-as --description
* virsh snapshot-create-as --no-metadata
* virsh snapshot-create-as --no-metadata --print-xml (negative test)
* virsh snapshot-create-as --atomic --disk-only
* virsh snapshot-create-as --quiesce --disk-only (positive and negative)
* virsh snapshot-create-as --reuse-external
* virsh snapshot-create-as --disk-only --diskspec
* virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
* virsh snapshot-create-as --disk-only and --memspec (negative)
* Create multi snapshots with snapshot-create-as
* Create snapshot with name a--a a--a--snap1
"""
if not virsh.has_help_command('snapshot-create-as'):
raise error.TestNAError("This version of libvirt does not support "
"the snapshot-create-as test")
vm_name = params.get("main_vm")
status_error = params.get("status_error", "no")
options = params.get("snap_createas_opts")
multi_num = params.get("multi_num", "1")
diskspec_num = params.get("diskspec_num", "1")
bad_disk = params.get("bad_disk")
reuse_external = "yes" == params.get("reuse_external", "no")
start_ga = params.get("start_ga", "yes")
domain_state = params.get("domain_state")
memspec_opts = params.get("memspec_opts")
config_format = "yes" == params.get("config_format", "no")
snapshot_image_format = params.get("snapshot_image_format")
diskspec_opts = params.get("diskspec_opts")
create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
unix_channel = "yes" == params.get("unix_channel", "yes")
dac_denial = "yes" == params.get("dac_denial", "no")
check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")
# gluster related params
replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
disk_src_protocol = params.get("disk_source_protocol")
restart_tgtd = params.get("restart_tgtd", "no")
vol_name = params.get("vol_name")
tmp_dir = data_dir.get_tmp_dir()
pool_name = params.get("pool_name", "gluster-pool")
brick_path = os.path.join(tmp_dir, pool_name)
uri = params.get("virsh_uri")
usr = params.get('unprivileged_user')
if usr:
if usr.count('EXAMPLE'):
usr = 'testacl'
if disk_src_protocol == 'iscsi':
if not libvirt_version.version_compare(1, 0, 4):
raise error.TestNAError("'iscsi' disk doesn't support in"
" current libvirt version.")
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
" libvirt version.")
if not libvirt_version.version_compare(1, 2, 7):
# As bug 1017289 was closed as WONTFIX, the support only
# exists on 1.2.7 and higher
if disk_src_protocol == 'gluster':
raise error.TestNAError("Snapshot on glusterfs not support in "
"current version. Check more info with "
"https://bugzilla.redhat.com/buglist.cgi?"
"bug_id=1017289,1032370")
opt_names = locals()
if memspec_opts is not None:
mem_options = compose_disk_options(test, params, memspec_opts)
# if the parameters have the disk without "file=" then we only need to
# add testdir for it.
if mem_options is None:
mem_options = os.path.join(test.tmpdir, memspec_opts)
options += " --memspec " + mem_options
tag_diskspec = 0
dnum = int(diskspec_num)
if diskspec_opts is not None:
tag_diskspec = 1
opt_names['diskopts_1'] = diskspec_opts
# diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
if dnum > 1:
tag_diskspec = 1
for i in range(1, dnum + 1):
opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)
if tag_diskspec == 1:
for i in range(1, dnum + 1):
disk_options = compose_disk_options(test, params,
opt_names["diskopts_%s" % i])
options += " --diskspec " + disk_options
logging.debug("options are %s", options)
vm = env.get_vm(vm_name)
option_dict = {}
option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
logging.debug("option_dict is %s", option_dict)
# A backup of original vm
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
logging.debug("original xml is %s", vmxml_backup)
# Generate empty image for negative test
if bad_disk is not None:
bad_disk = os.path.join(test.tmpdir, bad_disk)
os.open(bad_disk, os.O_RDWR | os.O_CREAT)
# Generate external disk
if reuse_external:
disk_path = ''
for i in range(dnum):
external_disk = "external_disk%s" % i
if params.get(external_disk):
disk_path = os.path.join(test.tmpdir,
params.get(external_disk))
utils.run("qemu-img create -f qcow2 %s 1G" % disk_path)
# Only chmod the last external disk for the negative case
if dac_denial:
utils.run("chmod 500 %s" % disk_path)
qemu_conf = None
libvirtd_conf = None
libvirtd_log_path = None
libvirtd = utils_libvirtd.Libvirtd()
try:
# Config "snapshot_image_format" option in qemu.conf
if config_format:
qemu_conf = utils_config.LibvirtQemuConfig()
qemu_conf.snapshot_image_format = snapshot_image_format
logging.debug("the qemu config file content is:\n %s" % qemu_conf)
libvirtd.restart()
if check_json_no_savevm:
libvirtd_conf = utils_config.LibvirtdConfig()
libvirtd_conf["log_level"] = '1'
libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
logging.debug("the libvirtd config file content is:\n %s" %
libvirtd_conf)
libvirtd.restart()
if replace_vm_disk:
libvirt.set_vm_disk(vm, params, tmp_dir)
if set_snapshot_attr:
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
vmxml_new.del_device(disk_xml)
# set snapshot attribute in disk xml
disk_xml.snapshot = disk_snapshot_attr
new_disk = disk.Disk(type_name='file')
new_disk.xmltreefile = disk_xml.xmltreefile
vmxml_new.add_device(new_disk)
logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
vmxml_new.sync()
vm.start()
# Start qemu-ga on guest if --quiesce is given
if unix_channel and options.find("quiesce") >= 0:
vm.prepare_guest_agent()
session = vm.wait_for_login()
if start_ga == "no":
# The qemu-ga could be running and should be killed
session.cmd("kill -9 `pidof qemu-ga`")
# Check if the qemu-ga get killed
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if not stat_ps:
# As managed by systemd and set as autostart, qemu-ga
# could be restarted, so use systemctl to stop it.
session.cmd("systemctl stop qemu-guest-agent")
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if not stat_ps:
raise error.TestNAError("Fail to stop agent in "
"guest")
if domain_state == "paused":
virsh.suspend(vm_name)
else:
# Remove channel if exist
if vm.is_alive():
vm.destroy(gracefully=False)
xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
xml_inst.remove_agent_channels()
vm.start()
# Record the previous snapshot-list
snaps_before = virsh.snapshot_list(vm_name)
# Attach disk before creating snapshot if not printing xml and multiple
# disks are specified in cfg
if dnum > 1 and "--print-xml" not in options:
for i in range(1, dnum):
disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
utils.run("qemu-img create -f qcow2 %s 200M" % disk_path)
virsh.attach_disk(vm_name, disk_path,
'vd%s' % list(string.lowercase)[i],
debug=True)
# Run virsh command
# May create several snapshots, according to configuration
for count in range(int(multi_num)):
if create_autodestroy:
# Run virsh command in interactive mode
vmxml_backup.undefine()
vp = virsh.VirshPersistent()
vp.create(vmxml_backup['xml'], '--autodestroy')
cmd_result = vp.snapshot_create_as(vm_name, options,
ignore_status=True,
debug=True)
vp.close_session()
vmxml_backup.define()
else:
cmd_result = virsh.snapshot_create_as(vm_name, options,
unprivileged_user=usr,
uri=uri,
ignore_status=True,
debug=True)
# For multiple snapshots without a specific snapshot name, the
# snapshot name is derived from a time string with one-second
# granularity; to avoid snapshot creation failing on a duplicate
# name, sleep just over 1 second here.
if int(multi_num) > 1:
time.sleep(1.1)
output = cmd_result.stdout.strip()
status = cmd_result.exit_status
# check status_error
if status_error == "yes":
if status == 0:
raise error.TestFail("Run successfully with wrong command!")
else:
# Check memspec file should be removed if failed
if (options.find("memspec") >= 0 and
options.find("atomic") >= 0):
if os.path.isfile(option_dict['memspec']):
os.remove(option_dict['memspec'])
raise error.TestFail("Run failed but file %s exist"
% option_dict['memspec'])
else:
logging.info("Run failed as expected and memspec"
" file already been removed")
# Check domain xml is not updated if reuse external fail
elif reuse_external and dac_denial:
output = virsh.dumpxml(vm_name).stdout.strip()
if "reuse_external" in output:
raise error.TestFail("Domain xml should not be "
"updated with snapshot image")
else:
logging.info("Run failed as expected")
elif status_error == "no":
if status != 0:
raise error.TestFail("Run failed with right command: %s"
% output)
else:
# Check the special options
snaps_list = virsh.snapshot_list(vm_name)
logging.debug("snaps_list is %s", snaps_list)
check_snapslist(vm_name, options, option_dict, output,
snaps_before, snaps_list)
# To cover bug 872292
if check_json_no_savevm:
pattern = "The command savevm has not been found"
with open(libvirtd_log_path) as f:
for line in f:
if pattern in line and "error" in line:
raise error.TestFail("'%s' was found: %s"
% (pattern, line))
finally:
if vm.is_alive():
vm.destroy()
# recover domain xml
xml_recover(vmxml_backup)
path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
if os.path.isfile(path):
raise error.TestFail("Still can find snapshot metadata")
if disk_src_protocol == 'gluster':
libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
libvirtd.restart()
if disk_src_protocol == 'iscsi':
libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)
# rm bad disks
if bad_disk is not None:
os.remove(bad_disk)
# rm attached disks and reused external disks
if dnum > 1 and "--print-xml" not in options:
for i in range(dnum):
disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
if os.path.exists(disk_path):
os.unlink(disk_path)
if reuse_external:
external_disk = "external_disk%s" % i
disk_path = os.path.join(test.tmpdir,
params.get(external_disk))
if os.path.exists(disk_path):
os.unlink(disk_path)
# restore config
if config_format and qemu_conf:
qemu_conf.restore()
if libvirtd_conf:
libvirtd_conf.restore()
if libvirtd_conf or (config_format and qemu_conf):
libvirtd.restart()
if libvirtd_log_path and os.path.exists(libvirtd_log_path):
os.unlink(libvirtd_log_path)
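When more than one snapshot is created above, the loop sleeps 1.1 seconds between snapshot-create-as calls: unnamed snapshots are named from a timestamp with one-second resolution, so back-to-back calls would collide on the name. A minimal sketch of that spacing, with a hypothetical create_snapshot callable:

import time

def create_snapshots_spaced(create_snapshot, count, min_gap=1.1):
    # Timestamp-derived names have one-second granularity; keep calls
    # strictly more than a second apart to avoid duplicate-name failures.
    for _ in range(count):
        create_snapshot()
        time.sleep(min_gap)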
0
Example 85
Project: Sick-Beard-TPB Source File: SickBeard.py
def main():
"""
TV for me
"""
# do some preliminary stuff
sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
sickbeard.DATA_DIR = sickbeard.PROG_DIR
sickbeard.MY_ARGS = sys.argv[1:]
sickbeard.CREATEPID = False
sickbeard.DAEMON = False
sickbeard.SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, "")
sickbeard.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured I'll just randomly force UTF-8
if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
sickbeard.SYS_ENCODING = 'UTF-8'
if not hasattr(sys, "setdefaultencoding"):
reload(sys)
try:
# pylint: disable=E1101
# On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
sys.setdefaultencoding(sickbeard.SYS_ENCODING)
except:
print 'Sorry, you MUST add the Sick Beard folder to the PYTHONPATH environment variable'
print 'or find another way to force Python to use ' + sickbeard.SYS_ENCODING + ' for string encoding.'
sys.exit(1)
# Need console logging for SickBeard.py and SickBeard-console.exe
consoleLogging = (not hasattr(sys, "frozen")) or (sickbeard.MY_NAME.lower().find('-console') > 0)
# Rename the main thread
threading.currentThread().name = "MAIN"
try:
opts, args = getopt.getopt(sys.argv[1:], "qfdp::", ['quiet', 'forceupdate', 'daemon', 'port=', 'pidfile=', 'nolaunch', 'config=', 'datadir=']) # @UnusedVariable
except getopt.GetoptError:
print "Available Options: --quiet, --forceupdate, --port, --daemon, --pidfile, --config, --datadir"
sys.exit()
forceUpdate = False
forcedPort = None
noLaunch = False
for o, a in opts:
# For now we'll just silence the logging
if o in ('-q', '--quiet'):
consoleLogging = False
# Should we update (from tvdb) all shows in the DB right away?
if o in ('-f', '--forceupdate'):
forceUpdate = True
# Suppress launching web browser
# Needed for OSes without default browser assigned
# Prevent duplicate browser window when restarting in the app
if o in ('--nolaunch',):
noLaunch = True
# Override default/configured port
if o in ('-p', '--port'):
forcedPort = int(a)
# Run as a daemon
if o in ('-d', '--daemon'):
if sys.platform == 'win32':
print "Daemonize not supported under Windows, starting normally"
else:
consoleLogging = False
sickbeard.DAEMON = True
# Specify folder to load the config file from
if o in ('--config',):
sickbeard.CONFIG_FILE = os.path.abspath(a)
# Specify folder to use as the data dir
if o in ('--datadir',):
sickbeard.DATA_DIR = os.path.abspath(a)
# Write a pidfile if requested
if o in ('--pidfile',):
sickbeard.PIDFILE = str(a)
# If the pidfile already exists, sickbeard may still be running, so exit
if os.path.exists(sickbeard.PIDFILE):
sys.exit("PID file '" + sickbeard.PIDFILE + "' already exists. Exiting.")
# The pidfile is only useful in daemon mode, make sure we can write the file properly
if sickbeard.DAEMON:
sickbeard.CREATEPID = True
try:
file(sickbeard.PIDFILE, 'w').write("pid\n")
except IOError, e:
raise SystemExit("Unable to write PID file: %s [%d]" % (e.strerror, e.errno))
else:
logger.log(u"Not running in daemon mode. PID file creation disabled.")
# If they don't specify a config file then put it in the data dir
if not sickbeard.CONFIG_FILE:
sickbeard.CONFIG_FILE = os.path.join(sickbeard.DATA_DIR, "config.ini")
# Make sure that we can create the data dir
if not os.access(sickbeard.DATA_DIR, os.F_OK):
try:
os.makedirs(sickbeard.DATA_DIR, 0744)
except os.error, e:
raise SystemExit("Unable to create datadir '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the data dir
if not os.access(sickbeard.DATA_DIR, os.W_OK):
raise SystemExit("Datadir must be writeable '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the config file
if not os.access(sickbeard.CONFIG_FILE, os.W_OK):
if os.path.isfile(sickbeard.CONFIG_FILE):
raise SystemExit("Config file '" + sickbeard.CONFIG_FILE + "' must be writeable.")
elif not os.access(os.path.dirname(sickbeard.CONFIG_FILE), os.W_OK):
raise SystemExit("Config file root dir '" + os.path.dirname(sickbeard.CONFIG_FILE) + "' must be writeable.")
os.chdir(sickbeard.DATA_DIR)
if consoleLogging:
print "Starting up Sick Beard " + SICKBEARD_VERSION + " from " + sickbeard.CONFIG_FILE
# Load the config and publish it to the sickbeard package
if not os.path.isfile(sickbeard.CONFIG_FILE):
logger.log(u"Unable to find '" + sickbeard.CONFIG_FILE + "' , all settings will be default!", logger.ERROR)
sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)
CUR_DB_VERSION = db.DBConnection().checkDBVersion()
if CUR_DB_VERSION > 0:
if CUR_DB_VERSION < MIN_DB_VERSION:
raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") is too old to migrate from with this version of Sick Beard (" + str(MIN_DB_VERSION) + ").\n" + \
"Upgrade using a previous version of SB first, or start with no database file to begin fresh.")
if CUR_DB_VERSION > MAX_DB_VERSION:
raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") has been incremented past what this version of Sick Beard supports (" + str(MAX_DB_VERSION) + ").\n" + \
"If you have used other forks of SB, your database may be unusable due to their modifications.")
# Initialize the config and our threads
sickbeard.initialize(consoleLogging=consoleLogging)
sickbeard.showList = []
if sickbeard.DAEMON:
daemonize()
# Use this PID for everything
sickbeard.PID = os.getpid()
if forcedPort:
logger.log(u"Forcing web server to port " + str(forcedPort))
startPort = forcedPort
else:
startPort = sickbeard.WEB_PORT
if sickbeard.WEB_LOG:
log_dir = sickbeard.LOG_DIR
else:
log_dir = None
# sickbeard.WEB_HOST is available as a configuration value in various
# places but is not configurable. It is supported here for historic reasons.
if sickbeard.WEB_HOST and sickbeard.WEB_HOST != '0.0.0.0':
webhost = sickbeard.WEB_HOST
else:
if sickbeard.WEB_IPV6:
webhost = '::'
else:
webhost = '0.0.0.0'
try:
initWebServer({
'port': startPort,
'host': webhost,
'data_root': os.path.join(sickbeard.PROG_DIR, 'gui/'+sickbeard.GUI_NAME),
'web_root': sickbeard.WEB_ROOT,
'log_dir': log_dir,
'username': sickbeard.WEB_USERNAME,
'password': sickbeard.WEB_PASSWORD,
'enable_https': sickbeard.ENABLE_HTTPS,
'https_cert': sickbeard.HTTPS_CERT,
'https_key': sickbeard.HTTPS_KEY,
})
except IOError:
logger.log(u"Unable to start web server, is something else running on port %d?" % startPort, logger.ERROR)
if sickbeard.LAUNCH_BROWSER and not sickbeard.DAEMON:
logger.log(u"Launching browser and exiting", logger.ERROR)
sickbeard.launchBrowser(startPort)
sys.exit()
# Build from the DB to start with
logger.log(u"Loading initial show list")
loadShowsFromDB()
# Fire up all our threads
sickbeard.start()
# Launch browser if we're supposed to
if sickbeard.LAUNCH_BROWSER and not noLaunch and not sickbeard.DAEMON:
sickbeard.launchBrowser(startPort)
# Start an update if we're supposed to
if forceUpdate:
sickbeard.showUpdateScheduler.action.run(force=True) # @UndefinedVariable
# Stay alive while my threads do the work
while (True):
if sickbeard.invoked_command:
sickbeard.invoked_command()
sickbeard.invoked_command = None
time.sleep(1)
return
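main() above ends with the classic keep-alive idiom: background threads do the actual work while the main thread wakes once a second, services any queued command, and goes back to sleep. A stripped-down sketch, assuming a hypothetical pop_command hook:

import time

def keep_alive(pop_command):
    # Stay alive while worker threads run; wake once per second to
    # execute any command another thread queued for the main thread.
    while True:
        command = pop_command()
        if command is not None:
            command()
        time.sleep(1)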
0
Example 86
def run_tests():
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
namespace = os.environ.get('NAMESPACE', None)
poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
debug = os.environ.get('DEBUG_TESTS',False) in ['true','True']
debug_logdir = os.environ.get('DEBUG_LOGDIR')
json_output = os.environ.get('TEST_JSON_OUTPUT', None)
junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)
if namespace:
namespace = "'{0}'".format(namespace,)
else:
namespace = 'null'
sandbox = False
if serverurl.find('test.salesforce.com') != -1:
sandbox = True
sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0')
# Change base_url to use the tooling api
sf.base_url = sf.base_url + 'tooling/'
# Split test_name_match by commas to allow multiple class name matching options
where_name = []
for pattern in test_name_match.split(','):
if pattern:
where_name.append("Name LIKE '{0}'".format(pattern))
# Add any excludes to the where clause
where_exclude = []
for pattern in test_name_exclude.split(','):
if pattern:
where_exclude.append("(NOT Name LIKE '{0}')".format(pattern,))
# Get all test classes for namespace
query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(namespace,)
if where_name:
query += " AND ({0})".format(' OR '.join(where_name),)
if where_exclude:
query += " AND {0}".format(' AND '.join(where_exclude),)
print "Running Query: {0}".format(query,)
sys.stdout.flush()
res = sf.query_all(query)
print "Found {0} classes".format(res['totalSize'],)
sys.stdout.flush()
if not res['totalSize']:
return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
classes_by_id = {}
classes_by_name = {}
trace_id = None
results_by_class_name = {}
classes_by_log_id = {}
logs_by_class_id = {}
for cls in res['records']:
classes_by_id[cls['Id']] = cls['Name']
classes_by_name[cls['Name']] = cls['Id']
results_by_class_name[cls['Name']] = {}
# If debug is turned on, setup debug traces for all test classes
if debug:
print 'Setting up trace flag to capture debug logs'
# Get the User's id to set a TraceFlag
res_user = sf.query("Select Id from User where Username = '{0}'".format(username,))
user_id = res_user['records'][0]['Id']
# Set up a simple-salesforce sobject for TraceFlag using the tooling api
TraceFlag = sf.TraceFlag
TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
.format(instance=sf.sf_instance,
object_name='TraceFlag',
sf_version=sf.sf_version))
# First, delete any old trace flags still lying around
tf_res = sf.query('Select Id from TraceFlag')
if tf_res['totalSize']:
for tf in tf_res['records']:
TraceFlag.delete(tf['Id'])
expiration = datetime.datetime.now() + datetime.timedelta(seconds=60*60*12)
res = TraceFlag.create({
'ApexCode': 'Info',
'ApexProfiling': 'Debug',
'Callout': 'Info',
'Database': 'Info',
'ExpirationDate': expiration.isoformat(),
#'ScopeId': user_id,
'System': 'Info',
'TracedEntityId': user_id,
'Validation': 'Info',
'Visualforce': 'Info',
'Workflow': 'Info',
})
trace_id = res['id']
print 'Created TraceFlag for user'
# Run all the tests
print "Queuing tests for execution..."
sys.stdout.flush()
job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
# Loop waiting for the tests to complete
while True:
res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'".format(job_id,))
counts = {
'Queued': 0,
'Processing': 0,
'Aborted': 0,
'Completed': 0,
'Failed': 0,
'Preparing': 0,
'Holding': 0,
}
for item in res['records']:
counts[item['Status']] += 1
# If all tests have run, break from the loop
if not counts['Queued'] and not counts['Processing']:
print ''
print '-------------------------------------------------------------------------------'
print 'Test Results'
print '-------------------------------------------------------------------------------'
sys.stdout.flush()
break
print 'Completed: %(Completed)s Processing: %(Processing)s Queued: %(Queued)s' % counts
sys.stdout.flush()
sleep(poll_interval)
# Get the test results by method
res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'".format(job_id,))
counts = {
'Pass': 0,
'Fail': 0,
'CompileFail': 0,
'Skip': 0,
}
for result in res['records']:
class_name = classes_by_id[result['ApexClassId']]
results_by_class_name[class_name][result['MethodName']] = result
counts[result['Outcome']] += 1
if debug and result['ApexLogId']:
classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
# Fetch debug logs if debug is enabled
if debug:
log_ids = "('{0}')".format("','".join([str(id) for id in classes_by_log_id.keys()]),)
res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}".format(log_ids,))
for log in res['records']:
class_id = classes_by_log_id[log['Id']]
class_name = classes_by_id[class_id]
logs_by_class_id[class_id] = log
# Fetch the debug log file
body_url = '{0}sobjects/ApexLog/{1}/Body'.format(sf.base_url, log['Id'])
resp = sf.request.get(body_url, headers=sf.headers)
log_file = class_name + '.log'
if debug_logdir:
log_file = debug_logdir + os.sep + log_file
f = open(log_file, 'w')
f.write(resp.content)
f.close()
# Parse stats from the log file
f = open(log_file, 'r')
method_stats = parse_log(class_name, f)
# Add method stats to results_by_class_name
for method, info in method_stats.items():
results_by_class_name[class_name][method].update(info)
# Delete the trace flag
TraceFlag.delete(trace_id)
# Build an OrderedDict of results
test_results = []
class_names = results_by_class_name.keys()
class_names.sort()
for class_name in class_names:
class_id = classes_by_name[class_name]
duration = None
if debug and class_id in logs_by_class_id:
duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
print 'Class: {0} ({1}s)'.format(class_name, duration)
else:
print 'Class: {0}'.format(class_name,)
sys.stdout.flush()
method_names = results_by_class_name[class_name].keys()
method_names.sort()
for method_name in method_names:
result = results_by_class_name[class_name][method_name]
test_results.append({
'Children': result.get('children', None),
'ClassName': decode_to_unicode(class_name),
'Method': decode_to_unicode(result['MethodName']),
'Message': decode_to_unicode(result['Message']),
'Outcome': decode_to_unicode(result['Outcome']),
'StackTrace': decode_to_unicode(result['StackTrace']),
'Stats': result.get('stats', None),
'TestTimestamp': result.get('TestTimestamp', None),
})
# Output result for method
if debug and json_output and result.get('stats') and 'duration' in result['stats']:
# If debug is enabled and we're generating the json output, include duration with the test
print u' {0}: {1} ({2}s)'.format(
result['Outcome'],
result['MethodName'],
result['stats']['duration']
)
else:
print u' {Outcome}: {MethodName}'.format(**result)
if debug and not json_output:
print u' DEBUG LOG INFO:'
stats = result.get('stats',None)
if not stats:
print u' No stats found, likely because of debug log size limit'
else:
stat_keys = stats.keys()
stat_keys.sort()
for stat in stat_keys:
try:
value = stats[stat]
output = u' {0} / {1}'.format(value['used'], value['allowed'])
print output.ljust(26) + stat
except:
output = u' {0}'.format(stats[stat],)
print output.ljust(26) + stat
# Print message and stack trace if failed
if result['Outcome'] in ['Fail','CompileFail']:
print u' Message: {Message}'.format(**result)
print u' StackTrace: {StackTrace}'.format(**result)
sys.stdout.flush()
print u'-------------------------------------------------------------------------------'
print u'Passed: %(Pass)s Fail: %(Fail)s Compile Fail: %(CompileFail)s Skipped: %(Skip)s' % counts
print u'-------------------------------------------------------------------------------'
sys.stdout.flush()
if counts['Fail'] or counts['CompileFail']:
print u''
print u'Failing Tests'
print u'-------------'
print u''
sys.stdout.flush()
counter = 0
for result in test_results:
if result['Outcome'] not in ['Fail','CompileFail']:
continue
counter += 1
print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'], result['Method'], result['Outcome'])
print u' Message: {0}'.format(result['Message'],)
print u' StackTrace: {0}'.format(result['StackTrace'],)
sys.stdout.flush()
if json_output:
f = codecs.open(json_output, encoding='utf-8', mode='w')
f.write(json.dumps(test_results))
f.close()
if junit_output:
f = codecs.open(junit_output, encoding='utf-8', mode='w')
f.write('<testsuite tests="{0}">\n'.format(len(test_results)),)
for result in test_results:
testcase = ' <testcase classname="{0}" name="{1}"'.format(result['ClassName'], result['Method'])
if 'Stats' in result and result['Stats'] and 'duration' in result['Stats']:
testcase = '{0} time="{1}"'.format(testcase, result['Stats']['duration'])
if result['Outcome'] in ['Fail','CompileFail']:
testcase = '{0}>\n'.format(testcase,)
testcase = '{0} <failure type="{1}">{2}</failure>\n'.format(
testcase,
cgi.escape(result['StackTrace']),
cgi.escape(result['Message']),
)
testcase = '{0} </testcase>\n'.format(testcase,)
else:
testcase = '{0} />\n'.format(testcase,)
f.write(testcase)
f.write('</testsuite>')
f.close()
return counts
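The completion loop in run_tests re-queries ApexTestQueueItem and calls sleep(poll_interval) until no item is Queued or Processing. A generic sketch of draining pending states, with a hypothetical query_counts callable:

from time import sleep

def poll_until_drained(query_counts, poll_interval=10,
                       pending=('Queued', 'Processing')):
    # Re-query status counts until nothing is pending, sleeping between
    # polls to stay within API limits.
    while True:
        counts = query_counts()
        if not any(counts.get(state, 0) for state in pending):
            return counts
        sleep(poll_interval)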
0
Example 87
Project: SublimeApex Source File: metadata.py
def deploy(self, base64_zip):
""" Deploy zip file
Arguments:
* zipFile -- base64 encoded zipfile
"""
result = self.login()
if not result or not result["success"]: return
# Log the StartTime
start_time = datetime.datetime.now()
# Populate the soap_body with actual session id
deploy_options = self.settings["deploy_options"]
# If just checkOnly, output VALIDATE, otherwise, output DEPLOY
deploy_or_validate = "validate" if deploy_options["checkOnly"] else "deploy"
# 1. Issue a deploy request to start the asynchronous deployment
headers = {
"Content-Type": "text/xml;charset=UTF-8",
"SOAPAction": '""'
}
# [sf:%s]
Printer.get('log').write_start().write("[sf:%s] Start request for a deploy..." % deploy_or_validate)
options = deploy_options
options["zipfile"] = base64_zip
soap_body = self.soap.create_request('deploy', options)
try:
response = requests.post(self.metadata_url, soap_body, verify=False, headers=headers)
except Exception as e:
self.result = {
"Error Message": "Network Issue" if "Max retries exceeded" in str(e) else str(e),
"URL": self.metadata_url,
"Operation": "DEPLOY",
"success": False
}
return self.result
# Check whether session_id is expired
if "INVALID_SESSION_ID" in response.text:
Printer.get('log').write("[sf:%s] Session expired, need login again" % deploy_or_validate)
self.login(True)
return self.deploy(base64_zip)
# If status_code is > 399, it means there was an error
if response.status_code > 399:
self.result = util.get_response_error(response)
return self.result
# [sf:%s]
Printer.get('log').write("[sf:%s] Request for a deploy submitted successfully." % deploy_or_validate)
# Get async process id
async_process_id = util.getUniqueElementValueFromXmlString(response.content, "id")
# [sf:%s]
Printer.get('log').write("[sf:%s] Request ID for the current deploy task: %s" % (deploy_or_validate, async_process_id))
# [sf:%s]
Printer.get('log').write("[sf:%s] Waiting for server to finish processing the request..." % deploy_or_validate)
# 2. Issue check-status requests in a loop to ensure the async
# process is done
result = self.check_deploy_status(async_process_id)
body = result["body"]
index = 1
failure_dict = {}
while body["status"] in ["Pending", "InProgress", "Canceling"]:
if "stateDetail" in body:
if int(body["numberComponentsDeployed"]) < int(body["numberComponentsTotal"]):
Printer.get('log').write("[sf:%s] Request Status: %s (%s/%s) -- %s" % (
deploy_or_validate,
body["status"],
body["numberComponentsDeployed"],
body["numberComponentsTotal"],
body["stateDetail"]
))
else:
Printer.get('log').write("[sf:%s] TestRun Status: %s (%s/%s) -- %s" % (
deploy_or_validate,
body["status"],
body["numberTestsCompleted"],
body["numberTestsTotal"],
body["stateDetail"]
))
else:
Printer.get('log').write("[sf:%s] Request Status: %s" % (
deploy_or_validate, body["status"]
))
# Process Test Run Result
if "runTestResult" in body["details"] and \
"failures" in body["details"]["runTestResult"]:
failures = body["details"]["runTestResult"]["failures"]
if isinstance(failures, dict):
if failures["id"] not in failure_dict:
failure_dict[failures["id"]] = failures
Printer.get('log').write("-" * 84).write("Test Failures: ")
Printer.get('log').write("%s.\t%s" % (index, failures["message"]))
for msg in failures["stackTrace"].split("\n"):
Printer.get('log').write("\t%s" % msg)
# [sf:deploy]
Printer.get('log').write("-" * 84)
index += 1
elif isinstance(failures, list):
for f in failures:
if f["id"] not in failure_dict:
failure_dict[f["id"]] = f
Printer.get('log').write("-" * 84).write("Test Failures: ")
Printer.get('log').write("%s.\t%s" % (index, f["message"]))
# If compile error, there will be no stack trace
if isinstance(f["stackTrace"], str):
for msg in f["stackTrace"].split("\n"):
Printer.get('log').write("\t%s" % msg)
Printer.get('log').write("-" * 84)
index += 1
# Thread Wait
sleep_seconds = 2 if body["status"] == "Pending" else 1
time.sleep(sleep_seconds)
result = self.check_deploy_status(async_process_id)
body = result["body"]
# Check if job is canceled
if body["status"] == "Canceled":
Printer.get('log').write("\nBUILD FAILED", False)
Printer.get('log').write("cuem******* DEPLOYMENT FAILED ***********", False)
Printer.get('log').write("Request ID: %s" % async_process_id, False)
Printer.get('log').write("\nRequest Canceled", False)
Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
# If check status request failed, this will not be done
elif body["status"] == "Failed":
# Append failure message
Printer.get('log').write("[sf:%s] Request Failed\n\nBUILD FAILED" % deploy_or_validate)
Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
Printer.get('log').write("Request ID: %s" % async_process_id, False)
# print (json.dumps(body, indent=4))
# Output Failure Details
failures_messages = []
if "componentFailures" in body["details"]:
component_failures = body["details"]["componentFailures"]
if isinstance(component_failures, dict):
component_failure = component_failures
failures_messages.append("1. %s -- %s: %s (line %s)" % (
component_failure["fileName"],
component_failure["problemType"],
component_failure["problem"].replace("\n", " "),
component_failure["lineNumber"] \
if "lineNumber" in component_failure else "0"
))
elif isinstance(component_failures, list):
for index in range(len(component_failures)):
component_failure = component_failures[index]
failures_messages.append("%s. %s -- %s: %s (line %s)" % (
index+1,
component_failure["fileName"],
component_failure["problemType"],
component_failure["problem"],
component_failure["lineNumber"] \
if "lineNumber" in component_failure else "0"
))
elif "errorMessage" in body:
Printer.get('log').write("\n" + body["errorMessage"], False)
warning_messages = []
if "runTestResult" in body["details"]:
runTestResult = body["details"]["runTestResult"]
if "codeCoverageWarnings" in runTestResult:
coverage_warnings = runTestResult["codeCoverageWarnings"]
if isinstance(runTestResult["codeCoverageWarnings"], dict):
coverage_warnings = [coverage_warnings]
elif isinstance(runTestResult["codeCoverageWarnings"], list):
coverage_warnings = coverage_warnings
for warn in coverage_warnings:
if not isinstance(warn["name"], str): continue
warning_messages.append("%s -- %s" % (warn["name"], warn["message"]))
# Output failure message
if failures_messages:
Printer.get('log').write("\n\nAll Component Failures:", False)
Printer.get('log').write("\n"+"\n\n".join(failures_messages), False)
# Output warning message
if warning_messages:
Printer.get('log').write("\n\nTest Coverage Warnings:", False)
Printer.get('log').write("\n"+"\n".join(warning_messages), False)
# End of deploy result handling
Printer.get('log').write("\n*********** %s FAILED ***********" % (
deploy_or_validate.upper()), False)
else:
# Append succeed message
Printer.get('log').write("\n[sf:%s] Request Succeed" % deploy_or_validate, False)
Printer.get('log').write("[sf:%s] *********** %s SUCCEEDED ***********" % (
deploy_or_validate, deploy_or_validate.upper()), False)
Printer.get('log').write("[sf:%s] Finished request %s successfully." % (
deploy_or_validate, async_process_id), False)
# Total time
total_seconds = (datetime.datetime.now() - start_time).seconds
Printer.get('log').write("\n\nTotal time: %s seconds" % total_seconds, False)
# Display debug log message in the new view
if "header" in result and result["header"] and "debugLog" in result["header"]:
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "Debugging Information",
"input": result["header"]["debugLog"]
})
self.result = result
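The loop above illustrates a common time.sleep pattern: poll an asynchronous job and back off while it is still queued (2 seconds for "Pending", 1 second once in progress). Below is a minimal sketch of that pattern; the check_status callable and the added timeout are assumptions for the example, not part of the plugin's API.

import time

def poll_until_done(check_status, pending_interval=2, active_interval=1, timeout=600):
    # check_status is a hypothetical callable returning a dict with a "status" key.
    deadline = time.time() + timeout
    body = check_status()
    while body["status"] in ("Pending", "InProgress", "Canceling"):
        if time.time() > deadline:
            raise TimeoutError("job did not finish within %s seconds" % timeout)
        # Queued jobs change state rarely, so poll them less aggressively.
        time.sleep(pending_interval if body["status"] == "Pending" else active_interval)
        body = check_status()
    return body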
0
Example 88
Project: tp-qemu Source File: watchdog.py
@error.context_aware
def run(test, params, env):
"""
Configure watchdog, crash the guest and check if watchdog_action occurs.
Test Step:
1. see every function step
Params:
:param test: QEMU test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
timeout = int(params.get("login_timeout", '360'))
relogin_timeout = int(params.get("relogin_timeout", '240'))
watchdog_device_type = params.get("watchdog_device_type", "i6300esb")
watchdog_action = params.get("watchdog_action", "reset")
trigger_cmd = params.get("trigger_cmd", "echo c > /dev/watchdog")
# internal function
def _watchdog_device_check(session, watchdog_device):
"""
Check that the watchdog device has been found and initialized
successfully; raise an error if not.
"""
# when using ib700, its driver needs to be modprobed manually.
if watchdog_device == "ib700":
session.cmd("modprobe ib700wdt")
# when the WDT is i6300esb, check PCI info
if watchdog_device == "i6300esb":
error.context("checking pci info to ensure have WDT device",
logging.info)
o = session.cmd_output("lspci")
if o:
wdt_pci_info = re.findall(".*6300ESB Watchdog Timer", o)
if not wdt_pci_info:
raise error.TestFail("Can not find watchdog pci")
logging.info("Found watchdog pci device : %s" % wdt_pci_info)
# checking watchdog init info using dmesg
error.context("Checking watchdog init info using dmesg", logging.info)
dmesg_info = params.get("dmesg_info", "(i6300ESB|ib700wdt).*init")
(s, o) = session.cmd_status_output(
"dmesg | grep -i '%s' " % dmesg_info)
if s != 0:
error_msg = "Wactchdog device '%s' initialization failed "
raise error.TestError(error_msg % watchdog_device)
logging.info("Watchdog device '%s' add and init successfully"
% watchdog_device)
logging.debug("Init info : '%s'" % o)
def _trigger_watchdog(session, trigger_cmd=None):
"""
Trigger watchdog action
Params:
@session: guest connect session.
@trigger_cmd: cmd trigger the watchdog
"""
if trigger_cmd is not None:
error.context("Trigger Watchdog action using:'%s'." % trigger_cmd,
logging.info)
session.sendline(trigger_cmd)
def _action_check(session, watchdog_action):
"""
Check whether the watchdog action occurred; raise an error if it
did not.
"""
# when the watchdog action is pause, shutdown, reset or poweroff,
# the vm session will become unresponsive
response_timeout = int(params.get("response_timeout", '240'))
error.context("Check whether or not watchdog action '%s' took effect"
% watchdog_action, logging.info)
if not utils_misc.wait_for(lambda: not session.is_responsive(),
response_timeout, 0, 1):
if watchdog_action == "none" or watchdog_action == "debug":
logging.info("OK, the guest session is responsive still")
else:
txt = "Oops, seems action '%s' took no" % watchdog_action
txt += " effect, guest is still responsive."
raise error.TestFail(txt)
# when the action is poweroff or shutdown (without the no-shutdown
# option), the vm dies and qemu exits. For the other actions the vm
# monitor stays responsive and can report the vm status.
if (watchdog_action == "poweroff" or (watchdog_action == "shutdown" and
params.get("disable_shutdown") != "yes")):
if not utils_misc.wait_for(lambda: vm.is_dead(),
response_timeout, 0, 1):
txt = "Oops, seems '%s' action took no effect, " % watchdog_action
txt += "guest is still alive!"
raise error.TestFail(txt)
else:
if watchdog_action == "pause":
f_param = "paused"
elif watchdog_action == "shutdown":
f_param = "shutdown"
else:
f_param = "running"
if not utils_misc.wait_for(
lambda: vm.monitor.verify_status(f_param),
response_timeout, 0, 1):
logging.debug("Monitor status is:%s" % vm.monitor.get_status())
txt = "Oops, seems action '%s' took no effect" % watchdog_action
txt += " , Wrong monitor status!"
raise error.TestFail(txt)
# when the action is reset, we need to be able to relogin to the guest.
if watchdog_action == "reset":
logging.info("Try to login the guest after reboot")
vm.wait_for_login(timeout=relogin_timeout)
logging.info("Watchdog action '%s' come into effect." %
watchdog_action)
def check_watchdog_support():
"""
check that the host qemu-kvm supports the watchdog device
Test Step:
1. Send qemu command 'qemu -watchdog ?'
2. Check the watchdog type that the host support.
"""
qemu_binary = utils_misc.get_qemu_binary(params)
watchdog_type_check = params.get(
"watchdog_type_check", " -watchdog '?'")
qemu_cmd = qemu_binary + watchdog_type_check
# check which watchdog types the host supports.
error.context("Checking whether the host supports WDT '%s'"
% watchdog_device_type, logging.info)
watchdog_device = utils.system_output("%s 2>&1" % qemu_cmd,
retain_output=True)
if watchdog_device:
if re.findall(watchdog_device_type, watchdog_device, re.I):
logging.info("The host support '%s' type watchdog device" %
watchdog_device_type)
else:
logging.info("The host support watchdog device type is: '%s'"
% watchdog_device)
raise error.TestNAError("watdog %s isn't supported!"
% watchdog_device_type)
else:
raise error.TestNAError("No watchdog device supported by the host!")
def guest_boot_with_watchdog():
"""
check that the guest can boot with the watchdog device
Test Step:
1. Boot guest with watchdog device
2. Check that the watchdog device has been initialized successfully in the guest
"""
_watchdog_device_check(session, watchdog_device_type)
def watchdog_action_test():
"""
Watchdog action test
Test Step:
1. Boot guest with watchdog device
2. Check that the watchdog device has been initialized successfully in the guest
3. Trigger the watchdog action by opening /dev/watchdog
4. Ensure the watchdog_action takes effect.
"""
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
_action_check(session, watchdog_action)
def magic_close_support():
"""
Magic close the watchdog action.
Test Step:
1. Boot guest with watchdog device
2. Check that the watchdog device has been initialized successfully in the guest
3. Inside the guest, trigger the watchdog action
4. Inside the guest, before the heartbeat expires, close this action
5. Wait for the heartbeat timeout and check that the watchdog action is deactivated.
"""
response_timeout = int(params.get("response_timeout", '240'))
magic_cmd = params.get("magic_close_cmd", "echo V > /dev/watchdog")
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
# magic close
error.context("Magic close is start", logging.info)
_trigger_watchdog(session, magic_cmd)
if utils_misc.wait_for(lambda: not session.is_responsive(),
response_timeout, 0, 1):
error_msg = "Oops,Watchdog action took effect, magic close FAILED"
raise error.TestFail(error_msg)
logging.info("Magic close took effect.")
def migration_when_wdt_timeout():
"""
Migration when WDT timeout
Test Step:
1. Boot guest with watchdog device
2. Check that the watchdog device has been initialized successfully in the guest
3. Start VM with watchdog device, action reset|pause
4. Inside RHEL guest, trigger watchdog
5. Before WDT timeout, do vm migration
6. After migration, check the watchdog action take effect
"""
mig_timeout = float(params.get("mig_timeout", "3600"))
mig_protocol = params.get("migration_protocol", "tcp")
mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
error.context("Do migration(protocol:%s),Watchdog have been triggered."
% mig_protocol, logging.info)
args = (mig_timeout, mig_protocol, mig_cancel_delay)
migrate_thread = utils.InterruptedThread(vm.migrate, args)
migrate_thread.start()
_action_check(session, watchdog_action)
migrate_thread.join(timeout=mig_timeout)
def hotplug_unplug_watchdog_device():
"""
Hotplug/unplug watchdog device
Test Step:
1. Start VM with "-watchdog-action pause" CLI option
2. Add WDT via monitor
3. Trigger watchdog action in guest
4. Remove WDT device through monitor cmd "device_del"
5. Resume and relogin the guest, check the device have been removed.
"""
session = vm.wait_for_login(timeout=timeout)
o = session.cmd_output("lspci")
if o:
wdt_pci_info = re.findall(".*6300ESB Watchdog Timer", o)
if wdt_pci_info:
raise error.TestFail("Can find watchdog pci")
plug_watchdog_device = params.get("plug_watchdog_device", "i6300esb")
watchdog_device_add = ("device_add driver=%s, id=%s"
% (plug_watchdog_device, "watchdog"))
watchdog_device_del = ("device_del id=%s" % "watchdog")
error.context("Hotplug watchdog device '%s'" % plug_watchdog_device,
logging.info)
vm.monitor.send_args_cmd(watchdog_device_add)
# wait for watchdog device init
time.sleep(5)
_watchdog_device_check(session, plug_watchdog_device)
_trigger_watchdog(session, trigger_cmd)
_action_check(session, watchdog_action)
error.context("Hot unplug watchdog device", logging.info)
vm.monitor.send_args_cmd(watchdog_device_del)
error.context("Resume the guest, check the WDT have been removed",
logging.info)
vm.resume()
session = vm.wait_for_login(timeout=timeout)
o = session.cmd_output("lspci")
if o:
wdt_pci_info = re.findall(".*6300ESB Watchdog Timer", o)
if wdt_pci_info:
raise error.TestFail("Oops, find watchdog pci, unplug failed")
logging.info("The WDT remove successfully")
# main procedure
test_type = params.get("test_type")
check_watchdog_support()
error.context("'%s' test starting ... " % test_type, logging.info)
error.context("Boot VM with WDT(Device:'%s', Action:'%s'),and try to login"
% (watchdog_device_type, watchdog_action), logging.info)
params["start_vm"] = "yes"
env_process.preprocess_vm(test, params, env, params.get("main_vm"))
vm = env.get_vm(params["main_vm"])
session = vm.wait_for_login(timeout=timeout)
if params.get("setup_runlevel") == "yes":
error.context("Setup the runlevel for guest", logging.info)
utils_test.qemu.setup_runlevel(params, session)
if test_type in locals():
test_running = locals()[test_type]
test_running()
else:
raise error.TestError("Oops test %s doesn't exist, have a check please."
% test_type)
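This test uses a flat time.sleep(5) after the monitor's device_add so the guest has time to initialize the hot-plugged watchdog, and elsewhere relies on utils_misc.wait_for for bounded polling. A rough sketch of such a bounded poll, which is usually preferable to a blind sleep; the helper below is illustrative, not the test suite's implementation.

import time

def wait_for(condition, timeout=5.0, step=0.5):
    # Return True as soon as condition() holds, False once timeout elapses.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(step)  # give the guest time to initialize the device
    return False

# Hypothetical usage, replacing the blind time.sleep(5) after hotplug:
# wait_for(lambda: "6300ESB" in session.cmd_output("lspci"), timeout=10)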
0
Example 89
Project: BitXBay Source File: electrum_main.py
def main():
global guiWindow
parser = arg_parser()
options, args = parser.parse_args()
if options.portable and options.wallet_path is None:
options.electrum_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'electrum_data')
# config is an object passed to the various constructors (wallet, interface, gui)
if is_android:
config_options = {
'portable': True,
'verbose': True,
'gui': 'android',
'auto_cycle': True,
}
else:
config_options = eval(str(options))
for k, v in config_options.items():
if v is None:
config_options.pop(k)
set_verbosity(config_options.get('verbose'))
config = SimpleConfig(config_options)
cmd = ''
if len(args) == 0:
url = None
cmd = 'gui'
elif len(args) == 1 and re.match('^bitcoin:', args[0]):
url = args[0]
cmd = 'gui'
else:
cmd = args[0]
if cmd == 'gui':
gui_name = 'stdio'  # config.get('gui', 'classic')
if gui_name in ['lite', 'classic']:
gui_name = 'qt'
try:
gui = __import__('electrum_gui.' + gui_name, fromlist=['electrum_gui'])
except ImportError:
traceback.print_exc(file=sys.stdout)
sys.exit()
#sys.exit("Error: Unknown GUI: " + gui_name )
# network interface
if not options.offline:
network = Network(config)
network.start()
else:
network = None
guiWindow = gui = gui.ElectrumGui(config, network)
gui.main(url)
if network:
network.stop()
# we use daemon threads, their termination is enforced.
# this sleep command gives them time to terminate cleanly.
time.sleep(0.1)
sys.exit(0)
if cmd not in known_commands:
cmd = 'help'
cmd = known_commands[cmd]
# instantiate wallet for command-line
storage = WalletStorage(config)
if cmd.name in ['create', 'restore']:
if storage.file_exists:
sys.exit("Error: Remove the existing wallet first!")
if options.password is not None:
password = options.password
elif cmd.name == 'restore' and options.mpk:
password = None
else:
password = prompt_password("Password (hit return if you do not wish to encrypt your wallet):")
# if config.server is set, the user either passed the server on the command
# line or chose it previously. if no server was passed on the command line,
# we just pick a random one.
if not config.get('server'):
config.set_key('server', pick_random_server())
#fee = options.tx_fee if options.tx_fee else raw_input("fee (default:%s):" % (str(Decimal(wallet.fee)/100000000)))
#gap = options.gap_limit if options.gap_limit else raw_input("gap limit (default 5):")
#if fee:
# wallet.set_fee(float(fee)*100000000)
#if gap:
# wallet.change_gap_limit(int(gap))
if cmd.name == 'restore':
if options.mpk:
wallet = Wallet.from_mpk(options.mpk, storage)
else:
import getpass
seed = getpass.getpass(prompt="seed:", stream=None) if options.concealed else raw_input("seed:")
wallet = Wallet.from_seed(str(seed),storage)
if not wallet:
sys.exit("Error: Invalid seed")
wallet.save_seed(password)
if not options.offline:
network = Network(config)
network.start()
wallet.start_threads(network)
print_msg("Recovering wallet...")
wallet.restore(lambda x: x)
if wallet.is_found():
print_msg("Recovery successful")
else:
print_msg("Warning: Found no history for this wallet")
else:
wallet.synchronize()
print_msg("Warning: This wallet was restored offline. It may contain more addresses than displayed.")
else:
wallet = Wallet(storage)
wallet.init_seed(None)
wallet.save_seed(password)
wallet.synchronize()
print_msg("Your wallet generation seed is:\n\"%s\"" % wallet.get_mnemonic(password))
print_msg("Please keep it in a safe place; if you lose it, you will not be able to restore your wallet.")
print_msg("Wallet saved in '%s'" % wallet.storage.path)
# terminate
sys.exit(0)
if cmd.name not in ['create', 'restore'] and cmd.requires_wallet and not storage.file_exists:
print_msg("Error: Wallet file not found.")
print_msg("Type 'electrum create' to create a new wallet, or provide a path to a wallet with the -w option")
sys.exit(0)
if cmd.requires_wallet:
wallet = Wallet(storage)
else:
wallet = None
# important warning
if cmd.name in ['dumpprivkey', 'dumpprivkeys']:
print_msg("WARNING: ALL your private keys are secret.")
print_msg("Exposing a single private key can compromise your entire wallet!")
print_msg("In particular, DO NOT use 'redeem private key' services proposed by third parties.")
# commands needing password
if cmd.requires_password:
if wallet.seed == '':
seed = ''
password = None
elif wallet.use_encryption:
password = prompt_password('Password:', False)
if not password:
print_msg("Error: Password required")
sys.exit(1)
# check password
try:
seed = wallet.get_seed(password)
except Exception:
print_msg("Error: This password does not decode this wallet.")
sys.exit(1)
else:
password = None
seed = wallet.get_seed(None)
else:
password = None
# add missing arguments, do type conversions
if cmd.name == 'importprivkey':
# See if they specified a key on the cmd line, if not prompt
if len(args) == 1:
args.append(prompt_password('Enter PrivateKey (will not echo):', False))
elif cmd.name == 'signrawtransaction':
args = [cmd, args[1], json.loads(args[2]) if len(args) > 2 else [], json.loads(args[3]) if len(args) > 3 else []]
elif cmd.name == 'createmultisig':
args = [cmd, int(args[1]), json.loads(args[2])]
elif cmd.name == 'createrawtransaction':
args = [cmd, json.loads(args[1]), json.loads(args[2])]
elif cmd.name == 'listaddresses':
args = [cmd, options.show_all, options.show_labels]
elif cmd.name in ['payto', 'mktx']:
domain = [options.from_addr] if options.from_addr else None
args = ['mktx', args[1], Decimal(args[2]), Decimal(options.tx_fee) if options.tx_fee else None, options.change_addr, domain]
elif cmd.name in ['paytomany', 'mksendmanytx']:
domain = [options.from_addr] if options.from_addr else None
outputs = []
for i in range(1, len(args), 2):
if len(args) < i+2:
print_msg("Error: Mismatched arguments.")
sys.exit(1)
outputs.append((args[i], Decimal(args[i+1])))
args = ['mksendmanytx', outputs, Decimal(options.tx_fee) if options.tx_fee else None, options.change_addr, domain]
elif cmd.name == 'help':
if len(args) < 2:
print_help(parser)
# check the number of arguments
if len(args) - 1 < cmd.min_args:
print_msg("Not enough arguments")
print_msg("Syntax:", cmd.syntax)
sys.exit(1)
if cmd.max_args >= 0 and len(args) - 1 > cmd.max_args:
print_msg("too many arguments", args)
print_msg("Syntax:", cmd.syntax)
sys.exit(1)
if cmd.max_args < 0:
if len(args) > cmd.min_args + 1:
message = ' '.join(args[cmd.min_args:])
print_msg("Warning: Final argument was reconstructed from several arguments:", repr(message))
args = args[0:cmd.min_args] + [message]
# run the command
if cmd.name == 'deseed':
if not wallet.seed:
print_msg("Error: This wallet has no seed")
else:
ns = wallet.storage.path + '.seedless'
print_msg("Warning: you are going to create a seedless wallet'\nIt will be saved in '%s'" % ns)
if raw_input("Are you sure you want to continue? (y/n) ") in ['y', 'Y', 'yes']:
wallet.storage.path = ns
wallet.seed = ''
wallet.storage.put('seed', '', True)
wallet.use_encryption = False
wallet.storage.put('use_encryption', wallet.use_encryption, True)
for k in wallet.imported_keys.keys():
wallet.imported_keys[k] = ''
wallet.storage.put('imported_keys', wallet.imported_keys, True)
print_msg("Done.")
else:
print_msg("Action canceled.")
elif cmd.name == 'getconfig':
key = args[1]
out = config.get(key)
print_msg(out)
elif cmd.name == 'setconfig':
key, value = args[1:3]
try:
value = ast.literal_eval(value)
except:
pass
config.set_key(key, value, True)
print_msg(True)
elif cmd.name == 'password':
new_password = prompt_password('New password:')
wallet.update_password(password, new_password)
else:
run_command(cmd, password, args)
time.sleep(0.1)
sys.exit(0)
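Both exit paths above call time.sleep(0.1) before sys.exit(0) so that daemon threads get a moment to terminate cleanly. A small standalone illustration of that idiom; the heartbeat worker is invented for the example, and joining the thread or signalling it with an Event would be the more robust design.

import sys
import threading
import time

def heartbeat():
    # Stand-in for background network chatter.
    while True:
        time.sleep(0.02)

t = threading.Thread(target=heartbeat)
t.daemon = True  # daemon threads are killed abruptly at interpreter exit
t.start()
time.sleep(0.1)  # short grace period before exiting, as in the example
sys.exit(0)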
0
Example 90
Project: SickGear Source File: SickBeard.py
def start(self):
# do some preliminary stuff
sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
sickbeard.DATA_DIR = sickbeard.PROG_DIR
sickbeard.MY_ARGS = sys.argv[1:]
sickbeard.SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, '')
except (locale.Error, IOError):
pass
try:
sickbeard.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured I'll just randomly force UTF-8
if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
sickbeard.SYS_ENCODING = 'UTF-8'
if not hasattr(sys, 'setdefaultencoding'):
moves.reload_module(sys)
try:
# pylint: disable=E1101
# On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
sys.setdefaultencoding(sickbeard.SYS_ENCODING)
except:
print('Sorry, you MUST add the SickGear folder to the PYTHONPATH environment variable')
print('or find another way to force Python to use %s for string encoding.' % sickbeard.SYS_ENCODING)
sys.exit(1)
# Need console logging for SickBeard.py and SickBeard-console.exe
self.consoleLogging = (not hasattr(sys, 'frozen')) or (sickbeard.MY_NAME.lower().find('-console') > 0)
# Rename the main thread
threading.currentThread().name = 'MAIN'
try:
opts, args = getopt.getopt(sys.argv[1:], 'hfqdp::',
['help', 'forceupdate', 'quiet', 'nolaunch', 'daemon', 'pidfile=', 'port=',
'datadir=', 'config=', 'noresize']) # @UnusedVariable
except getopt.GetoptError:
sys.exit(self.help_message())
for o, a in opts:
# Prints help message
if o in ('-h', '--help'):
sys.exit(self.help_message())
# For now we'll just silence the logging
if o in ('-q', '--quiet'):
self.consoleLogging = False
# Should we update (from indexer) all shows in the DB right away?
if o in ('-f', '--forceupdate'):
self.forceUpdate = True
# Suppress launching web browser
# Needed for OSes without default browser assigned
# Prevent duplicate browser window when restarting in the app
if o in ('--nolaunch',):
self.noLaunch = True
# Override default/configured port
if o in ('-p', '--port'):
try:
self.forcedPort = int(a)
except ValueError:
sys.exit('Port: %s is not a number. Exiting.' % a)
# Run as a double forked daemon
if o in ('-d', '--daemon'):
self.runAsDaemon = True
# When running as daemon disable consoleLogging and don't start browser
self.consoleLogging = False
self.noLaunch = True
if sys.platform == 'win32':
self.runAsDaemon = False
# Write a pidfile if requested
if o in ('--pidfile',):
self.CREATEPID = True
self.PIDFILE = str(a)
# If the pidfile already exists, sickbeard may still be running, so exit
if os.path.exists(self.PIDFILE):
sys.exit('PID file: %s already exists. Exiting.' % self.PIDFILE)
# Specify folder to load the config file from
if o in ('--config',):
sickbeard.CONFIG_FILE = os.path.abspath(a)
# Specify folder to use as the data dir
if o in ('--datadir',):
sickbeard.DATA_DIR = os.path.abspath(a)
# Prevent resizing of the banner/posters even if PIL is installed
if o in ('--noresize',):
sickbeard.NO_RESIZE = True
# The pidfile is only useful in daemon mode, make sure we can write the file properly
if self.CREATEPID:
if self.runAsDaemon:
pid_dir = os.path.dirname(self.PIDFILE)
if not os.access(pid_dir, os.F_OK):
sys.exit(u"PID dir: %s doesn't exist. Exiting." % pid_dir)
if not os.access(pid_dir, os.W_OK):
sys.exit(u'PID dir: %s must be writable (write permissions). Exiting.' % pid_dir)
else:
if self.consoleLogging:
print(u'Not running in daemon mode. PID file creation disabled')
self.CREATEPID = False
# If they don't specify a config file then put it in the data dir
if not sickbeard.CONFIG_FILE:
sickbeard.CONFIG_FILE = os.path.join(sickbeard.DATA_DIR, 'config.ini')
# Make sure that we can create the data dir
if not os.access(sickbeard.DATA_DIR, os.F_OK):
try:
os.makedirs(sickbeard.DATA_DIR, 0o744)
except os.error:
sys.exit(u'Unable to create data directory: %s Exiting.' % sickbeard.DATA_DIR)
# Make sure we can write to the data dir
if not os.access(sickbeard.DATA_DIR, os.W_OK):
sys.exit(u'Data directory: %s must be writable (write permissions). Exiting.' % sickbeard.DATA_DIR)
# Make sure we can write to the config file
if not os.access(sickbeard.CONFIG_FILE, os.W_OK):
if os.path.isfile(sickbeard.CONFIG_FILE):
sys.exit(u'Config file: %s must be writable (write permissions). Exiting.' % sickbeard.CONFIG_FILE)
elif not os.access(os.path.dirname(sickbeard.CONFIG_FILE), os.W_OK):
sys.exit(u'Config file directory: %s must be writable (write permissions). Exiting.'
% os.path.dirname(sickbeard.CONFIG_FILE))
os.chdir(sickbeard.DATA_DIR)
if self.consoleLogging:
print(u'Starting up SickGear from %s' % sickbeard.CONFIG_FILE)
# Load the config and publish it to the sickbeard package
if not os.path.isfile(sickbeard.CONFIG_FILE):
print(u'Unable to find "%s", all settings will be default!' % sickbeard.CONFIG_FILE)
sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)
# check all db versions
for d, min_v, max_v, mo in [
('failed.db', sickbeard.failed_db.MIN_DB_VERSION, sickbeard.failed_db.MAX_DB_VERSION, 'FailedDb'),
('cache.db', sickbeard.cache_db.MIN_DB_VERSION, sickbeard.cache_db.MAX_DB_VERSION, 'CacheDb'),
('sickbeard.db', sickbeard.mainDB.MIN_DB_VERSION, sickbeard.mainDB.MAX_DB_VERSION, 'MainDb')
]:
cur_db_version = db.DBConnection(d).checkDBVersion()
if cur_db_version > 0:
if cur_db_version < min_v:
print(u'Your [%s] database version (%s) is too old to migrate from with this version of SickGear'
% (d, cur_db_version))
sys.exit(u'Upgrade using a previous version of SG first,'
+ u' or start with no database file to begin fresh')
if cur_db_version > max_v:
print(u'Your [%s] database version (%s) has been incremented past'
u' what this version of SickGear supports. Trying to rollback now. Please wait...' %
(d, cur_db_version))
try:
rollback_loaded = db.get_rollback_module()
if None is not rollback_loaded:
rollback_loaded.__dict__[mo]().run(max_v)
else:
print(u'ERROR: Could not download Rollback Module.')
except (StandardError, Exception):
pass
if db.DBConnection(d).checkDBVersion() > max_v:
print(u'Rollback failed.')
sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
print(u'Rollback of [%s] successful.' % d)
# Initialize the config and our threads
sickbeard.initialize(consoleLogging=self.consoleLogging)
if self.runAsDaemon:
self.daemonize()
# Get PID
sickbeard.PID = os.getpid()
if self.forcedPort:
logger.log(u'Forcing web server to port %s' % self.forcedPort)
self.startPort = self.forcedPort
else:
self.startPort = sickbeard.WEB_PORT
if sickbeard.WEB_LOG:
self.log_dir = sickbeard.LOG_DIR
else:
self.log_dir = None
# sickbeard.WEB_HOST is available as a configuration value in various
# places but is not configurable. It is supported here for historic reasons.
if sickbeard.WEB_HOST and sickbeard.WEB_HOST != '0.0.0.0':
self.webhost = sickbeard.WEB_HOST
else:
if sickbeard.WEB_IPV6:
self.webhost = '::'
else:
self.webhost = '0.0.0.0'
# web server options
self.web_options = {
'port': int(self.startPort),
'host': self.webhost,
'data_root': os.path.join(sickbeard.PROG_DIR, 'gui', sickbeard.GUI_NAME),
'web_root': sickbeard.WEB_ROOT,
'log_dir': self.log_dir,
'username': sickbeard.WEB_USERNAME,
'password': sickbeard.WEB_PASSWORD,
'enable_https': sickbeard.ENABLE_HTTPS,
'handle_reverse_proxy': sickbeard.HANDLE_REVERSE_PROXY,
'https_cert': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_CERT),
'https_key': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_KEY),
}
# start web server
try:
# used to check if existing SG instances have been started
sickbeard.helpers.wait_for_free_port(self.web_options['host'], self.web_options['port'])
self.webserver = WebServer(self.web_options)
self.webserver.start()
except Exception:
logger.log(u'Unable to start web server, is something else running on port %d?' % self.startPort,
logger.ERROR)
if sickbeard.LAUNCH_BROWSER and not self.runAsDaemon:
logger.log(u'Launching browser and exiting', logger.ERROR)
sickbeard.launch_browser(self.startPort)
os._exit(1)
# Check if we need to perform a restore first
restoreDir = os.path.join(sickbeard.DATA_DIR, 'restore')
if os.path.exists(restoreDir):
if self.restore(restoreDir, sickbeard.DATA_DIR):
logger.log(u'Restore successful...')
else:
logger.log_error_and_exit(u'Restore FAILED!')
# Build from the DB to start with
self.loadShowsFromDB()
# Fire up all our threads
sickbeard.start()
# Build internal name cache
name_cache.buildNameCache()
# refresh network timezones
network_timezones.update_network_dict()
# load all ids from xem
startup_background_tasks = threading.Thread(name='FETCH-XEMDATA', target=sickbeard.scene_exceptions.get_xem_ids)
startup_background_tasks.start()
# sure, why not?
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.trimHistory()
# Start an update if we're supposed to
if self.forceUpdate or sickbeard.UPDATE_SHOWS_ON_START:
sickbeard.showUpdateScheduler.action.run(force=True) # @UndefinedVariable
# Launch browser
if sickbeard.LAUNCH_BROWSER and not (self.noLaunch or self.runAsDaemon):
sickbeard.launch_browser(self.startPort)
# main loop
while True:
time.sleep(1)
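The startup routine ends in a keep-alive loop, while True: time.sleep(1), which parks the main thread while the web server and worker threads run. A sketch of the same idiom using a threading.Event, so a signal handler or another thread can end the loop promptly; this is a variation for illustration, not SickGear's code.

import threading

shutdown_event = threading.Event()

def main_loop():
    # Parks the calling thread; wakes at most once per second, like
    # time.sleep(1), but returns as soon as another thread sets the event.
    while not shutdown_event.is_set():
        shutdown_event.wait(1)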
0
Example 91
Project: pipeline Source File: ROSE2_META.py
def main():
'''
main run call
'''
debug = False
from optparse import OptionParser
usage = "usage: %prog [options] -g [GENOME] -i [INPUT_REGION_GFF] -r [RANKBY_BAM_FILE] -o [OUTPUT_FOLDER] [OPTIONAL_FLAGS]"
parser = OptionParser(usage=usage)
# required flags
parser.add_option("-i", "--i", dest="input", nargs=1, default=None,
help="Enter a comma separated list of .gff or .bed file of binding sites used to make enhancers")
parser.add_option("-r", "--rankby", dest="rankby", nargs=1, default=None,
help="Enter a comma separated list of bams to rank by")
parser.add_option("-o", "--out", dest="out", nargs=1, default=None,
help="Enter an output folder")
parser.add_option("-g", "--genome", dest="genome", nargs=1, default=None,
help="Enter the genome build (MM9,MM8,HG18,HG19)")
# optional flags
parser.add_option("-n", "--name", dest="name", nargs=1, default=None,
help="Provide a name for the analysis otherwise ROSE will guess")
parser.add_option("-c", "--control", dest="control", nargs=1, default=None,
help="Enter a comma separated list of control bams. Can either provide a single control bam for all rankby bams, or provide a control bam for each individual bam")
parser.add_option("-s", "--stitch", dest="stitch", nargs=1, default='',
help="Enter a max linking distance for stitching. Default will determine optimal stitching parameter")
parser.add_option("-t", "--tss", dest="tss", nargs=1, default=0,
help="Enter a distance from TSS to exclude. 0 = no TSS exclusion")
parser.add_option("--mask", dest="mask", nargs=1, default=None,
help="Mask a set of regions from analysis. Provide a .bed or .gff of masking regions")
# RETRIEVING FLAGS
(options, args) = parser.parse_args()
if not options.input or not options.rankby or not options.out or not options.genome:
print('hi there')
parser.print_help()
exit()
# making the out folder if it doesn't exist
outFolder = utils.formatFolder(options.out, True)
# figuring out folder schema
gffFolder = utils.formatFolder(outFolder + 'gff/', True)
mappedFolder = utils.formatFolder(outFolder + 'mappedGFF/', True)
# GETTING INPUT FILE(s)
inputList = [inputFile for inputFile in options.input.split(',') if len(inputFile) > 1]
#converting all input files into GFFs and moving into the GFF folder
inputGFFList = []
for inputFile in inputList:
if inputFile.split('.')[-1] == 'bed':
# CONVERTING A BED TO GFF
inputGFFName = inputFile.split('/')[-1][0:-4] #strips the last 4 characters i.e. '.bed'
inputGFFFile = '%s%s.gff' % (gffFolder, inputGFFName)
utils.bedToGFF(inputFile, inputGFFFile)
elif inputFile.split('.')[-1] == 'gff':
# COPY THE INPUT GFF TO THE GFF FOLDER
os.system('cp %s %s' % (inputFile, gffFolder))
inputGFFFile = '%s%s' % (gffFolder,inputFile.split('/')[-1])
else:
print('WARNING: INPUT FILE DOES NOT END IN .gff or .bed. ASSUMING .gff FILE FORMAT')
# COPY THE INPUT GFF TO THE GFF FOLDER
os.system('cp %s %s' % (inputFile, gffFolder))
inputGFFFile = '%s%s' % (gffFolder,inputFile.split('/')[-1])
inputGFFList.append(inputGFFFile)
# GETTING THE LIST OF BAMFILES TO PROCESS
#either same number of bams for rankby and control
#or only 1 control #or none!
#bamlist should be all rankby bams followed by control bams
bamFileList = []
if options.control:
controlBamList = [bam for bam in options.control.split(',') if len(bam) >0]
rankbyBamList = [bam for bam in options.rankby.split(',') if len(bam) >0]
if len(controlBamList) == len(rankbyBamList):
#case where an equal number of backgrounds are given
bamFileList = rankbyBamList + controlBamList
elif len(controlBamList) == 1:
#case where a universal background is applied
bamFileList = rankbyBamList + controlBamList*len(rankbyBamList)
else:
print('ERROR: EITHER PROVIDE A SINGLE CONTROL BAM FOR ALL SAMPLES, OR ONE CONTROL BAM FOR EACH SAMPLE')
sys.exit()
else:
bamFileList = [bam for bam in options.rankby.split(',') if len(bam) > 0]
# Stitch parameter
if options.stitch == '':
stitchWindow = ''
else:
stitchWindow = int(options.stitch)
# tss options
tssWindow = int(options.tss)
if tssWindow != 0:
removeTSS = True
else:
removeTSS = False
# GETTING THE GENOME
genome = string.upper(options.genome)
print('USING %s AS THE GENOME' % (genome))
# GETTING THE CORRECT ANNOT FILE
genomeDict = {
'HG18': '%s/annotation/hg18_refseq.ucsc' % (codeFolder),
'MM9': '%s/annotation/mm9_refseq.ucsc' % (codeFolder),
'HG19': '%s/annotation/hg19_refseq.ucsc' % (codeFolder),
'MM8': '%s/annotation/mm8_refseq.ucsc' % (codeFolder),
'MM10': '%s/annotation/mm10_refseq.ucsc' % (codeFolder),
'RN4': '%s/annotation/rn4_refseq.ucsc' % (codeFolder),
}
try:
annotFile = genomeDict[genome.upper()]
except KeyError:
print('ERROR: UNSUPPORTED GENOME TYPE %s' % (genome))
sys.exit()
#FINDING THE ANALYSIS NAME
if options.name:
inputName = options.name
else:
inputName = inputGFFList[0].split('/')[-1].split('.')[0]
print('USING %s AS THE ANALYSIS NAME' % (inputName))
print('FORMATTING INPUT REGIONS')
# MAKING THE RAW INPUT FILE FROM THE INPUT GFFs
#use a simpler unique region naming system
if len(inputGFFList) == 1:
inputGFF = utils.parseTable(inputGFFList[0],'\t')
else:
inputLoci = []
for gffFile in inputGFFList:
print('\tprocessing %s' % (gffFile))
gff = utils.parseTable(gffFile,'\t')
gffCollection = utils.gffToLocusCollection(gff,50)
inputLoci += gffCollection.getLoci()
inputCollection = utils.LocusCollection(inputLoci,50)
inputCollection = inputCollection.stitchCollection() # stitches to produce unique regions
inputGFF = utils.locusCollectionToGFF(inputCollection)
formattedGFF = []
#now number things appropriately
for i,line in enumerate(inputGFF):
#use the coordinates to make a new id inputname_chr_sense_start_stop
chrom = line[0]
coords = [int(line[3]) ,int(line[4])]
sense = line[6]
lineID = '%s_%s' % (inputName,str(i+1)) #1 indexing
newLine = [chrom,lineID,lineID,min(coords),max(coords),'',sense,'',lineID]
formattedGFF.append(newLine)
#name of the master input gff file
masterGFFFile = '%s%s_%s_ALL_-0_+0.gff' % (gffFolder,string.upper(genome),inputName)
utils.unParseTable(formattedGFF,masterGFFFile,'\t')
print('USING %s AS THE INPUT GFF' % (masterGFFFile))
# MAKING THE START DICT
print('MAKING START DICT')
startDict = utils.makeStartDict(annotFile)
#GET CHROMS FOUND IN THE BAMS
print('GETTING CHROMS IN BAMFILES')
bamChromList = getBamChromList(bamFileList)
print("USING THE FOLLOWING CHROMS")
print(bamChromList)
#LOADING IN THE GFF AND FILTERING BY CHROM
print('LOADING AND FILTERING THE GFF')
inputGFF = filterGFF(masterGFFFile,bamChromList)
# LOADING IN THE BOUND REGION REFERENCE COLLECTION
print('LOADING IN GFF REGIONS')
referenceCollection = utils.gffToLocusCollection(inputGFF)
print('CHECKING REFERENCE COLLECTION:')
checkRefCollection(referenceCollection)
# MASKING REFERENCE COLLECTION
# see if there's a mask
if options.mask:
maskFile = options.mask
# if it's a bed file
if maskFile.split('.')[-1].upper() == 'BED':
maskGFF = utils.bedToGFF(maskFile)
elif maskFile.split('.')[-1].upper() == 'GFF':
maskGFF = utils.parseTable(maskFile, '\t')
else:
print("MASK MUST BE A .gff or .bed FILE")
sys.exit()
maskCollection = utils.gffToLocusCollection(maskGFF)
# now mask the reference loci
referenceLoci = referenceCollection.getLoci()
filteredLoci = [locus for locus in referenceLoci if len(maskCollection.getOverlap(locus, 'both')) == 0]
print("FILTERED OUT %s LOCI THAT WERE MASKED IN %s" % (len(referenceLoci) - len(filteredLoci), maskFile))
referenceCollection = utils.LocusCollection(filteredLoci, 50)
# NOW STITCH REGIONS
print('STITCHING REGIONS TOGETHER')
stitchedCollection, debugOutput, stitchWindow = regionStitching(referenceCollection, inputName, outFolder, stitchWindow, tssWindow, annotFile, removeTSS)
# NOW MAKE A STITCHED COLLECTION GFF
print('MAKING GFF FROM STITCHED COLLECTION')
stitchedGFF = utils.locusCollectionToGFF(stitchedCollection)
print(stitchWindow)
print(type(stitchWindow))
if not removeTSS:
stitchedGFFFile = '%s%s_%sKB_STITCHED.gff' % (gffFolder, inputName, str(stitchWindow / 1000))
stitchedGFFName = '%s_%sKB_STITCHED' % (inputName, str(stitchWindow / 1000))
debugOutFile = '%s%s_%sKB_STITCHED.debug' % (gffFolder, inputName, str(stitchWindow / 1000))
else:
stitchedGFFFile = '%s%s_%sKB_STITCHED_TSS_DISTAL.gff' % (gffFolder, inputName, str(stitchWindow / 1000))
stitchedGFFName = '%s_%sKB_STITCHED_TSS_DISTAL' % (inputName, str(stitchWindow / 1000))
debugOutFile = '%s%s_%sKB_STITCHED_TSS_DISTAL.debug' % (gffFolder, inputName, str(stitchWindow / 1000))
# WRITING DEBUG OUTPUT TO DISK
if debug:
print('WRITING DEBUG OUTPUT TO DISK AS %s' % (debugOutFile))
utils.unParseTable(debugOutput, debugOutFile, '\t')
# WRITE THE GFF TO DISK
print('WRITING STITCHED GFF TO DISK AS %s' % (stitchedGFFFile))
utils.unParseTable(stitchedGFF, stitchedGFFFile, '\t')
# SETTING UP THE OVERALL OUTPUT FILE
outputFile1 = outFolder + stitchedGFFName + '_ENHANCER_REGION_MAP.txt'
print('OUTPUT WILL BE WRITTEN TO %s' % (outputFile1))
# MAPPING TO THE NON STITCHED (ORIGINAL GFF)
# MAPPING TO THE STITCHED GFF
# Try to use the bamliquidator_path.py script on the cluster, otherwise fail over to a local copy (in PATH), otherwise fail.
bamFileListUnique = list(bamFileList)
bamFileListUnique = utils.uniquify(bamFileListUnique)
#prevent redundant mapping
print("MAPPING TO THE FOLLOWING BAMS:")
print(bamFileListUnique)
for bamFile in bamFileListUnique:
bamFileName = bamFile.split('/')[-1]
# MAPPING TO THE STITCHED GFF
mappedOut1Folder = '%s%s_%s_MAPPED' % (mappedFolder, stitchedGFFName, bamFileName)
mappedOut1File = '%s%s_%s_MAPPED/matrix.txt' % (mappedFolder, stitchedGFFName, bamFileName)
if utils.checkOutput(mappedOut1File, 0.2, 0.2):
print("FOUND %s MAPPING DATA FOR BAM: %s" % (stitchedGFFFile, mappedOut1File))
else:
cmd1 = bamliquidator_path + " --sense . -e 200 --match_bamToGFF -r %s -o %s %s" % (stitchedGFFFile, mappedOut1Folder, bamFile)
print(cmd1)
os.system(cmd1)
if utils.checkOutput(mappedOut1File,0.2,5):
print("SUCCESSFULLY MAPPED TO %s FROM BAM: %s" % (stitchedGFFFile, bamFileName))
else:
print("ERROR: FAILED TO MAP %s FROM BAM: %s" % (stitchedGFFFile, bamFileName))
sys.exit()
print('BAM MAPPING COMPLETED NOW MAPPING DATA TO REGIONS')
# CALCULATE DENSITY BY REGION
# NEED TO FIX THIS FUNCTION TO ACCOUNT FOR DIFFERENT OUTPUTS OF LIQUIDATOR
mapCollection(stitchedCollection, referenceCollection, bamFileList, mappedFolder, outputFile1, refName=stitchedGFFName)
print('FINDING AVERAGE SIGNAL AMONGST BAMS')
metaOutputFile = collapseRegionMap(outputFile1,inputName + '_MERGED_SIGNAL',controlBams=options.control)
#now try the merging
print('CALLING AND PLOTTING SUPER-ENHANCERS')
rankbyName = inputName + '_MERGED_SIGNAL'
controlName = 'NONE'
cmd = 'R --no-save %s %s %s %s < %sROSE2_callSuper.R' % (outFolder, metaOutputFile, inputName, controlName,codeFolder)
print(cmd)
os.system(cmd)
# calling the gene mapper
time.sleep(20)
superTableFile = "%s_SuperEnhancers.table.txt" % (inputName)
#for now don't use ranking bam to call top genes
cmd = "python %sROSE2_geneMapper.py -g %s -i %s%s &" % (codeFolder,genome, outFolder, superTableFile)
os.system(cmd)
stretchTableFile = "%s_StretchEnhancers.table.txt" % (inputName)
cmd = "python %sROSE2_geneMapper.py -g %s -i %s%s &" % (codeFolder,genome, outFolder, stretchTableFile)
os.system(cmd)
superStretchTableFile = "%s_SuperStretchEnhancers.table.txt" % (inputName)
cmd = "python %sROSE2_geneMapper.py -g %s -i %s%s &" % (codeFolder,genome, outFolder, superStretchTableFile)
os.system(cmd)
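Two waiting styles appear above: utils.checkOutput polls for mapping output with a wait interval and a maximum, while the final gene-mapper step uses a flat time.sleep(20) after launching the R script. A hedged sketch of the polling style, with argument semantics inferred from the calls in this example rather than from the pipeline's own utils module.

import os
import time

def check_output(path, wait_minutes=0.2, max_minutes=5):
    # Poll for an output file, sleeping between checks; give up after
    # max_minutes. Returns True once the file exists and is non-empty.
    waited = 0.0
    while waited < max_minutes:
        if os.path.exists(path) and os.path.getsize(path) > 0:
            return True
        time.sleep(wait_minutes * 60)
        waited += wait_minutes
    return False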
0
Example 92
Project: tp-libvirt Source File: virsh_setmem.py
def run(test, params, env):
"""
Test command: virsh setmem.
1) Prepare vm environment.
2) Handle params.
3) Prepare libvirtd status.
4) Run test command and wait for the current memory to stabilize.
5) Recover environment.
6) Check result.
"""
def vm_usable_mem(session):
"""
Get total usable RAM from /proc/meminfo
"""
cmd = "cat /proc/meminfo"
proc_mem = session.cmd_output(cmd)
total_usable_mem = re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
proc_mem).group(1)
return int(total_usable_mem)
def vm_unusable_mem(session):
"""
Get the unusable RAM of the VM.
"""
# Get total physical memory from dmidecode
cmd = "dmidecode -t 17"
dmi_mem = session.cmd_output(cmd)
total_physical_mem = reduce(lambda x, y: int(x) + int(y),
re.findall(r'Size:\s(\d+)\sMB', dmi_mem))
return int(total_physical_mem) * 1024 - vm_usable_mem(session)
def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
"""
Create domain options of command
"""
# Specify domain as argument or parameter
if domarg == "yes":
dom_darg_key = "domainarg"
else:
dom_darg_key = "domain"
# How to reference domain
if vm_ref == "domid":
dom_darg_value = domid
elif vm_ref == "domname":
dom_darg_value = vm_name
elif vm_ref == "domuuid":
dom_darg_value = domuuid
elif vm_ref == "none":
dom_darg_value = None
elif vm_ref == "emptystring":
dom_darg_value = '""'
else: # stick in value directly
dom_darg_value = vm_ref
return {dom_darg_key: dom_darg_value}
def make_sizeref(sizearg, mem_ref, original_mem):
"""
Create size options of command
"""
if sizearg == "yes":
size_darg_key = "sizearg"
else:
size_darg_key = "size"
if mem_ref == "halfless":
size_darg_value = "%d" % (original_mem / 2)
elif mem_ref == "halfmore":
size_darg_value = "%d" % int(original_mem * 1.5) # no fraction
elif mem_ref == "same":
size_darg_value = "%d" % original_mem
elif mem_ref == "emptystring":
size_darg_value = '""'
elif mem_ref == "zero":
size_darg_value = "0"
elif mem_ref == "toosmall":
size_darg_value = "1024"
elif mem_ref == "toobig":
size_darg_value = "1099511627776" # (KiB) One Petabyte
elif mem_ref == "none":
size_darg_value = None
else: # stick in value directly
size_darg_value = mem_ref
return {size_darg_key: size_darg_value}
def cal_deviation(actual, expected):
"""
Calculate deviation of actual result and expected result
"""
numerator = float(actual)
denominator = float(expected)
if numerator > denominator:
numerator = denominator
denominator = float(actual)
return 100 - (100 * (numerator / denominator))
def is_old_libvirt():
"""
Check if libvirt is old version
"""
regex = r'\s+\[--size\]\s+'
return bool(not virsh.has_command_help_match('setmem', regex))
def print_debug_stats(original_inside_mem, original_outside_mem,
test_inside_mem, test_outside_mem,
expected_outside_mem, expected_inside_mem,
delta_percentage, unusable_mem):
"""
Print debug message for test
"""
# Calculate deviation
inside_deviation = cal_deviation(test_inside_mem, expected_inside_mem)
outside_deviation = cal_deviation(test_outside_mem, expected_outside_mem)
dbgmsg = ("Unusable memory of VM : %d KiB\n"
"Original inside memory : %d KiB\n"
"Expected inside memory : %d KiB\n"
"Actual inside memory : %d KiB\n"
"Inside memory deviation : %0.2f%%\n"
"Original outside memory : %d KiB\n"
"Expected outside memory : %d KiB\n"
"Actual outside memory : %d KiB\n"
"Outside memory deviation: %0.2f%%\n"
"Acceptable deviation : %0.2f%%" % (
unusable_mem,
original_inside_mem,
expected_inside_mem,
test_inside_mem,
inside_deviation,
original_outside_mem,
expected_outside_mem,
test_outside_mem,
outside_deviation,
delta_percentage))
for dbgline in dbgmsg.splitlines():
logging.debug(dbgline)
# MAIN TEST CODE ###
# Process cartesian parameters
vm_ref = params.get("setmem_vm_ref", "")
mem_ref = params.get("setmem_mem_ref", "")
flags = params.get("setmem_flags", "")
status_error = "yes" == params.get("status_error", "no")
old_libvirt_fail = "yes" == params.get("setmem_old_libvirt_fail", "no")
quiesce_delay = int(params.get("setmem_quiesce_delay", "1"))
domarg = params.get("setmem_domarg", "no")
sizearg = params.get("setmem_sizearg", "no")
libvirt = params.get("libvirt", "on")
delta_percentage = float(params.get("setmem_delta_per", "10"))
start_vm = "yes" == params.get("start_vm", "yes")
vm_name = params.get("main_vm", "avocado-vt-vm1")
paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
manipulate_dom_before_setmem = "yes" == params.get(
"manipulate_dom_before_setmem", "no")
manipulate_dom_after_setmem = "yes" == params.get(
"manipulate_dom_after_setmem", "no")
manipulate_action = params.get("manipulate_action", "")
vm = env.get_vm(vm_name)
# Back up domain XML
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
backup_xml = vmxml.copy()
vmosxml = vmxml.os
need_mkswap = False
if manipulate_action in ['s3', 's4']:
vm.destroy()
BIOS_BIN = "/usr/share/seabios/bios.bin"
if os.path.isfile(BIOS_BIN):
vmosxml.loader = BIOS_BIN
vmxml.os = vmosxml
vmxml.sync()
else:
logging.error("Not find %s on host", BIOS_BIN)
vmxml.set_pm_suspend(vm_name, "yes", "yes")
vm.prepare_guest_agent()
if manipulate_action == "s4":
need_mkswap = not vm.has_swap()
if need_mkswap:
logging.debug("Creating swap partition")
vm.create_swap_partition()
memballoon_model = params.get("memballoon_model", "")
if memballoon_model:
vm.destroy()
vmxml.del_device('memballoon', by_tag=True)
memballoon_xml = vmxml.get_device_class('memballoon')()
memballoon_xml.model = memballoon_model
vmxml.add_device(memballoon_xml)
logging.info(memballoon_xml)
vmxml.sync()
vm.start()
remove_balloon_driver = "yes" == params.get("remove_balloon_driver", "no")
if remove_balloon_driver:
if not vm.is_alive():
logging.error("Can't remove module as guest not running")
else:
session = vm.wait_for_login()
cmd = "rmmod virtio_balloon"
s_rmmod, o_rmmod = session.cmd_status_output(cmd)
if s_rmmod != 0:
logging.error("Fail to remove module virtio_balloon in guest:\n%s",
o_rmmod)
session.close()
# Get original data
domid = vm.get_id()
domuuid = vm.get_uuid()
uri = vm.connect_uri
if not vm.is_alive():
vm.start()
session = vm.wait_for_login()
if session.cmd_status('dmidecode'):
# The physical memory size is in the vm xml; use it when dmidecode is
# not supported
unusable_mem = int(vmxml.max_mem) - vm_usable_mem(session)
else:
unusable_mem = vm_unusable_mem(session)
original_outside_mem = vm.get_used_mem()
original_inside_mem = vm_usable_mem(session)
session.close()
# Prepare VM state
if not start_vm:
vm.destroy()
else:
if paused_after_start_vm:
vm.pause()
old_libvirt = is_old_libvirt()
if old_libvirt:
logging.info("Running test on older libvirt")
use_kilobytes = True
else:
logging.info("Running test on newer libvirt")
use_kilobytes = False
# Argument pattern is complex, build with dargs
dargs = {'flagstr': flags,
'use_kilobytes': use_kilobytes,
'uri': uri, 'ignore_status': True, "debug": True}
dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem))
# Prepare libvirtd status
libvirtd = utils_libvirtd.Libvirtd()
if libvirt == "off":
libvirtd.stop()
else:
if not libvirtd.is_running():
libvirtd.start()
if status_error or (old_libvirt_fail & old_libvirt):
logging.info("Error Test: Expecting an error to occur!")
try:
memory_change = True
if manipulate_dom_before_setmem:
manipulate_domain(vm_name, manipulate_action)
if manipulate_action in ['save', 'managedsave', 's4']:
memory_change = False
result = virsh.setmem(**dargs)
status = result.exit_status
if status == 0:
logging.info(
"Waiting %d seconds for VM memory to settle", quiesce_delay)
# It takes time for the kernel to settle on the new memory
# and the number of clean pages is not predictable. It is therefore
# extremely difficult to determine quiescence, so sleeping
# one second per error percent is a reasonable option.
time.sleep(quiesce_delay)
if manipulate_dom_before_setmem:
manipulate_domain(vm_name, manipulate_action, True)
if manipulate_dom_after_setmem:
manipulate_domain(vm_name, manipulate_action)
manipulate_domain(vm_name, manipulate_action, True)
# Recover libvirtd status
if libvirt == "off":
libvirtd.start()
# Gather stats if not running error test
if not status_error and not old_libvirt_fail:
if not memory_change:
test_inside_mem = original_inside_mem
test_outside_mem = original_outside_mem
else:
if vm.state() == "shut off":
vm.start()
# Make sure it's never paused
vm.resume()
session = vm.wait_for_login()
# Actual results
test_inside_mem = vm_usable_mem(session)
session.close()
test_outside_mem = vm.get_used_mem()
# Expected results for both inside and outside
if remove_balloon_driver:
expected_mem = original_outside_mem
else:
if not memory_change:
expected_mem = original_inside_mem
elif sizearg == "yes":
expected_mem = int(dargs["sizearg"])
else:
expected_mem = int(dargs["size"])
if memory_change:
# Should minus unusable memory for inside memory check
expected_inside_mem = expected_mem - unusable_mem
expected_outside_mem = expected_mem
else:
expected_inside_mem = expected_mem
expected_outside_mem = original_outside_mem
print_debug_stats(original_inside_mem, original_outside_mem,
test_inside_mem, test_outside_mem,
expected_outside_mem, expected_inside_mem,
delta_percentage, unusable_mem)
# Don't care about memory comparison on error test
outside_pass = cal_deviation(test_outside_mem,
expected_outside_mem) <= delta_percentage
inside_pass = cal_deviation(test_inside_mem,
expected_inside_mem) <= delta_percentage
if status != 0 or not outside_pass or not inside_pass:
msg = "test conditions not met: "
if status != 0:
msg += "Non-zero virsh setmem exit code. "
if not outside_pass:
msg += "Outside memory deviated. "
if not inside_pass:
msg += "Inside memory deviated. "
raise error.TestFail(msg)
return # Normal test passed
elif not status_error and old_libvirt_fail:
if status == 0:
if old_libvirt:
raise error.TestFail("Error test did not result in an error")
else:
if not old_libvirt:
raise error.TestFail("Newer libvirt failed when it should not")
else: # Verify an error test resulted in error
if status == 0:
raise error.TestFail("Error test did not result in an error")
finally:
if need_mkswap:
vm.cleanup_swap()
vm.destroy()
backup_xml.sync()
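After virsh setmem succeeds, the test sleeps for quiesce_delay seconds (scaled to the tolerated deviation) because the guest kernel needs time to settle on the new memory before a measurement is meaningful. An alternative sketch that samples until two consecutive readings agree instead of sleeping a fixed delay; measure is a hypothetical callable returning current memory in KiB, not part of the test suite.

import time

def wait_until_stable(measure, tolerance_kib=1024, interval=1, max_checks=30):
    # Sample measure() until two consecutive readings agree within
    # tolerance_kib, or give up after max_checks samples.
    last = measure()
    for _ in range(max_checks):
        time.sleep(interval)
        current = measure()
        if abs(current - last) <= tolerance_kib:
            return current
        last = current
    return last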
0
Example 93
Project: crypto Source File: decryptoapp.py
def main():
import os
import sys
from time import sleep
import getpass
import tarfile
from Naked.commandline import Command
from Naked.toolshed.shell import execute, muterun
from Naked.toolshed.system import dir_exists, file_exists, list_all_files, make_path, stdout, stderr, is_dir
from shellescape import quote
# ------------------------------------------------------------------------------------------
# [ Instantiate command line object ]
# used for all subsequent conditional logic in the CLI application
# ------------------------------------------------------------------------------------------
c = Command(sys.argv[0], sys.argv[1:])
# ------------------------------------------------------------------------------------------
# [ VALIDATION LOGIC ] - early validation of appropriate command syntax
# Test that user entered at least one argument to the executable, print usage if not
# ------------------------------------------------------------------------------------------
if not c.command_suite_validates():
from crypto.settings import usage as crypto_usage
print(crypto_usage)
sys.exit(1)
# ------------------------------------------------------------------------------------------
# [ HELP, VERSION, USAGE LOGIC ]
# Naked framework provides default help, usage, and version commands for all applications
# --> settings for user messages are assigned in the lib/crypto/settings.py file
# ------------------------------------------------------------------------------------------
if c.help(): # User requested crypto help information
from crypto.settings import help as crypto_help
print(crypto_help)
sys.exit(0)
elif c.usage(): # User requested crypto usage information
from crypto.settings import usage as crypto_usage
print(crypto_usage)
sys.exit(0)
elif c.version(): # User requested crypto version information
from crypto.settings import app_name, major_version, minor_version, patch_version
version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
print(version_display_string)
sys.exit(0)
# ------------------------------------------------------------------------------------------
# [ APPLICATION LOGIC ]
#
# ------------------------------------------------------------------------------------------
elif c.argc > 1:
# code for multi-file processing and commands that include options
use_standard_output = False # print to stdout flag
use_file_overwrite = False # overwrite existing file
untar_archives = True # untar decrypted tar archives, true by default
# set user option flags
if c.option('--stdout') or c.option('-s'):
use_standard_output = True
if c.option('--overwrite') or c.option('-o'):
use_file_overwrite = True
if c.option('--nountar'):
untar_archives = False
directory_list = [] # directory paths included in the user entered paths from the command line
file_list = [] # file paths included in the user entered paths from the command line (and inside directories entered)
for argument in c.argv:
if file_exists(argument): # user included a file, add it to the file_list for decryption
if argument.endswith('.crypt'):
file_list.append(argument) # add .crypt files to the list of files for decryption
elif argument.endswith('.gpg'):
file_list.append(argument)
elif argument.endswith('.asc'):
file_list.append(argument)
elif argument.endswith('.pgp'):
file_list.append(argument)
else:
# cannot identify it as an encrypted file, give it a shot anyway but warn the user
file_list.append(argument)
stdout("Could not confirm that '" + argument + "' is encrypted based upon the file type. Attempting decryption. Keep your fingers crossed...")
elif dir_exists(argument): # user included a directory, add it to the directory_list
directory_list.append(argument)
else:
if argument[0] == "-":
pass # if it is an option, do nothing
else:
stderr("'" + argument + "' does not appear to be an existing file or directory. Aborting decryption attempt for this request.")
# unroll the contained directory files into the file_list IF they are encrypted file types
if len(directory_list) > 0:
for directory in directory_list:
directory_file_list = list_all_files(directory)
for contained_file in directory_file_list:
if contained_file.endswith('.crypt'):
file_list.append(make_path(directory, contained_file)) # include the file with a filepath 'directory path/contained_file path'
elif contained_file.endswith('.gpg'):
file_list.append(make_path(directory, contained_file))
elif contained_file.endswith('.asc'):
file_list.append(make_path(directory, contained_file))
elif contained_file.endswith('.pgp'):
file_list.append(make_path(directory, contained_file))
# confirm that there are files for decryption, if not abort
if len(file_list) == 0:
stderr("Could not identify files for decryption")
sys.exit(1)
# get passphrase used to symmetrically decrypt the file
passphrase = getpass.getpass("Please enter your passphrase: ")
if len(passphrase) == 0: # confirm that user entered a passphrase
stderr("You did not enter a passphrase. Please repeat your command and try again.")
sys.exit(1)
passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")
if passphrase == passphrase_confirm:
# begin decryption of each requested file. the directory path was already added to the file path above
for encrypted_file in file_list:
# create the decrypted file name
decrypted_filename = ""
if encrypted_file.endswith('.crypt'):
decrypted_filename = encrypted_file[0:-6]
elif encrypted_file.endswith('.gpg') or encrypted_file.endswith('.asc') or encrypted_file.endswith('.pgp'):
decrypted_filename = encrypted_file[0:-4]
else:
decrypted_filename = encrypted_file + '.decrypt' # if it was a file without a known encrypted file type, add the .decrypt suffix
# determine whether file overwrite will take place with the decrypted file
skip_file = False # flag that indicates this file should not be decrypted
created_tmp_files = False
if not use_standard_output: # if not writing a file, no need to check for overwrite
if file_exists(decrypted_filename):
if use_file_overwrite: # rename the existing file to temp file which will be erased or replaced (on decryption failures) below
tmp_filename = decrypted_filename + '.tmp'
os.rename(decrypted_filename, tmp_filename)
created_tmp_files = True
else:
stdout("The file path '" + decrypted_filename + "' already exists. This file was not decrypted.")
skip_file = True
# begin decryption
if not skip_file:
if use_standard_output: # using --quiet flag to suppress stdout messages from gpg, just want the file data in stdout stream
system_command = "gpg --batch --quiet --passphrase " + quote(passphrase) + " -d " + quote(encrypted_file)
successful_execution = execute(system_command) # use naked execute function to directly push to stdout, rather than return stdout
if not successful_execution:
stderr("Unable to decrypt file '" + encrypted_file + "'", 0)
if created_tmp_files: # restore the moved tmp file to original if decrypt failed
tmp_filename = decrypted_filename + '.tmp'
if file_exists(tmp_filename):
os.rename(tmp_filename, decrypted_filename)
else: # decryption successful, but stdout mode is active, so do not add any other output from decrypto
pass
else:
system_command = "gpg --batch -o " + quote(decrypted_filename) + " --passphrase " + quote(passphrase) + " -d " + quote(encrypted_file)
response = muterun(system_command)
if response.exitcode == 0:
stdout("'" + encrypted_file + "' decrypted to '" + decrypted_filename + "'")
else: # failed decryption
if created_tmp_files: # restore the moved tmp file to original if decrypt failed
tmp_filename = decrypted_filename + '.tmp'
if file_exists(tmp_filename):
os.rename(tmp_filename, decrypted_filename)
# report the error
stderr(response.stderr)
stderr("Decryption failed for " + encrypted_file)
# cleanup: remove the tmp file
if created_tmp_files:
tmp_filename = decrypted_filename + '.tmp'
if file_exists(tmp_filename):
os.remove(tmp_filename)
# untar/extract any detected archive file(s)
if untar_archives is True:
if decrypted_filename.endswith('.tar') and tarfile.is_tarfile(decrypted_filename):
untar_path_tuple = os.path.split(decrypted_filename)
untar_path = untar_path_tuple[0]
if use_file_overwrite:
with tarfile.open(decrypted_filename) as tar:
if len(untar_path) > 0:
tar.extractall(path=untar_path) # use dir path from the decrypted_filename if not CWD
stdout("'" + decrypted_filename + "' unpacked in the directory path '" + untar_path + "'")
else:
tar.extractall() # else use CWD
stdout("'" + decrypted_filename + "' unpacked in the current working directory")
else:
with tarfile.TarFile(decrypted_filename, 'r', errorlevel=1) as tar:
for tarinfo in tar:
t_file = tarinfo.name
if len(untar_path) > 0:
t_file_path = os.path.join(untar_path, t_file)
else:
t_file_path = t_file
if not os.path.exists(t_file_path):
try:
if len(untar_path) > 0:
tar.extract(t_file, path=untar_path) # write to the appropriate dir
else:
tar.extract(t_file) # write to CWD
except IOError as e:
stderr("Failed to unpack the file '" + t_file_path + "' [" + str(e) + "]")
elif is_dir(t_file_path):
pass # do nothing if it exists and is a directory, no need to warn
else: # it is a file and it already exists, provide user error message
stderr("Failed to unpack the file '" + t_file_path + "'. File already exists. Use the --overwrite flag to replace existing files.")
# remove the decrypted tar archive file
os.remove(decrypted_filename)
# overwrite the entered passphrases after file decryption is complete for all files
passphrase = ""
passphrase_confirm = ""
# add a short pause to hinder brute force pexpect style password attacks with decrypto
sleep(0.2) # 200ms pause
else: # passphrases did not match
passphrase = ""
passphrase_confirm = ""
stderr("The passphrases did not match. Please enter your command again.")
sys.exit(1)
elif c.argc == 1:
# simple single file or directory processing with default settings
path = c.arg0
if file_exists(path): # SINGLE FILE
check_existing_file = False # check whether a file with the new decrypted filename already exists in the directory
if path.endswith('.crypt'):
decrypted_filename = path[0:-6] # remove the .crypt suffix
check_existing_file = True
elif path.endswith('.gpg') or path.endswith('.pgp') or path.endswith('.asc'):
decrypted_filename = path[0:-4]
check_existing_file = True
else:
decrypted_filename = path + ".decrypt" # if there is not a standard file type, then add a .decrypt suffix to the decrypted file name
stdout("Could not confirm that the requested file is encrypted based upon the file type. Attempting decryption. Keep your fingers crossed...")
# confirm that the decrypted path does not already exist, if so abort with warning message to user
if check_existing_file is True:
if file_exists(decrypted_filename):
stderr("Your file will be decrypted to '" + decrypted_filename + "' and this file path already exists. Please move the file or use the --overwrite option with your command if you intend to replace the current file.")
sys.exit(1)
# get passphrase used to symmetrically decrypt the file
passphrase = getpass.getpass("Please enter your passphrase: ")
if len(passphrase) == 0: # confirm that user entered a passphrase
stderr("You did not enter a passphrase. Please repeat your command and try again.")
sys.exit(1)
passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")
# confirm that the passphrases match
if passphrase == passphrase_confirm:
system_command = "gpg --batch -o " + quote(decrypted_filename) + " --passphrase " + quote(passphrase) + " -d " + quote(path)
response = muterun(system_command)
if response.exitcode == 0:
# unpack tar archive generated from the decryption, if present
if decrypted_filename.endswith('.tar') and tarfile.is_tarfile(decrypted_filename):
untar_path_tuple = os.path.split(decrypted_filename)
untar_path = untar_path_tuple[0]
with tarfile.TarFile(decrypted_filename, 'r', errorlevel=1) as tar:
for tarinfo in tar:
t_file = tarinfo.name
if len(untar_path) > 0:
t_file_path = os.path.join(untar_path, t_file)
else:
t_file_path = t_file
if not os.path.exists(t_file_path):
try:
if len(untar_path) > 0:
tar.extract(t_file, path=untar_path) # write to the appropriate dir
else:
tar.extract(t_file) # write to CWD
except IOError as e:
stderr("Failed to unpack the file '" + t_file_path + "' [" + str(e) + "]")
elif is_dir(t_file_path):
pass # do nothing if it exists and is a directory, no need to warn
else: # it is a file and it already exists, provide user error message
stderr("Failed to unpack the file '" + t_file_path + "'. File already exists. Use the --overwrite flag to replace existing files.")
# remove the decrypted tar archive
os.remove(decrypted_filename)
stdout("Decryption complete")
# overwrite user entered passphrases
passphrase = ""
passphrase_confirm = ""
sys.exit(0)
else:
stderr(response.stderr)
stderr("Decryption failed")
# overwrite user entered passphrases
passphrase = ""
passphrase_confirm = ""
# add a short pause to hinder brute force pexpect style password attacks with decrypto
sleep(0.2) # 200ms pause
sys.exit(1)
else:
stderr("The passphrases did not match. Please enter your command again.")
sys.exit(1)
elif dir_exists(path): # SINGLE DIRECTORY
dirty_directory_file_list = list_all_files(path)
directory_file_list = [x for x in dirty_directory_file_list if (x.endswith('.crypt') or x.endswith('.gpg') or x.endswith('.pgp') or x.endswith('.asc'))]
# if there are no encrypted files found, warn and abort
if len(directory_file_list) == 0:
stderr("There are no encrypted files in the directory")
sys.exit(1)
# prompt for the passphrase
passphrase = getpass.getpass("Please enter your passphrase: ")
if len(passphrase) == 0: # confirm that user entered a passphrase
stderr("You did not enter a passphrase. Please repeat your command and try again.")
sys.exit(1)
passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")
if passphrase == passphrase_confirm:
# decrypt all of the encrypted files in the directory
for filepath in directory_file_list:
absolute_filepath = make_path(path, filepath) # combine the directory path and file name into absolute path
# remove file suffix from the decrypted file path that writes to disk
if absolute_filepath.endswith('.crypt'):
decrypted_filepath = absolute_filepath[0:-6] # remove the .crypt suffix
elif absolute_filepath.endswith('.gpg') or absolute_filepath.endswith('.pgp') or absolute_filepath.endswith('.asc'):
decrypted_filepath = absolute_filepath[0:-4]
# confirm that the file does not already exist
if file_exists(decrypted_filepath):
stdout("The file path '" + decrypted_filepath + "' already exists. This file was not decrypted.")
else:
system_command = "gpg --batch -o " + quote(decrypted_filepath) + " --passphrase " + quote(passphrase) + " -d " + quote(absolute_filepath)
response = muterun(system_command)
if response.exitcode == 0:
stdout("'" + absolute_filepath + "' decrypted to '" + decrypted_filepath + "'")
else:
stderr(response.stderr)
stderr("Decryption failed for " + absolute_filepath)
# overwrite user entered passphrases
passphrase = ""
passphrase_confirm = ""
# add a short pause to hinder brute force pexpect style password attacks with decrypto
sleep(0.2) # 200ms pause
else:
# overwrite user entered passphrases
passphrase = ""
passphrase_confirm = ""
stderr("The passphrases did not match. Please enter your command again.")
sys.exit(1)
else:
# error message, not a file or directory. user entry error
stderr("The path that you entered does not appear to be an existing file or directory. Please try again.")
sys.exit(1)
# ------------------------------------------------------------------------------------------
# [ DEFAULT MESSAGE FOR MATCH FAILURE ]
# Message to provide to the user when all above conditional logic fails to meet a true condition
# ------------------------------------------------------------------------------------------
else:
print("Could not complete your request. Please try again.")
sys.exit(1)
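The sleep(0.2) calls in this example are a deliberate throttle: pausing for 200ms after each passphrase attempt slows automated pexpect-style brute forcing without being noticeable to an interactive user. A minimal sketch of the same idea, where check_passphrase is a hypothetical stand-in for the real gpg decryption attempt:

import getpass
import time

def check_passphrase(candidate):
    # hypothetical stand-in for the actual gpg decryption attempt
    return candidate == "correct horse battery staple"

def prompt_with_throttle(max_attempts=3, delay=0.2):
    """Prompt for a passphrase, pausing after each attempt to hinder brute force."""
    for _ in range(max_attempts):
        if check_passphrase(getpass.getpass("Passphrase: ")):
            return True
        time.sleep(delay)  # fixed 200ms pause between attempts
    return False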
0
Example 94
Project: SickRage Source File: SickBeard.py
def start(self): # pylint: disable=too-many-branches,too-many-statements
"""
Start SickRage
"""
# do some preliminary stuff
sickbeard.MY_FULLNAME = ek(os.path.normpath, ek(os.path.abspath, __file__))
sickbeard.MY_NAME = ek(os.path.basename, sickbeard.MY_FULLNAME)
sickbeard.PROG_DIR = ek(os.path.dirname, sickbeard.MY_FULLNAME)
sickbeard.LOCALE_DIR = ek(os.path.join, sickbeard.PROG_DIR, 'locale')
sickbeard.DATA_DIR = sickbeard.PROG_DIR
sickbeard.MY_ARGS = sys.argv[1:]
try:
locale.setlocale(locale.LC_ALL, '')
sickbeard.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
sickbeard.SYS_ENCODING = 'UTF-8'
# pylint: disable=no-member
if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING.lower() in ('ansi_x3.4-1968', 'us-ascii', 'ascii', 'charmap') or \
(sys.platform.startswith('win') and sys.getwindowsversion()[0] >= 6 and str(getattr(sys.stdout, 'device', sys.stdout).encoding).lower() in ('cp65001', 'charmap')):
sickbeard.SYS_ENCODING = 'UTF-8'
# TODO: Continue working on making this unnecessary, this hack creates all sorts of hellish problems
if not hasattr(sys, 'setdefaultencoding'):
reload(sys)
try:
# On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
sys.setdefaultencoding(sickbeard.SYS_ENCODING) # pylint: disable=no-member
except (AttributeError, LookupError):
sys.exit('Sorry, you MUST add the SickRage folder to the PYTHONPATH environment variable\n'
'or find another way to force Python to use %s for string encoding.' % sickbeard.SYS_ENCODING)
# Need console logging for SickBeard.py and SickBeard-console.exe
self.console_logging = (not hasattr(sys, 'frozen')) or (sickbeard.MY_NAME.lower().find('-console') > 0)
# Rename the main thread
threading.currentThread().name = 'MAIN'
try:
opts, args_ = getopt.getopt(
sys.argv[1:], 'hqdp::',
['help', 'quiet', 'nolaunch', 'daemon', 'pidfile=', 'port=', 'datadir=', 'config=', 'noresize']
)
except getopt.GetoptError:
sys.exit(self.help_message())
for option, value in opts:
# Prints help message
if option in ('-h', '--help'):
sys.exit(self.help_message())
# For now we'll just silence the logging
if option in ('-q', '--quiet'):
self.console_logging = False
# Suppress launching web browser
# Needed for OSes without default browser assigned
# Prevent duplicate browser window when restarting in the app
if option in ('--nolaunch',):
self.no_launch = True
# Override default/configured port
if option in ('-p', '--port'):
try:
self.forced_port = int(value)
except ValueError:
sys.exit('Port: {0} is not a number. Exiting.'.format(value))
# Run as a double forked daemon
if option in ('-d', '--daemon'):
self.run_as_daemon = True
# When running as daemon disable console_logging and don't start browser
self.console_logging = False
self.no_launch = True
if sys.platform == 'win32' or sys.platform == 'darwin':
self.run_as_daemon = False
# Write a pid file if requested
if option in ('--pidfile',):
self.create_pid = True
self.pid_file = str(value)
# If the pid file already exists, SickRage may still be running, so exit
if ek(os.path.exists, self.pid_file):
sys.exit('PID file: {0} already exists. Exiting.'.format(self.pid_file))
# Specify folder to load the config file from
if option in ('--config',):
sickbeard.CONFIG_FILE = ek(os.path.abspath, value)
# Specify folder to use as the data directory
if option in ('--datadir',):
sickbeard.DATA_DIR = ek(os.path.abspath, value)
# Prevent resizing of the banner/posters even if PIL is installed
if option in ('--noresize',):
sickbeard.NO_RESIZE = True
# The pid file is only useful in daemon mode, make sure we can write the file properly
if self.create_pid:
if self.run_as_daemon:
pid_dir = ek(os.path.dirname, self.pid_file)
if not ek(os.access, pid_dir, os.F_OK):
sys.exit('PID dir: {0} doesn\'t exist. Exiting.'.format(pid_dir))
if not ek(os.access, pid_dir, os.W_OK):
sys.exit('PID dir: {0} must be writable (write permissions). Exiting.'.format(pid_dir))
else:
if self.console_logging:
sys.stdout.write('Not running in daemon mode. PID file creation disabled.\n')
self.create_pid = False
# If they don't specify a config file then put it in the data dir
if not sickbeard.CONFIG_FILE:
sickbeard.CONFIG_FILE = ek(os.path.join, sickbeard.DATA_DIR, 'config.ini')
# Make sure that we can create the data dir
if not ek(os.access, sickbeard.DATA_DIR, os.F_OK):
try:
ek(os.makedirs, sickbeard.DATA_DIR, 0o744)
except os.error:
raise SystemExit('Unable to create data directory: {0}'.format(sickbeard.DATA_DIR))
# Make sure we can write to the data dir
if not ek(os.access, sickbeard.DATA_DIR, os.W_OK):
raise SystemExit('Data directory must be writeable: {0}'.format(sickbeard.DATA_DIR))
# Make sure we can write to the config file
if not ek(os.access, sickbeard.CONFIG_FILE, os.W_OK):
if ek(os.path.isfile, sickbeard.CONFIG_FILE):
raise SystemExit('Config file must be writeable: {0}'.format(sickbeard.CONFIG_FILE))
elif not ek(os.access, ek(os.path.dirname, sickbeard.CONFIG_FILE), os.W_OK):
raise SystemExit('Config file root dir must be writeable: {0}'.format(ek(os.path.dirname, sickbeard.CONFIG_FILE)))
ek(os.chdir, sickbeard.DATA_DIR)
# Check if we need to perform a restore first
restore_dir = ek(os.path.join, sickbeard.DATA_DIR, 'restore')
if ek(os.path.exists, restore_dir):
success = self.restore_db(restore_dir, sickbeard.DATA_DIR)
if self.console_logging:
sys.stdout.write('Restore: restoring DB and config.ini {0}!\n'.format(('FAILED', 'SUCCESSFUL')[success]))
# Load the config and publish it to the sickbeard package
if self.console_logging and not ek(os.path.isfile, sickbeard.CONFIG_FILE):
sys.stdout.write('Unable to find {0}, all settings will be default!\n'.format(sickbeard.CONFIG_FILE))
sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE, encoding='UTF-8')
# Initialize the config and our threads
sickbeard.initialize(consoleLogging=self.console_logging)
if self.run_as_daemon:
self.daemonize()
# Get PID
sickbeard.PID = os.getpid()
# Build from the DB to start with
self.load_shows_from_db()
logger.log('Starting SickRage [{branch}] using \'{config}\''.format
(branch=sickbeard.BRANCH, config=sickbeard.CONFIG_FILE))
self.clear_cache()
if self.forced_port:
logger.log('Forcing web server to port {port}'.format(port=self.forced_port))
self.start_port = self.forced_port
else:
self.start_port = sickbeard.WEB_PORT
if sickbeard.WEB_LOG:
self.log_dir = sickbeard.LOG_DIR
else:
self.log_dir = None
# sickbeard.WEB_HOST is available as a configuration value in various
# places but is not configurable. It is supported here for historic reasons.
if sickbeard.WEB_HOST and sickbeard.WEB_HOST != '0.0.0.0':
self.web_host = sickbeard.WEB_HOST
else:
self.web_host = '' if sickbeard.WEB_IPV6 else '0.0.0.0'
# web server options
self.web_options = {
'port': int(self.start_port),
'host': self.web_host,
'data_root': ek(os.path.join, sickbeard.PROG_DIR, 'gui', sickbeard.GUI_NAME),
'web_root': sickbeard.WEB_ROOT,
'log_dir': self.log_dir,
'username': sickbeard.WEB_USERNAME,
'password': sickbeard.WEB_PASSWORD,
'enable_https': sickbeard.ENABLE_HTTPS,
'handle_reverse_proxy': sickbeard.HANDLE_REVERSE_PROXY,
'https_cert': ek(os.path.join, sickbeard.PROG_DIR, sickbeard.HTTPS_CERT),
'https_key': ek(os.path.join, sickbeard.PROG_DIR, sickbeard.HTTPS_KEY),
}
# start web server
self.web_server = SRWebServer(self.web_options)
self.web_server.start()
# Fire up all our threads
sickbeard.start()
# Build internal name cache
name_cache.buildNameCache()
# Pre-populate network timezones, it isn't thread safe
network_timezones.update_network_dict()
# sure, why not?
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.trimHistory()
# Check for metadata indexer updates for shows (sets the next aired ep!)
# sickbeard.showUpdateScheduler.forceRun()
# Launch browser
if sickbeard.LAUNCH_BROWSER and not (self.no_launch or self.run_as_daemon):
sickbeard.launchBrowser('https' if sickbeard.ENABLE_HTTPS else 'http', self.start_port, sickbeard.WEB_ROOT)
# main loop
while True:
time.sleep(1)
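The closing while True: time.sleep(1) is the classic keep-alive loop for a main thread that has delegated the real work to the web server and background threads: sleeping one second per iteration keeps the process resident without spinning the CPU, and the regular wakeups let signal handlers and shutdown checks run promptly. A minimal sketch of the pattern, using a hypothetical threading.Event for shutdown rather than SickRage's own machinery:

import threading
import time

shutdown_event = threading.Event()  # hypothetical; a signal handler would set() this

def main_loop():
    # keep the main thread alive while background threads do the work
    while not shutdown_event.is_set():
        time.sleep(1)  # wake once a second to re-check for shutdown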
0
Example 95
Project: radical.pilot Source File: radical-pilot-agent-multicore.py
def bootstrap_3():
"""
This method continues where the bootstrapper left off, but will quickly pass
control to the Agent class which will spawn the functional components.
Most of bootstrap_3 applies only to agent_0, in particular all mongodb
interactions remain excluded for other sub-agent instances.
The agent interprets a config file, which will specify in an agent_layout
section:
- what nodes should be used for sub-agent startup
- what bridges should be started
- what components should be started
- what are the endpoints for bridges which are not started
bootstrap_3 will create derived config files for all sub-agents.
The agent master (agent_0) will collect information about the nodes required
for all instances. That is added to the config itself, for the benefit of
the LRMS initialisation which is expected to block those nodes from the
scheduler.
"""
global lrms, agent, bridges
# find out what agent instance name we have
if len(sys.argv) != 2:
raise RuntimeError('invalid number of parameters (%s)' % sys.argv)
agent_name = sys.argv[1]
# load the agent config, and overload the config dicts
agent_cfg = "%s/%s.cfg" % (os.getcwd(), agent_name)
print "startup agent %s : %s" % (agent_name, agent_cfg)
cfg = ru.read_json_str(agent_cfg)
cfg['agent_name'] = agent_name
pilot_id = cfg['pilot_id']
# set up a logger and profiler
prof = ru.Profiler ('%s.bootstrap_3' % agent_name)
prof.prof('sync ref', msg='agent start', uid=pilot_id)
log = ru.get_logger('%s.bootstrap_3' % agent_name,
'%s.bootstrap_3.log' % agent_name, 'DEBUG') # FIXME?
log.info('start')
prof.prof('sync ref', msg='agent start')
try:
import setproctitle as spt
spt.setproctitle('radical.pilot %s' % agent_name)
except Exception as e:
log.debug('no setproctitle: %s', e)
log.setLevel(cfg.get('debug', 'INFO'))
print "Agent config (%s):\n%s\n\n" % (agent_cfg, pprint.pformat(cfg))
# quickly set up a mongodb handle so that we can report errors.
# FIXME: signal handlers need mongo_p, but we won't have that until later
if agent_name == 'agent_0':
# Check for the RADICAL_PILOT_DB_HOSTPORT env var, which will hold the
# address of the tunnelized DB endpoint.
# If it exists, we overrule the agent config with it.
hostport = os.environ.get('RADICAL_PILOT_DB_HOSTPORT')
if hostport:
dburl = ru.Url(cfg['mongodb_url'])
dburl.host, dburl.port = hostport.split(':')
cfg['mongodb_url'] = str(dburl)
_, mongo_db, _, _, _ = ru.mongodb_connect(cfg['mongodb_url'])
mongo_p = mongo_db["%s.p" % cfg['session_id']]
if not mongo_p:
raise RuntimeError('could not get a mongodb handle')
# set up signal and exit handlers
def exit_handler():
global lrms, agent, bridges
print 'atexit'
if lrms:
lrms.stop()
lrms = None
if bridges:
for b in bridges:
b.stop()
bridges = dict()
if agent:
agent.stop()
agent = None
sys.exit(1)
def sigint_handler(signum, frame):
if agent_name == 'agent_0':
pilot_FAILED(msg='Caught SIGINT. EXITING (%s)' % frame)
print 'sigint'
prof.prof('stop', msg='sigint_handler', uid=pilot_id)
prof.close()
sys.exit(2)
def sigterm_handler(signum, frame):
if agent_name == 'agent_0':
pilot_FAILED(msg='Caught SIGTERM. EXITING (%s)' % frame)
print 'sigterm'
prof.prof('stop', msg='sigterm_handler %s' % os.getpid(), uid=pilot_id)
prof.close()
sys.exit(3)
def sigalarm_handler(signum, frame):
if agent_name == 'agent_0':
pilot_FAILED(msg='Caught SIGALRM (Walltime limit?). EXITING (%s)' % frame)
print 'sigalrm'
prof.prof('stop', msg='sigalarm_handler', uid=pilot_id)
prof.close()
sys.exit(4)
import atexit
atexit.register(exit_handler)
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGALRM, sigalarm_handler)
# if anything went wrong up to this point, we would have been unable to
# report errors into mongodb. From here on, any fatal error should result
# in one of the above handlers or exit handlers being activated, thus
# reporting the error dutifully.
try:
# ----------------------------------------------------------------------
# des Pudels Kern (the heart of the matter): merge LRMS info into cfg and get the agent started
if agent_name == 'agent_0':
# only the master agent creates LRMS and sub-agent config files.
# The LRMS will give us the set of agent_nodes to use for
# sub-agent startup. Add the remaining LRMS information to the
# config, for the benefit of the scheduler.
lrms = rp.agent.RM.create(name = cfg['lrms'],
cfg = cfg,
logger = log)
cfg['lrms_info'] = lrms.lrms_info
# the master agent also is the only one which starts bridges. It
# has to do so before creating the Agent Worker instance, as that is
# using the bridges already.
bridges = start_bridges(cfg, log)
# FIXME: make sure all communication channels are in place. This could
# be replaced with a proper barrier, but not sure if that is worth it...
time.sleep(1)
# after we started bridges, we'll add their in and out addresses
# to the config, so that the communication channels can connect to
# them. At this point we also write configs for all sub-agents this
# instance intends to spawn.
#
# FIXME: we should point the address to the node of the subagent
# which hosts the bridge, not the local IP. Until this
# is fixed, bridges MUST run on agent_0 (which is what
# RM.hostip() below will point to).
nodeip = rp.agent.RM.hostip(cfg.get('network_interface'), logger=log)
write_sub_configs(cfg, bridges, nodeip, log)
# Store some runtime information into the session
mongo_p.update({"_id": pilot_id},
{"$set": {"lm_info" : lrms.lm_info.get('version_info'),
"lm_detail": lrms.lm_info.get('lm_detail')}})
# we now have correct bridge addresses added to the agent_0.cfg, and all
# other agents will have picked that up from their config files -- we
# can start the agent and all its components!
agent = rp.worker.Agent(cfg)
agent.start()
log.debug('waiting for agent %s to join' % agent_name)
agent.join()
log.debug('agent %s joined' % agent_name)
# ----------------------------------------------------------------------
except SystemExit:
log.exception("Exit running agent: %s" % agent_name)
if agent and not agent.final_cause:
agent.final_cause = "sys.exit"
except Exception as e:
log.exception("Error running agent: %s" % agent_name)
if agent and not agent.final_cause:
agent.final_cause = "error"
finally:
# in all cases, make sure we perform an orderly shutdown. I hope python
# does not mind doing all those things in a finally clause of
# (essentially) main...
if agent:
agent.stop()
log.debug('agent %s finalized' % agent_name)
# agent.stop will not tear down bridges -- we do that here at last
for name,b in bridges.items():
try:
log.info("closing bridge %s", b)
b['handle'].stop()
except Exception as e:
log.exception('ignore failing bridge terminate (%s)', e)
bridges = dict()
# make sure the lrms release whatever it acquired
if lrms:
lrms.stop()
lrms = None
# agent_0 will also report final pilot state to the DB
if agent_name == 'agent_0':
if agent and agent.final_cause == 'timeout':
pilot_DONE(mongo_p, pilot_id, log, "TIMEOUT received. Terminating.")
elif agent and agent.final_cause == 'cancel':
pilot_CANCELED(mongo_p, pilot_id, log, "CANCEL received. Terminating.")
elif agent and agent.final_cause == 'sys.exit':
pilot_CANCELED(mongo_p, pilot_id, log, "EXIT received. Terminating.")
elif agent and agent.final_cause == 'finalize':
log.info('shutdown due to component finalization -- assuming error')
pilot_FAILED(mongo_p, pilot_id, log, "FINALIZE received")
elif agent:
pilot_FAILED(mongo_p, pilot_id, log, "TERMINATE received")
else:
pilot_FAILED(mongo_p, pilot_id, log, "FAILED startup")
log.info('stop')
prof.prof('stop', msg='finally clause agent', uid=pilot_id)
prof.close()
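The time.sleep(1) after start_bridges() is an admitted stopgap; as the FIXME beside it says, the code waits a fixed second and assumes the communication channels are up rather than synchronizing on an explicit readiness signal. Polling with a timeout is a common middle ground between a blind sleep and a full barrier; a minimal sketch, where bridge_is_ready is a hypothetical readiness probe:

import time

def wait_until_ready(bridge_is_ready, timeout=10.0, interval=0.1):
    """Poll a readiness check until it passes or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if bridge_is_ready():
            return True
        time.sleep(interval)  # brief pause between probes instead of one blind sleep
    return False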
0
Example 96
Project: tp-libvirt Source File: virsh_managedsave.py
def run(test, params, env):
"""
Test command: virsh managedsave.
This command can save and destroy a
running domain, so it can be restarted
from the same state at a later time.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
# define function
def vm_recover_check(option, libvirtd, check_shutdown=False):
"""
Check if the vm can be recovered correctly.
:param option: managedsave command option.
:param libvirtd: libvirtd service handler.
:param check_shutdown: whether to also verify a shutdown/start cycle.
"""
# By this time the vm should have been shut down (saved)
if vm.is_alive():
raise error.TestFail("Guest should be inactive")
# Check vm managed save state.
ret = virsh.dom_list("--managed-save --inactive")
vm_state1 = re.findall(r".*%s.*" % vm_name,
ret.stdout.strip())[0].split()[2]
ret = virsh.dom_list("--managed-save --all")
vm_state2 = re.findall(r".*%s.*" % vm_name,
ret.stdout.strip())[0].split()[2]
if vm_state1 != "saved" or vm_state2 != "saved":
raise error.TestFail("Guest state should be saved")
virsh.start(vm_name)
# This time the vm should be active again
if vm.is_dead():
raise error.TestFail("Guest should be active")
# Restart libvirtd and check vm status again.
libvirtd.restart()
if vm.is_dead():
raise error.TestFail("Guest should be active after"
" restarting libvirtd")
# Check managed save file:
if os.path.exists(managed_save_file):
raise error.TestFail("Managed save image exist "
"after starting the domain")
if option:
if option.count("running"):
if vm.is_dead() or vm.is_paused():
raise error.TestFail("Guest state should be"
" running after started"
" because of '--running' option")
elif option.count("paused"):
if not vm.is_paused():
raise error.TestFail("Guest state should be"
" paused after started"
" because of '--paused' option")
else:
if params.get("paused_after_start_vm") == "yes":
if not vm.is_paused():
raise error.TestFail("Guest state should be"
" paused after started"
" because of initia guest state")
if check_shutdown:
# Resume the domain.
if vm.is_paused():
vm.resume()
vm.wait_for_login()
# Shutdown and start the domain,
# it should be in running state and allow login.
vm.shutdown()
vm.wait_for_shutdown()
vm.start()
vm.wait_for_login()
def vm_undefine_check(vm_name):
"""
Check if vm can be undefined with manage-save option
"""
#backup xml file
xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
if not os.path.exists(managed_save_file):
raise error.TestFail("Can't find managed save image")
#undefine domain with no options.
if not virsh.undefine(vm_name, options=None,
ignore_status=True).exit_status:
raise error.TestFail("Guest shouldn't be undefined"
"while domain managed save image exists")
#undefine domain with managed-save option.
if virsh.undefine(vm_name, options="--managed-save",
ignore_status=True).exit_status:
raise error.TestFail("Guest can't be undefine with "
"managed-save option")
if os.path.exists(managed_save_file):
raise error.TestFail("Managed save image exists"
" after undefining vm")
#restore and start the vm.
xml_backup.define()
vm.start()
def check_flags_parallel(virsh_cmd, bash_cmd, flags):
"""
Run the commands parallel and check the output.
"""
cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
ret = utils.run(cmd, ignore_status=True)
output = ret.stdout.strip()
logging.debug("check flags output: %s" % output)
lines = re.findall(r"flags:.+%s" % flags, output, re.M)
logging.debug("Find lines: %s" % lines)
if not lines:
raise error.TestFail("Checking flags %s failed" % flags)
return ret
def check_multi_guests(guests, start_delay, libvirt_guests):
"""
Check start_delay option for multiple guests.
"""
# Destroy vm first
if vm.is_alive():
vm.destroy(gracefully=False)
# Clone given number of guests
timeout = params.get("clone_timeout", 360)
for i in range(int(guests)):
dst_vm = "%s_%s" % (vm_name, i)
utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
True, timeout=timeout)
virsh.start(dst_vm)
# Wait 10 seconds for vm to start
time.sleep(10)
is_systemd = utils.run("cat /proc/1/comm").stdout.count("systemd")
if is_systemd:
libvirt_guests.restart()
pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
else:
ret = utils.run("service libvirt-guests restart | \
awk '{ print strftime(\"%b %y %H:%M:%S\"), $0; fflush(); }'")
pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'
# The libvirt-guests status command reads messages from the
# systemd journal; in case the messages are not ready in time,
# add a short wait here.
def wait_func():
return libvirt_guests.raw_status().stdout.count("Resuming guest")
utils_misc.wait_for(wait_func, 5)
if is_systemd:
ret = libvirt_guests.raw_status()
logging.info("status output: %s", ret.stdout)
resume_time = re.findall(pattern, ret.stdout, re.M)
if not resume_time:
raise error.TestFail("Can't see messages of resuming guest")
# Convert time string to int
resume_seconds = [time.mktime(time.strptime(
tm, "%b %y %H:%M:%S")) for tm in resume_time]
logging.info("Resume time in seconds: %s", resume_seconds)
# Check if start_delay take effect
for i in range(len(resume_seconds)-1):
if resume_seconds[i+1] - resume_seconds[i] < int(start_delay):
raise error.TestFail("Checking start_delay failed")
def wait_for_state(vm_state):
"""
Wait for vm state is ready.
"""
utils_misc.wait_for(lambda: vm.state() == vm_state, 10)
def check_guest_flags(bash_cmd, flags):
"""
Check bypass_cache option for single guest.
"""
# Drop caches.
drop_caches()
# form proper parallel command based on if systemd is used or not
is_systemd = utils.run("cat /proc/1/comm").stdout.count("systemd")
if is_systemd:
virsh_cmd_stop = "systemctl stop libvirt-guests"
virsh_cmd_start = "systemctl start libvirt-guests"
else:
virsh_cmd_stop = "service libvirt-guests stop"
virsh_cmd_start = "service libvirt-guests start"
ret = check_flags_parallel(virsh_cmd_stop, bash_cmd %
(managed_save_file, managed_save_file,
"1", flags), flags)
if is_systemd:
ret = libvirt_guests.raw_status()
logging.info("status output: %s", ret.stdout)
if all(["Suspending %s" % vm_name not in ret.stdout,
"stopped, with saved guests" not in ret.stdout]):
raise error.TestFail("Can't see messages of suspending vm")
# status command should return 3.
if not is_systemd:
ret = libvirt_guests.raw_status()
if ret.exit_status != 3:
raise error.TestFail("The exit code %s for libvirt-guests"
" status is not correct" % ret)
# Wait for VM in shut off state
wait_for_state("shut off")
check_flags_parallel(virsh_cmd_start, bash_cmd %
(managed_save_file, managed_save_file,
"0", flags), flags)
# Wait for VM in running state
wait_for_state("running")
def vm_msave_remove_check(vm_name):
"""
Check managed save remove command.
"""
if not os.path.exists(managed_save_file):
raise error.TestFail("Can't find managed save image")
virsh.managedsave_remove(vm_name)
if os.path.exists(managed_save_file):
raise error.TestFail("Managed save image still exists")
virsh.start(vm_name)
# The domain state should be running
if vm.state() != "running":
raise error.TestFail("Guest state should be"
" running after started")
def vm_managedsave_loop(vm_name, loop_range, libvirtd):
"""
Run a loop of managedsave command and check its result.
"""
if vm.is_dead():
virsh.start(vm_name)
for i in range(int(loop_range)):
logging.debug("Test loop: %s" % i)
virsh.managedsave(vm_name)
virsh.start(vm_name)
# Check libvirtd status.
if not libvirtd.is_running():
raise error.TestFail("libvirtd is stopped after cmd")
# Check vm status.
if vm.state() != "running":
raise error.TestFail("Guest isn't in running state")
def build_vm_xml(vm_name, **dargs):
"""
Build the new domain xml and define it.
"""
try:
# stop vm before doing any change to xml
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
if dargs.get("cpu_mode"):
if "cpu" in vmxml:
del vmxml.cpu
cpuxml = vm_xml.VMCPUXML()
cpuxml.mode = params.get("cpu_mode", "host-model")
cpuxml.match = params.get("cpu_match", "exact")
cpuxml.fallback = params.get("cpu_fallback", "forbid")
cpu_topology = {}
cpu_topology_sockets = params.get("cpu_topology_sockets")
if cpu_topology_sockets:
cpu_topology["sockets"] = cpu_topology_sockets
cpu_topology_cores = params.get("cpu_topology_cores")
if cpu_topology_cores:
cpu_topology["cores"] = cpu_topology_cores
cpu_topology_threads = params.get("cpu_topology_threads")
if cpu_topology_threads:
cpu_topology["threads"] = cpu_topology_threads
if cpu_topology:
cpuxml.topology = cpu_topology
vmxml.cpu = cpuxml
vmxml.vcpu = int(params.get("vcpu_nums"))
if dargs.get("sec_driver"):
seclabel_dict = {"type": "dynamic", "model": "selinux",
"relabel": "yes"}
vmxml.set_seclabel([seclabel_dict])
vmxml.sync()
vm.start()
except Exception, e:
logging.error(str(e))
raise error.TestNAError("Build domain xml failed")
status_error = ("yes" == params.get("status_error", "no"))
vm_ref = params.get("managedsave_vm_ref", "name")
libvirtd_state = params.get("libvirtd", "on")
extra_param = params.get("managedsave_extra_param", "")
progress = ("yes" == params.get("managedsave_progress", "no"))
cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
test_undefine = "yes" == params.get("managedsave_undefine", "no")
test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
autostart_bypass_cache = params.get("autostart_bypass_cache", "")
multi_guests = params.get("multi_guests", "")
test_libvirt_guests = params.get("test_libvirt_guests", "")
check_flags = "yes" == params.get("check_flags", "no")
security_driver = params.get("security_driver", "")
remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
option = params.get("managedsave_option", "")
check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
pre_vm_state = params.get("pre_vm_state", "")
move_saved_file = "yes" == params.get("move_saved_file", "no")
test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
if option:
if not virsh.has_command_help_match('managedsave', option):
# Older libvirt does not have this option
raise error.TestNAError("Older libvirt does not"
" handle arguments consistently")
# Backup xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
# Get the libvirtd service
libvirtd = utils_libvirtd.Libvirtd()
# Get config files.
qemu_config = utils_config.LibvirtQemuConfig()
libvirt_guests_config = utils_config.LibvirtGuestsConfig()
# Get libvirt-guests service
libvirt_guests = Factory.create_service("libvirt-guests")
try:
# Destroy vm first for setting configuration file
if vm.state() == "running":
vm.destroy(gracefully=False)
# Prepare test environment.
if libvirtd_state == "off":
libvirtd.stop()
if autostart_bypass_cache:
ret = virsh.autostart(vm_name, "", ignore_status=True)
libvirt.check_exit_status(ret)
qemu_config.auto_start_bypass_cache = autostart_bypass_cache
libvirtd.restart()
if security_driver:
qemu_config.security_driver = [security_driver]
if test_libvirt_guests:
if multi_guests:
start_delay = params.get("start_delay", "20")
libvirt_guests_config.START_DELAY = start_delay
if check_flags:
libvirt_guests_config.BYPASS_CACHE = "1"
# The config file format should be "x=y" instead of "x = y"
utils.run("sed -i -e 's/ = /=/g' "
"/etc/sysconfig/libvirt-guests")
libvirt_guests.restart()
# Change domain xml.
if cpu_mode:
build_vm_xml(vm_name, cpu_mode=True)
if security_driver:
build_vm_xml(vm_name, sec_driver=True)
# Turn VM into certain state.
if pre_vm_state == "transient":
logging.info("Creating %s..." % vm_name)
vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
if vm.is_alive():
vm.destroy(gracefully=False)
# Wait for VM to be in shut off state
utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
vm.undefine()
if virsh.create(vmxml_for_test.xml, ignore_status=True).exit_status:
vmxml_backup.define()
raise error.TestNAError("Cann't create the domain")
# Wait for vm in stable state
if params.get("start_vm") == "yes":
if vm.state() == "shut off":
vm.start()
vm.wait_for_login()
# run test case
domid = vm.get_id()
domuuid = vm.get_uuid()
if vm_ref == "id":
vm_ref = domid
elif vm_ref == "uuid":
vm_ref = domuuid
elif vm_ref == "hex_id":
vm_ref = hex(int(domid))
elif vm_ref.count("invalid"):
vm_ref = params.get(vm_ref)
elif vm_ref == "name":
vm_ref = vm_name
# Ignore exception with "ignore_status=True"
if progress:
option += " --verbose"
option += extra_param
# For the bypass_cache test, run a shell command to check fd flags while
# executing the managedsave command
bash_cmd = ("let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
"/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/*%s* |"
"grep 'flags:.*%s') && break; else sleep 0.05; fi; done;")
# Flags to check bypass cache take effect
flags = "014"
if test_bypass_cache:
# Drop caches.
drop_caches()
virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"1", flags), flags)
# Wait for VM in shut off state
wait_for_state("shut off")
virsh_cmd = "virsh start %s %s" % (option, vm_name)
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0", flags), flags)
# Wait for VM in running state
wait_for_state("running")
elif test_libvirt_guests:
logging.debug("libvirt-guests status: %s", libvirt_guests.status())
if multi_guests:
check_multi_guests(multi_guests,
start_delay, libvirt_guests)
if check_flags:
check_guest_flags(bash_cmd, flags)
else:
# Ensure VM is running
utils_misc.wait_for(lambda: vm.state() == "running", 10)
ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
status = ret.exit_status
# The progress information is output in the error message
error_msg = ret.stderr.strip()
if move_saved_file:
cmd = "echo > %s" % managed_save_file
utils.run(cmd)
# recover libvirtd service start
if libvirtd_state == "off":
libvirtd.start()
if status_error:
if not status:
raise error.TestFail("Run successfully with wrong command!")
else:
if status:
raise error.TestFail("Run failed with right command")
if progress:
if not error_msg.count("Managedsave:"):
raise error.TestFail("Got invalid progress output")
if remove_after_cmd:
vm_msave_remove_check(vm_name)
elif test_undefine:
vm_undefine_check(vm_name)
elif autostart_bypass_cache:
libvirtd.stop()
virsh_cmd = ("(service libvirtd start)")
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0", flags), flags)
elif test_loop_cmd:
loop_range = params.get("loop_range", "20")
vm_managedsave_loop(vm_name, loop_range, libvirtd)
else:
vm_recover_check(option, libvirtd, check_shutdown)
finally:
# Restore test environment.
# Ensure libvirtd is started
if not libvirtd.is_running():
libvirtd.start()
if vm.is_paused():
virsh.resume(vm_name)
elif vm.is_dead():
vm.start()
# Wait for VM in running state
wait_for_state("running")
if autostart_bypass_cache:
virsh.autostart(vm_name, "--disable",
ignore_status=True)
if vm.is_alive():
vm.destroy(gracefully=False)
# Wait for VM to be in shut off state
utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
virsh.managedsave_remove(vm_name)
vmxml_backup.sync()
if multi_guests:
for i in range(int(multi_guests)):
virsh.remove_domain("%s_%s" % (vm_name, i),
"--remove-all-storage")
qemu_config.restore()
libvirt_guests_config.restore()
libvirtd.restart()
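This test mixes two waiting styles: blind delays such as time.sleep(10) after starting each cloned guest, and condition polling such as utils_misc.wait_for(lambda: vm.state() == "shut off", 10). The polling style is generally preferable because it returns as soon as the condition holds instead of always paying the full delay. A minimal sketch of such a helper, assuming nothing about the actual utils_misc implementation:

import time

def wait_for(condition, timeout, step=1.0):
    """Return True as soon as condition() is truthy, or False once timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(step)  # re-check the condition once per step
    return False

# usage, mirroring the test above:
# wait_for(lambda: vm.state() == "running", 10)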
0
Example 97
Project: tp-libvirt Source File: virsh_update_device_matrix.py
def run(test, params, env):
"""
Test command: virsh update-device.
Update device from an XML <file>.
1. Prepare test environment, adding a cdrom/floppy to VM.
2. Perform virsh update-device operation.
3. Recover test environment.
4. Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
pre_vm_state = params.get("at_dt_device_pre_vm_state")
virsh_dargs = {"debug": True, "ignore_status": True}
def is_attached(vmxml_devices, disk_type, source_file, target_dev):
"""
Check attached device and disk exist or not.
:param vmxml_devices: VMXMLDevices instance
:param disk_type: disk's device type: cdrom or floppy
:param source_file : disk's source file to check
:param target_dev : target device name
:return: True/False if backing file and device found
"""
disks = vmxml_devices.by_device_tag('disk')
for disk in disks:
logging.debug("Check disk XML:\n%s", open(disk['xml']).read())
if disk.device != disk_type:
continue
if disk.target['dev'] != target_dev:
continue
if disk.xmltreefile.find('source') is not None:
if disk.source.attrs['file'] != source_file:
continue
else:
continue
# All three conditions met
logging.debug("Find %s in given disk XML", source_file)
return True
logging.debug("Not find %s in gievn disk XML", source_file)
return False
def check_result(disk_source, disk_type, disk_target,
flags, attach=True):
"""
Check the test result of update-device command.
"""
vm_state = pre_vm_state
active_vmxml = VMXML.new_from_dumpxml(vm_name)
active_attached = is_attached(active_vmxml.devices, disk_type,
disk_source, disk_target)
if vm_state != "transient":
inactive_vmxml = VMXML.new_from_dumpxml(vm_name,
options="--inactive")
inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
disk_source, disk_target)
if flags.count("config") and not flags.count("live"):
if vm_state != "transient":
if attach:
if not inactive_attached:
raise exceptions.TestFail("Inactive domain XML not updated"
" when --config options used for"
" attachment")
if vm_state != "shutoff":
if active_attached:
raise exceptions.TestFail("Active domain XML updated "
"when --config options used"
" for attachment")
else:
if inactive_attached:
raise exceptions.TestFail("Inactive domain XML not updated"
" when --config options used for"
" detachment")
if vm_state != "shutoff":
if not active_attached:
raise exceptions.TestFail("Active domain XML updated "
"when --config options used"
" for detachment")
elif flags.count("live") and not flags.count("config"):
if attach:
if vm_state in ["paused", "running", "transient"]:
if not active_attached:
raise exceptions.TestFail("Active domain XML not updated"
" when --live options used for"
" attachment")
if vm_state in ["paused", "running"]:
if inactive_attached:
raise exceptions.TestFail("Inactive domain XML updated "
"when --live options used for"
" attachment")
else:
if vm_state in ["paused", "running", "transient"]:
if active_attached:
raise exceptions.TestFail("Active domain XML not updated"
" when --live options used for"
" detachment")
if vm_state in ["paused", "running"]:
if not inactive_attached:
raise exceptions.TestFail("Inactive domain XML updated "
"when --live options used for"
" detachment")
elif flags.count("live") and flags.count("config"):
if attach:
if vm_state in ["paused", "running"]:
if not active_attached:
raise exceptions.TestFail("Active domain XML not updated"
" when --live --config options"
" used for attachment")
if not inactive_attached:
raise exceptions.TestFail("Inactive domain XML not updated"
" when --live --config options "
"used for attachment")
else:
if vm_state in ["paused", "running"]:
if active_attached:
raise exceptions.TestFail("Active domain XML not updated"
" when --live --config options"
" used for detachment")
if inactive_attached:
raise exceptions.TestFail("Inactive domain XML not updated"
" when --live --config options "
"used for detachment")
elif flags.count("current") or flags == "":
if attach:
if vm_state in ["paused", "running", "transient"]:
if not active_attached:
raise exceptions.TestFail("Active domain XML not updated "
"when --current options used "
"for attachment")
if vm_state in ["paused", "running"]:
if inactive_attached:
raise exceptions.TestFail("Inactive domain XML updated "
"when --current options used "
"for live attachment")
if vm_state == "shutoff" and not inactive_attached:
raise exceptions.TestFail("Inactive domain XML not updated "
"when --current options used for "
"attachment")
else:
if vm_state in ["paused", "running", "transient"]:
if active_attached:
raise exceptions.TestFail("Active domain XML not updated"
" when --current options used "
"for detachment")
if vm_state in ["paused", "running"]:
if not inactive_attached:
raise exceptions.TestFail("Inactive domain XML updated "
"when --current options used "
"for live detachment")
if vm_state == "shutoff" and inactive_attached:
raise exceptions.TestFail("Inactive domain XML not updated"
" when --current options used "
"for detachment")
def check_rhel_version(release_ver, session=None):
"""
Login to guest and check its release version
"""
rhel_release = {"rhel6": "Red Hat Enterprise Linux Server release 6",
"rhel7": "Red Hat Enterprise Linux Server release 7",
"fedora": "Fedora release"}
version_file = "/etc/redhat-release"
if not rhel_release.has_key(release_ver):
logging.error("Can't support this version of guest: %s",
release_ver)
return False
cmd = "grep '%s' %s" % (rhel_release[release_ver], version_file)
if session:
s = session.cmd_status(cmd)
else:
s = process.run(cmd, ignore_status=True, shell=True).exit_status
logging.debug("Check version cmd return:%s", s)
if s == 0:
return True
else:
return False
vmxml_backup = VMXML.new_from_dumpxml(vm_name, options="--inactive")
# Before doing anything - let's be sure we can support this test
# Parse flag list, skip testing early if flag is not supported
# NOTE: "".split("--") returns [''] which messes up later empty test
at_flag = params.get("at_dt_device_at_options", "")
dt_flag = params.get("at_dt_device_dt_options", "")
flag_list = []
if at_flag.count("--"):
flag_list.extend(at_flag.split("--"))
if dt_flag.count("--"):
flag_list.extend(dt_flag.split("--"))
for item in flag_list:
option = item.strip()
if option == "":
continue
if not bool(virsh.has_command_help_match("update-device", option)):
raise exceptions.TestSkipError("virsh update-device doesn't support "
"--%s" % option)
# As per RH BZ 961443 avoid testing before behavior changes
if 'config' in flag_list:
# SKIP tests using --config if libvirt is 0.9.10 or earlier
if not libvirt_version.version_compare(0, 9, 10):
raise exceptions.TestSkipError("BZ 961443: --config behavior change "
"in version 0.9.10")
if 'persistent' in flag_list or 'live' in flag_list:
# SKIP tests using --persistent if libvirt 1.0.5 or earlier
if not libvirt_version.version_compare(1, 0, 5):
raise exceptions.TestSkipError("BZ 961443: --persistent behavior "
"change in version 1.0.5")
# Get the target bus/dev
disk_type = params.get("disk_type", "cdrom")
target_bus = params.get("updatedevice_target_bus", "ide")
target_dev = params.get("updatedevice_target_dev", "hdc")
disk_mode = params.get("disk_mode", "")
support_mode = ['readonly', 'shareable']
if disk_mode and disk_mode not in support_mode:
raise exceptions.TestError("%s not in support mode %s"
% (disk_mode, support_mode))
# Prepare tmp directory and files.
orig_iso = os.path.join(data_dir.get_tmp_dir(), "orig.iso")
test_iso = os.path.join(data_dir.get_tmp_dir(), "test.iso")
# Check the version first.
host_rhel6 = check_rhel_version('rhel6')
guest_rhel6 = False
if not vm.is_alive():
vm.start()
session = vm.wait_for_login()
if check_rhel_version('rhel6', session):
guest_rhel6 = True
session.close()
vm.destroy(gracefully=False)
try:
# Prepare the disk first.
create_disk(vm_name, orig_iso, disk_type, target_dev, disk_mode)
vmxml_for_test = VMXML.new_from_dumpxml(vm_name,
options="--inactive")
# Turn VM into certain state.
if pre_vm_state == "running":
if at_flag == "--config" or dt_flag == "--config":
if host_rhel6:
raise exceptions.TestSkipError("Config option not supported"
" on this host")
logging.info("Starting %s..." % vm_name)
if vm.is_dead():
vm.start()
vm.wait_for_login().close()
elif pre_vm_state == "shutoff":
if not at_flag or not dt_flag:
if host_rhel6:
raise exceptions.TestSkipError("Default option not supported"
" on this host")
logging.info("Shuting down %s..." % vm_name)
if vm.is_alive():
vm.destroy(gracefully=False)
elif pre_vm_state == "paused":
if at_flag == "--config" or dt_flag == "--config":
if host_rhel6:
raise exceptions.TestSkipError("Config option not supported"
" on this host")
logging.info("Pausing %s..." % vm_name)
if vm.is_dead():
vm.start()
vm.wait_for_login().close()
if not vm.pause():
raise exceptions.TestSkipError("Cann't pause the domain")
elif pre_vm_state == "transient":
logging.info("Creating %s..." % vm_name)
vm.undefine()
if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
vmxml_backup.define()
raise exceptions.TestSkipError("Cann't create the domain")
vm.wait_for_login().close()
except Exception, e:
logging.error(str(e))
if os.path.exists(orig_iso):
os.remove(orig_iso)
vmxml_backup.sync()
raise exceptions.TestSkipError(str(e))
# Get remaining parameters for configuration.
vm_ref = params.get("updatedevice_vm_ref", "domname")
at_status_error = "yes" == params.get("at_status_error", "no")
dt_status_error = "yes" == params.get("dt_status_error", "no")
dom_uuid = vm.get_uuid()
dom_id = vm.get_id()
# Set domain reference.
if vm_ref == "domname":
vm_ref = vm_name
elif vm_ref == "domid":
vm_ref = dom_id
elif vm_ref == "domuuid":
vm_ref = dom_uuid
elif vm_ref == "hexdomid" and dom_id is not None:
vm_ref = hex(int(dom_id))
try:
# Firstly detach the disk.
update_xmlfile = os.path.join(data_dir.get_tmp_dir(),
"update.xml")
create_attach_xml(update_xmlfile, disk_type, target_bus,
target_dev, "", disk_mode)
ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
flagstr=dt_flag, ignore_status=True,
debug=True)
if vm.is_paused():
vm.resume()
vm.wait_for_login().close()
if vm.is_alive() and not guest_rhel6:
time.sleep(5)
# For rhel7 guest, need to update twice for it to take effect.
ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
flagstr=dt_flag, ignore_status=True,
debug=True)
os.remove(update_xmlfile)
libvirt.check_exit_status(ret, dt_status_error)
if not ret.exit_status:
check_result(orig_iso, disk_type, target_dev, dt_flag, False)
# Then attach the disk.
if pre_vm_state == "paused":
if not vm.pause():
raise exceptions.TestFail("Cann't pause the domain")
create_attach_xml(update_xmlfile, disk_type, target_bus,
target_dev, test_iso, disk_mode)
ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
flagstr=at_flag, ignore_status=True,
debug=True)
if vm.is_paused():
vm.resume()
vm.wait_for_login().close()
update_twice = False
if vm.is_alive() and not guest_rhel6:
# For rhel7 guest, need to update twice for it to take effect.
if (pre_vm_state in ["running", "paused"] and
dt_flag == "--config" and at_flag != "--config"):
update_twice = True
elif (pre_vm_state == "transient" and
dt_flag.count("config") and not at_flag.count("config")):
update_twice = True
if update_twice:
time.sleep(5)
ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
flagstr=at_flag, ignore_status=True,
debug=True)
libvirt.check_exit_status(ret, at_status_error)
os.remove(update_xmlfile)
if not ret.exit_status:
check_result(test_iso, disk_type, target_dev, at_flag)
# Try to start vm at last.
if vm.is_dead():
vm.start()
vm.wait_for_login().close()
finally:
vm.destroy(gracefully=False, free_mac_addresses=False)
vmxml_backup.sync()
if os.path.exists(orig_iso):
os.remove(orig_iso)
if os.path.exists(test_iso):
os.remove(test_iso)
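The bare time.sleep(5) between the two update-device calls gives the rhel7 guest time to digest the first media change before the second one is issued. A small helper makes that intent explicit; a minimal sketch with a hypothetical run_update callable standing in for the virsh.update_device invocation:

import time

def update_with_settle(run_update, attempts=2, settle=5.0):
    """Issue an update repeatedly, pausing between attempts so the guest can settle."""
    result = None
    for i in range(attempts):
        result = run_update()
        if i + 1 < attempts:
            time.sleep(settle)  # give the guest time to process the change
    return result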
0
Example 98
Project: tp-libvirt Source File: specific_kvm.py
def run(test, params, env):
"""
convert specific kvm guest to rhev
"""
for v in params.itervalues():
if "V2V_EXAMPLE" in v:
raise exceptions.TestSkipError("Please set real value for %s" % v)
if utils_v2v.V2V_EXEC is None:
raise ValueError('Missing command: virt-v2v')
vm_name = params.get('main_vm', 'EXAMPLE')
target = params.get('target')
remote_host = params.get('remote_host', 'EXAMPLE')
output_mode = params.get('output_mode')
output_format = params.get('output_format')
output_storage = params.get('output_storage', 'default')
bridge = params.get('bridge')
network = params.get('network')
address_cache = env.get('address_cache')
v2v_timeout = int(params.get('v2v_timeout', 1200))
status_error = 'yes' == params.get('status_error', 'no')
checkpoint = params.get('checkpoint', '')
debug_kernel = 'debug_kernel' == checkpoint
multi_kernel_list = ['multi_kernel', 'debug_kernel']
backup_list = ['virtio_on', 'virtio_off', 'floppy', 'floppy_devmap',
'fstab_cdrom', 'fstab_virtio', 'multi_disks', 'sata_disk',
'network_virtio', 'network_rtl8139', 'network_e1000',
'multi_netcards', 'spice', 'spice_encrypt']
error_list = []
def log_fail(msg):
"""
Log error and update error list
"""
logging.error(msg)
error_list.append(msg)
def vm_shell(func):
"""
Decorator of shell session to vm
"""
def wrapper(*args, **kwargs):
vm = libvirt_vm.VM(vm_name, params, test.bindir,
env.get('address_cache'))
if vm.is_dead():
logging.info('VM is down. Starting it now.')
vm.start()
session = vm.wait_for_login()
kwargs['session'] = session
kwargs['vm'] = vm
func(*args, **kwargs)
if session:
session.close()
vm.shutdown()
return wrapper
def check_disks(vmcheck):
"""
Check disk counts inside the VM
"""
# Initialize windows boot up
os_type = params.get("os_type", "linux")
expected_disks = int(params.get("ori_disks", "1"))
logging.debug("Expect %s disks im VM after convert", expected_disks)
# Get disk counts
if os_type == "linux":
cmd = "lsblk |grep disk |wc -l"
disks = int(vmcheck.session.cmd(cmd).strip())
else:
cmd = r"echo list disk > C:\list_disk.txt"
vmcheck.session.cmd(cmd)
cmd = r"diskpart /s C:\list_disk.txt"
output = vmcheck.session.cmd(cmd).strip()
logging.debug("Disks in VM: %s", output)
disks = len(re.findall('Disk\s\d', output))
logging.debug("Find %s disks in VM after convert", disks)
if disks == expected_disks:
logging.info("Disk counts is expected")
else:
raise exceptions.TestFail("Disk counts is wrong")
def install_kernel(session, url=None, kernel_debug=False):
"""
Install kernel to vm
"""
if kernel_debug:
if not utils_misc.yum_install(['kernel-debug'], session=session):
raise exceptions.TestFail('Fail on installing debug kernel')
else:
logging.info('Install kernel-debug success')
else:
if not (url and url.endswith('.rpm')):
raise exceptions.TestError('kernel url not contain ".rpm"')
# rhel6 need to install kernel-firmware first
if '.el6' in session.cmd('uname -r'):
kernel_fm_url = params.get('kernel_fm_url')
cmd_install_firmware = 'rpm -Uv %s' % kernel_fm_url
try:
session.cmd(cmd_install_firmware, timeout=v2v_timeout)
except Exception, e:
raise exceptions.TestError(str(e))
cmd_install_kernel = 'rpm -iv %s' % url
try:
session.cmd(cmd_install_kernel, timeout=v2v_timeout)
except Exception, e:
raise exceptions.TestError(str(e))
@vm_shell
def multi_kernel(*args, **kwargs):
"""
Make multi-kernel test
"""
session = kwargs['session']
vm = kwargs['vm']
kernel_url = params.get('kernel_url')
install_kernel(session, kernel_url, debug_kernel)
default_kernel = vm.set_boot_kernel(1, debug_kernel)
if not default_kernel:
raise exceptions.TestError('Set default kernel failed')
params['defaultkernel'] = default_kernel
def check_vmlinuz_initramfs(v2v_result):
"""
Check if vmlinuz matches initramfs on multi-kernel case
"""
logging.info('Checking if vmlinuz matches initramfs')
kernels = re.search(
r'kernel packages in this guest:(.*?)grub kernels in this',
v2v_result, flags=re.DOTALL)
try:
lines = kernels.group(1)
kernel_list = re.findall('\((.*?)\)', lines)
for kernel in kernel_list:
vmlinuz = re.search(r'/boot/vmlinuz-(.*?),', kernel).group(1)
initramfs = \
re.search(r'/boot/initramfs-(.*?)\.img', kernel).group(1)
logging.debug('vmlinuz version is: %s' % vmlinuz)
logging.debug('initramfs version is: %s' % initramfs)
if vmlinuz != initramfs:
raise exceptions.TestFail('vmlinuz does not match initramfs')
except Exception, e:
raise exceptions.TestError('Error finding kernel info\n%s' % str(e))
def check_boot_kernel(vmcheck, default_kernel, kernel_debug=False):
"""
Check if converted vm use the default kernel
"""
logging.debug('Check debug kernel: %s' % kernel_debug)
current_kernel = vmcheck.session.cmd('uname -r').strip()
logging.debug('Current kernel: %s' % current_kernel)
logging.debug('Default kernel: %s' % default_kernel)
if kernel_debug:
if '.debug' in current_kernel:
raise exceptions.TestFail('VM should choose non-debug kernel')
elif current_kernel not in default_kernel:
raise exceptions.TestFail('VM should choose default kernel')
def check_floppy_exist(vmcheck):
"""
Check if floppy exists after conversion
"""
blk = vmcheck.session.cmd('lsblk')
logging.info(blk)
if not re.search('fd0', blk):
raise exceptions.TestFail('Floppy not found')
def attach_removable_media(type, source, dev):
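"""
Attach a cdrom or floppy image to the VM as a removable device
"""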
bus = {'cdrom': 'ide', 'floppy': 'fdc'}
args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
'type': type, 'targetbus': bus[type]}
if type == 'cdrom':
args.update({'mode': 'readonly'})
config = ''
# Join all options together to get command line
for key in args.keys():
config += ' --%s %s' % (key, args[key])
config += ' --current'
virsh.attach_disk(vm_name, source, dev, extra=config)
def change_disk_bus(dest):
"""
Change all disks' bus type to $dest
"""
bus_list = ['ide', 'sata', 'virtio']
if dest not in bus_list:
raise exceptions.TestError('Bus type not supported')
dev_prefix = ['h', 's', 'v']
dev_table = dict(zip(bus_list, dev_prefix))
logging.info('Change disk bus to %s' % dest)
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
disks = vmxml.get_disk_all()
index = 0
for disk in disks.values():
if disk.get('device') != 'disk':
continue
target = disk.find('target')
target.set('bus', dest)
target.set('dev', dev_table[dest] + 'd' + string.lowercase[index])
disk.remove(disk.find('address'))
index += 1
vmxml.sync()
def change_network_model(model):
"""
Change network model to $model
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
network_list = vmxml.get_iface_all()
for node in network_list.values():
if node.get('type') == 'network':
node.find('model').set('type', model)
vmxml.sync()
def attach_network_card(model):
"""
Attach network card based on model
"""
if model not in ('e1000', 'virtio', 'rtl8139'):
raise exceptions.TestError('Network model not supported')
options = {'type': 'network', 'source': 'default', 'model': model}
line = ''
for key in options:
line += ' --' + key + ' ' + options[key]
line += ' --current'
logging.debug(virsh.attach_interface(vm_name, option=line))
def check_multi_netcards(mac_list, virsh_session_id):
"""
Check if number and type of network cards meet expectation
"""
virsh_instance = virsh.VirshPersistent(session_id=virsh_session_id)
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
vm_name, virsh_instance=virsh_instance)
iflist = vmxml.get_iface_all()
logging.debug('MAC list before v2v: %s' % mac_list)
logging.debug('MAC list after v2v: %s' % iflist.keys())
if set(mac_list).difference(iflist.keys()):
raise exceptions.TestFail('Missing network interface')
for mac in iflist:
if iflist[mac].find('model').get('type') != 'virtio':
raise exceptions.TestFail('Network not converted to virtio')
@vm_shell
def insert_floppy_devicemap(**kwargs):
"""
Add an entry of floppy to device.map
"""
session = kwargs['session']
line = '(fd0) /dev/fd0'
devmap = '/boot/grub/device.map'
if session.cmd_status('ls %s' % devmap):
devmap = '/boot/grub2/device.map'
cmd_exist = 'grep \'(fd0)\' %s' % devmap
cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
if session.cmd_status(cmd_exist):
session.cmd(cmd_set)
def make_label(session):
"""
Label a volume, swap or root volume
"""
# swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
cmd_map = {'root': 'e2label %s ROOT',
'swap': 'swaplabel -L SWAPPER %s'}
if not session.cmd_status('swaplabel --help'):
blk = 'swap'
elif not session.cmd_status('which e2label'):
blk = 'root'
else:
raise exceptions.TestError('No tool to make label')
entry = session.cmd('blkid|grep %s' % blk).strip()
path = entry.split()[0].strip(':')
cmd_label = cmd_map[blk] % path
if 'LABEL' not in entry:
session.cmd(cmd_label)
return blk
@vm_shell
def specify_fstab_entry(type, **kwargs):
"""
Specify entry in fstab file
"""
type_list = ['cdrom', 'uuid', 'label', 'virtio', 'invalid']
if type not in type_list:
raise exceptions.TestError('%s not supported in fstab' % type)
session = kwargs['session']
# Specify cdrom device
if type == 'cdrom':
line = '/dev/cdrom /media/CDROM auto exec 0 0'
cmd = [
'mkdir -p /media/CDROM',
'mount /dev/cdrom /media/CDROM',
'echo "%s" >> /etc/fstab' % line
]
for c in cmd:
session.cmd(c)
elif type == 'invalid':
line = utils_misc.generate_random_string(6)
session.cmd('echo "%s" >> /etc/fstab' % line)
else:
dev_map = {'uuid': 'UUID', 'label': 'LABEL', 'virtio': '/vd'}
logging.info(type)
if session.cmd_status('cat /etc/fstab|grep %s' % dev_map[type]):
# Specify device by UUID
if type == 'uuid':
entry = session.cmd('blkid -s UUID|grep swap').strip().split()
# Replace path for UUID
origin = entry[0].strip(':')
replace = entry[1].replace('"', '')
# Specify virtio device
elif type == 'virtio':
entry = session.cmd('cat /etc/fstab|grep /boot').strip()
# Get the ID (no matter what, usually UUID)
origin = entry.split()[0]
key = origin.split('=')[1]
blkinfo = session.cmd('blkid|grep %s' % key).strip()
# Replace with virtio disk path
replace = blkinfo.split()[0].strip(':')
# Specify device by label
elif type == 'label':
blk = make_label(session)
entry = session.cmd('blkid|grep %s' % blk).strip()
# Remove " from LABEL="cuem"
replace = entry.split()[1].strip().replace('"', '')
# Replace the original id/path with label
origin = entry.split()[0].strip(':')
cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
session.cmd(cmd_fstab)
@vm_shell
def create_large_file(**kwargs):
"""
Create a large file so the free space left on root is less than 20M
"""
session = kwargs['session']
cmd_df = "df -m /|awk 'END{print $4}'"
avail = int(session.cmd(cmd_df).strip())
logging.info('Available space: %dM' % avail)
if avail > 19:
params['large_file'] = '/file.large'
cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
(params['large_file'], avail - 18)
session.cmd(cmd_create, timeout=v2v_timeout)
logging.info('Available space: %sM' % session.cmd(cmd_df).strip())
@vm_shell
def corrupt_rpmdb(**kwargs):
"""
Corrupt rpm db
"""
session = kwargs['session']
# If __db.* files exist, remove them, then touch __db.001 to corrupt the db.
if not session.cmd_status('ls /var/lib/rpm/__db.001'):
session.cmd('rm -f /var/lib/rpm/__db.*')
session.cmd('touch /var/lib/rpm/__db.001')
if not session.cmd_status('yum update'):
raise exceptions.TestError('Corrupt rpmdb failed')
@vm_shell
def bogus_kernel(**kwargs):
"""
Add a bogus kernel entry
"""
session = kwargs['session']
vm = kwargs['vm']
grub_file = vm.get_grub_file(session)
cfg = {
"file": [grub_file, "/etc/grub.d/40_custom"],
"search": ["title .*?.img", "menuentry '.*?}"],
"title": [["(title\s)", r"\1bogus "],
["(menuentry\s'.*?)'", r"\1 bogus'"]],
"kernel": [["(kernel .*?)(\s)", r"\1.bogus\2"],
["(/vmlinuz.*?)(\s)", r"\1.bogus\2"]],
"make": ["pwd", "grub2-mkconfig -o /boot/grub2/grub.cfg"]
}
if 'grub2' in grub_file:
index = 1
else:
index = 0
content = session.cmd('cat %s' % grub_file).strip()
search = re.search(cfg['search'][index], content, re.DOTALL)
if search:
# Make a copy of existing kernel entry string
new_entry = search.group(0)
# Replace title with bogus title
new_entry = re.sub(cfg['title'][index][0],
cfg['title'][index][1], new_entry)
# Replace kernel with bogus kernel
new_entry = re.sub(cfg['kernel'][index][0],
cfg['kernel'][index][1], new_entry)
logging.info(new_entry)
session.cmd('echo "%s"|cat >> %s' % (new_entry, cfg['file'][index]))
# Apply the change
session.cmd(cfg['make'][index])
else:
raise exceptions.TestError('No kernel found')
@vm_shell
def grub_serial_terminal(**kwargs):
"""
Edit the serial and terminal lines of grub.conf
"""
session = kwargs['session']
vm = kwargs['vm']
grub_file = vm.get_grub_file(session)
if 'grub2' in grub_file:
raise exceptions.TestSkipError('Skip this case on grub2')
cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
cmd += "terminal -timeout=10 serial console' %s" % grub_file
session.cmd(cmd)
def make_unclean_fs():
"""
Force off the VM to leave win8 with an unclean file system
"""
if virsh.start(vm_name, ignore_status=True).exit_status:
raise exceptions.TestError('Start vm failed')
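# Fixed 10s pause so the guest starts booting before the force-off
# below leaves its file system unclean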
time.sleep(10)
virsh.destroy(vm_name, debug=True)
def cleanup_fs():
"""
Clean up the file system by restarting and shutting down normally
"""
vm = libvirt_vm.VM(vm_name, params, test.bindir,
env.get('address_cache'))
if vm.is_dead():
vm.start()
# Sleep 1 minute to wait for the guest to fully boot up
time.sleep(60)
vm.shutdown()
@vm_shell
def set_selinux(value, **kwargs):
"""
Set selinux state of the guest
"""
session = kwargs['session']
current_stat = session.cmd_output('getenforce').strip()
logging.debug('Current selinux status: %s', current_stat)
if current_stat != value:
cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
logging.info('Set selinux state with command %s', cmd)
session.cmd(cmd)
def check_v2v_log(output, check=None):
"""
Check if error/warning meets expectation
"""
# Fail if found error message
not_expect_map = {
'fstab_cdrom': ['warning: /files/etc/fstab.*? references unknown'
' device "cdrom"'],
'fstab_label': ['unknown filesystem label.*'],
'fstab_uuid': ['unknown filesystem UUID.*'],
'fstab_virtio': ['unknown filesystem /dev/vd.*'],
'kdump': ['.*multiple files in /boot could be the initramfs.*'],
'ctemp': ['.*case_sensitive_path: v2v: no file or directory.*'],
'floppy_devmap': ['unknown filesystem /dev/fd'],
'corrupt_rpmdb': ['.*error: rpmdb:.*']
}
# Fail if NOT found error message
expect_map = {
'not_shutdown': [
'.*is running or paused.*',
'virt-v2v: error: internal error: invalid argument:.*'
],
'serial_terminal': ['virt-v2v: error: no kernels were found in '
'the grub configuration'],
'no_space': ["virt-v2v: error: not enough free space for "
"conversion on filesystem '/'"],
'unclean_fs': ['.*Windows Hibernation or Fast Restart.*'],
'fstab_invalid': ['libguestfs error: /etc/fstab:.*?: augeas parse failure:']
}
if check is None or not (check in not_expect_map or check in expect_map):
logging.info('Skip checking v2v log')
else:
logging.info('Checking v2v log')
if check in expect_map:
expect = True
content_map = expect_map
elif check in not_expect_map:
expect = False
content_map = not_expect_map
if utils_v2v.check_log(output, content_map[check], expect=expect):
logging.info('Finish checking v2v log')
else:
raise exceptions.TestFail('Check v2v log failed')
def check_boot():
"""
Check if guest can boot up after configuration
"""
try:
vm = libvirt_vm.VM(vm_name, params, test.bindir,
env.get('address_cache'))
if vm.is_alive():
vm.shutdown()
logging.info('Booting up %s' % vm_name)
vm.start()
vm.wait_for_login()
vm.shutdown()
logging.info('%s is down' % vm_name)
except Exception, e:
raise exceptions.TestError('Bootup guest and login failed: %s' % str(e))
def check_result(result, status_error):
"""
Check virt-v2v command result
"""
utlv.check_exit_status(result, status_error)
output = result.stdout + result.stderr
if status_error:
if checkpoint in ['running', 'paused']:
check_v2v_log(output, 'not_shutdown')
else:
check_v2v_log(output, checkpoint)
else:
if output_mode == 'rhev':
if not utils_v2v.import_vm_to_ovirt(params, address_cache,
timeout=v2v_timeout):
raise exceptions.TestFail('Import VM failed')
if output_mode == 'libvirt':
try:
virsh.start(vm_name, debug=True, ignore_status=False)
except Exception, e:
raise exceptions.TestFail('Start vm failed: %s' % str(e))
# Check guest following the checkpoint document after conversion
vmchecker = VMChecker(test, params, env)
params['vmchecker'] = vmchecker
ret = vmchecker.run()
if len(ret) == 0:
logging.info("All common checkpoints passed")
else:
raise exceptions.TestFail("%s checkpoints failed" % ret)
if checkpoint in ['multi_kernel', 'debug_kernel']:
default_kernel = params.get('defaultkernel')
check_boot_kernel(vmchecker.checker, default_kernel, debug_kernel)
if checkpoint == 'multi_kernel':
check_vmlinuz_initramfs(output)
elif checkpoint == 'floppy':
check_floppy_exist(vmchecker.checker)
elif checkpoint == 'multi_disks':
check_disks(vmchecker.checker)
elif checkpoint == 'multi_netcards':
check_multi_netcards(params['mac_address'],
vmchecker.virsh_session_id)
elif checkpoint.startswith('spice'):
vmchecker.check_graphics({'type': 'spice'})
if checkpoint == 'spice_encrypt':
vmchecker.check_graphics(params[checkpoint])
elif checkpoint.startswith('selinux'):
status = vmchecker.checker.session.cmd('getenforce').strip().lower()
logging.info('Selinux status after v2v: %s', status)
if status != checkpoint[8:]:
log_fail('Selinux status does not match')
check_v2v_log(output, checkpoint)
# Merge 2 error lists
error_list.extend(vmchecker.errors)
if len(error_list):
raise exceptions.TestFail('%d checkpoints failed: %s'
% (len(error_list), error_list))
try:
v2v_params = {
'hostname': remote_host, 'hypervisor': 'kvm', 'v2v_opts': '-v -x',
'storage': output_storage, 'network': network, 'bridge': bridge,
'target': target, 'main_vm': vm_name, 'input_mode': 'libvirt',
}
if output_format:
v2v_params.update({'output_format': output_format})
# Build rhev related options
if output_mode == 'rhev':
# Create SASL user on the ovirt host
user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
params.get("sasl_pwd"))
v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
v2v_sasl.server_ip = params.get("remote_ip")
v2v_sasl.server_user = params.get('remote_user')
v2v_sasl.server_pwd = params.get('remote_pwd')
v2v_sasl.setup(remote=True)
if output_mode == 'local':
v2v_params['storage'] = data_dir.get_tmp_dir()
backup_xml = None
if checkpoint in backup_list:
backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
if checkpoint == 'multi_disks':
attach_disk_path = os.path.join(test.tmpdir, 'attach_disks')
utlv.attach_disks(env.get_vm(vm_name), attach_disk_path,
None, params)
new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
disk_count = 0
for disk in new_xml.get_disk_all().values():
if disk.get('device') == 'disk':
disk_count += 1
params['ori_disks'] = disk_count
elif checkpoint in multi_kernel_list:
multi_kernel()
elif checkpoint == 'virtio_on':
change_disk_bus('virtio')
elif checkpoint == 'virtio_off':
change_disk_bus('ide')
elif checkpoint == 'sata_disk':
change_disk_bus('sata')
elif checkpoint.startswith('floppy'):
img_path = data_dir.get_tmp_dir() + '/floppy.img'
utlv.create_local_disk('floppy', img_path)
attach_removable_media('floppy', img_path, 'fda')
if checkpoint == 'floppy_devmap':
insert_floppy_devicemap()
elif checkpoint.startswith('fstab'):
if checkpoint == 'fstab_cdrom':
img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
utlv.create_local_disk('iso', img_path)
attach_removable_media('cdrom', img_path, 'hdc')
elif checkpoint == 'fstab_virtio':
change_disk_bus('virtio')
specify_fstab_entry(checkpoint[6:])
elif checkpoint == 'running':
virsh.start(vm_name)
logging.info('VM state: %s' %
virsh.domstate(vm_name).stdout.strip())
elif checkpoint == 'paused':
virsh.start(vm_name, '--paused')
logging.info('VM state: %s' %
virsh.domstate(vm_name).stdout.strip())
elif checkpoint == 'serial_terminal':
grub_serial_terminal()
check_boot()
elif checkpoint == 'no_space':
create_large_file()
elif checkpoint == 'corrupt_rpmdb':
corrupt_rpmdb()
elif checkpoint == 'bogus_kernel':
bogus_kernel()
check_boot()
elif checkpoint == 'unclean_fs':
make_unclean_fs()
elif checkpoint.startswith('network'):
change_network_model(checkpoint[8:])
elif checkpoint == 'multi_netcards':
attach_network_card('virtio')
attach_network_card('e1000')
params['mac_address'] = []
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
network_list = vmxml.get_iface_all()
for mac in network_list:
if network_list[mac].get('type') == 'network':
params['mac_address'].append(mac)
if len(params['mac_address']) < 2:
raise exceptions.TestError('Not enough network interfaces')
logging.debug('MAC address: %s' % params['mac_address'])
elif checkpoint.startswith('spice'):
vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'spice'})
if checkpoint == 'spice_encrypt':
spice_passwd = {'passwd': params.get('spice_passwd', 'redhat')}
vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
params[checkpoint] = {'passwdValidTo': '1970-01-01T00:00:01'}
elif checkpoint == 'host_selinux_on':
params['selinux_stat'] = utils_selinux.get_status()
utils_selinux.set_status('enforcing')
elif checkpoint.startswith('selinux'):
set_selinux(checkpoint[8:])
v2v_result = utils_v2v.v2v_cmd(v2v_params)
check_result(v2v_result, status_error)
finally:
if params.get('vmchecker'):
params['vmchecker'].cleanup()
if backup_xml:
backup_xml.sync()
if checkpoint == 'unclean_fs':
cleanup_fs()
if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
utils_selinux.set_status(params['selinux_stat'])
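Both sleep calls in this test are fixed-delay waits around VM lifecycle steps: make_unclean_fs() gives the guest ten seconds to start booting before forcing it off, and cleanup_fs() waits a full minute for a complete boot before a clean shutdown. A minimal sketch of the same pattern, with hypothetical action and poll callables standing in for the libvirt calls, could look like this; when a readiness check is available, polling against a deadline is usually more robust than a bare sleep:

import time

def wait_then_act(action, delay, poll=None, interval=1.0):
    """Wait up to `delay` seconds, then run `action`.

    Without a `poll` callable this is a plain fixed wait, as in the
    test above; with one, it returns early once `poll()` is true.
    """
    if poll is None:
        time.sleep(delay)  # fixed wait, e.g. 60s for a full guest boot
    else:
        deadline = time.time() + delay
        while time.time() < deadline and not poll():
            time.sleep(interval)  # re-check readiness once per interval
    return action()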
Example 99
Project: headphones Source File: importer.py
def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
# Putting this here to get around the circular import. We're using this to update thumbnails for artist/albums
from headphones import cache
# Can't add various artists - throws an error from MB
if artistid in blacklisted_special_artists:
logger.warn('Cannot import blocked special purpose artist with id ' + artistid)
return
# We'll use this to see if we should update the 'LastUpdated' time stamp
errors = False
myDB = db.DBConnection()
# Delete from blacklist if it's on there
myDB.action('DELETE from blacklist WHERE ArtistID=?', [artistid])
# We need the current minimal info in the database instantly
# so we don't throw a 500 error when we redirect to the artistPage
controlValueDict = {"ArtistID": artistid}
# Don't replace a known artist name with an "Artist ID" placeholder
dbartist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [artistid]).fetchone()
# Only modify the Include Extras stuff if it's a new artist. We need it early so we know what to fetch
if not dbartist:
newValueDict = {"ArtistName": "Artist ID: %s" % (artistid),
"Status": "Loading",
"IncludeExtras": headphones.CONFIG.INCLUDE_EXTRAS,
"Extras": headphones.CONFIG.EXTRAS}
if type == "series":
newValueDict['Type'] = "series"
else:
newValueDict = {"Status": "Loading"}
if dbartist["Type"] == "series":
type = "series"
myDB.upsert("artists", newValueDict, controlValueDict)
if type == "series":
artist = mb.getSeries(artistid)
else:
artist = mb.getArtist(artistid, extrasonly)
if artist and artist.get('artist_name') in blacklisted_special_artist_names:
logger.warn('Cannot import blocked special purpose artist: %s' % artist.get('artist_name'))
myDB.action('DELETE from artists WHERE ArtistID=?', [artistid])
# in case it's already in the db
myDB.action('DELETE from albums WHERE ArtistID=?', [artistid])
myDB.action('DELETE from tracks WHERE ArtistID=?', [artistid])
return
if not artist:
logger.warn("Error fetching artist info. ID: " + artistid)
if dbartist is None:
newValueDict = {"ArtistName": "Fetch failed, try refreshing. (%s)" % (artistid),
"Status": "Active"}
else:
newValueDict = {"Status": "Active"}
myDB.upsert("artists", newValueDict, controlValueDict)
return
if artist['artist_name'].startswith('The '):
sortname = artist['artist_name'][4:]
else:
sortname = artist['artist_name']
logger.info(u"Now adding/updating: " + artist['artist_name'])
controlValueDict = {"ArtistID": artistid}
newValueDict = {"ArtistName": artist['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("artists", newValueDict, controlValueDict)
# See if we need to grab extras. Artist specific extras take precedence
# over global option. Global options are set when adding a new artist
try:
db_artist = myDB.action('SELECT IncludeExtras, Extras from artists WHERE ArtistID=?',
[artistid]).fetchone()
includeExtras = db_artist['IncludeExtras']
except IndexError:
includeExtras = False
# Clean all references to release groups in the DB that are no longer referenced
# from the musicbrainz refresh
group_list = []
force_repackage = 0
# Don't nuke the database if there's a MusicBrainz error
if len(artist['releasegroups']) != 0:
for groups in artist['releasegroups']:
group_list.append(groups['id'])
if not extrasonly:
remove_missing_groups_from_albums = myDB.select(
"SELECT AlbumID FROM albums WHERE ArtistID=?", [artistid])
else:
remove_missing_groups_from_albums = myDB.select(
'SELECT AlbumID FROM albums WHERE ArtistID=? AND Status="Skipped" AND Type!="Album"',
[artistid])
for items in remove_missing_groups_from_albums:
if items['AlbumID'] not in group_list:
# Remove all from albums/tracks that aren't in release groups
myDB.action("DELETE FROM albums WHERE AlbumID=?", [items['AlbumID']])
myDB.action("DELETE FROM allalbums WHERE AlbumID=?", [items['AlbumID']])
myDB.action("DELETE FROM tracks WHERE AlbumID=?", [items['AlbumID']])
myDB.action("DELETE FROM alltracks WHERE AlbumID=?", [items['AlbumID']])
myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [items['AlbumID']])
logger.info("[%s] Removing all references to release group %s to reflect MusicBrainz refresh" % (
artist['artist_name'], items['AlbumID']))
if not extrasonly:
force_repackage = 1
else:
if not extrasonly:
logger.info(
"[%s] There was either an error pulling data from MusicBrainz or there might not be any releases for this category" %
artist['artist_name'])
# Then search for releases within releasegroups, if releases don't exist, then remove from allalbums/alltracks
album_searches = []
for rg in artist['releasegroups']:
al_title = rg['title']
today = helpers.today()
rgid = rg['id']
skip_log = 0
# Make a user configurable variable to skip update of albums with release dates older than this date (in days)
pause_delta = headphones.CONFIG.MB_IGNORE_AGE
rg_exists = myDB.action("SELECT * from albums WHERE AlbumID=?", [rg['id']]).fetchone()
if not forcefull:
new_release_group = False
try:
check_release_date = rg_exists['ReleaseDate']
except TypeError:
check_release_date = None
new_release_group = True
if new_release_group:
logger.info("[%s] Now adding: %s (New Release Group)" % (artist['artist_name'], rg['title']))
new_releases = mb.get_new_releases(rgid, includeExtras)
else:
if check_release_date is None or check_release_date == u"None":
logger.info("[%s] Now updating: %s (No Release Date)" % (artist['artist_name'], rg['title']))
new_releases = mb.get_new_releases(rgid, includeExtras, True)
else:
if len(check_release_date) == 10:
release_date = check_release_date
elif len(check_release_date) == 7:
release_date = check_release_date + "-31"
elif len(check_release_date) == 4:
release_date = check_release_date + "-12-31"
else:
release_date = today
if helpers.get_age(today) - helpers.get_age(release_date) < pause_delta:
logger.info("[%s] Now updating: %s (Release Date <%s Days)",
artist['artist_name'], rg['title'], pause_delta)
new_releases = mb.get_new_releases(rgid, includeExtras, True)
else:
logger.info("[%s] Skipping: %s (Release Date >%s Days)",
artist['artist_name'], rg['title'], pause_delta)
skip_log = 1
new_releases = 0
if force_repackage == 1:
new_releases = -1
logger.info('[%s] Forcing repackage of %s (Release Group Removed)',
artist['artist_name'], al_title)
else:
new_releases = new_releases
else:
logger.info("[%s] Now adding/updating: %s (Comprehensive Force)", artist['artist_name'],
rg['title'])
new_releases = mb.get_new_releases(rgid, includeExtras, forcefull)
if new_releases != 0:
# Dump existing hybrid release since we're repackaging/replacing it
myDB.action("DELETE from albums WHERE ReleaseID=?", [rg['id']])
myDB.action("DELETE from allalbums WHERE ReleaseID=?", [rg['id']])
myDB.action("DELETE from tracks WHERE ReleaseID=?", [rg['id']])
myDB.action("DELETE from alltracks WHERE ReleaseID=?", [rg['id']])
myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [rg['id']])
# This will be used later to build a hybrid release
fullreleaselist = []
# Search for releases within a release group
find_hybrid_releases = myDB.action("SELECT * from allalbums WHERE AlbumID=?",
[rg['id']])
# Build the dictionary for the fullreleaselist
for items in find_hybrid_releases:
# don't include hybrid information, since that's what we're replacing
if items['ReleaseID'] != rg['id']:
hybrid_release_id = items['ReleaseID']
newValueDict = {"ArtistID": items['ArtistID'],
"ArtistName": items['ArtistName'],
"AlbumTitle": items['AlbumTitle'],
"AlbumID": items['AlbumID'],
"AlbumASIN": items['AlbumASIN'],
"ReleaseDate": items['ReleaseDate'],
"Type": items['Type'],
"ReleaseCountry": items['ReleaseCountry'],
"ReleaseFormat": items['ReleaseFormat']
}
find_hybrid_tracks = myDB.action("SELECT * from alltracks WHERE ReleaseID=?",
[hybrid_release_id])
totalTracks = 1
hybrid_track_array = []
for hybrid_tracks in find_hybrid_tracks:
hybrid_track_array.append({
'number': hybrid_tracks['TrackNumber'],
'title': hybrid_tracks['TrackTitle'],
'id': hybrid_tracks['TrackID'],
# 'url': hybrid_tracks['TrackURL'],
'duration': hybrid_tracks['TrackDuration']
})
totalTracks += 1
newValueDict['ReleaseID'] = hybrid_release_id
newValueDict['Tracks'] = hybrid_track_array
fullreleaselist.append(newValueDict)
# Basically just do the same thing again for the hybrid release
# This may end up being called with an empty fullreleaselist
try:
hybridrelease = getHybridRelease(fullreleaselist)
logger.info('[%s] Packaging %s releases into hybrid title' % (
artist['artist_name'], rg['title']))
except Exception as e:
errors = True
logger.warn('[%s] Unable to get hybrid release information for %s: %s' % (
artist['artist_name'], rg['title'], e))
continue
# Use the ReleaseGroupID as the ReleaseID for the hybrid release to differentiate it
# We can then use the condition WHERE ReleaseID == ReleaseGroupID to select it
# The hybrid won't have a country or a format
controlValueDict = {"ReleaseID": rg['id']}
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumID": rg['id'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"ReleaseDate": hybridrelease['ReleaseDate'],
"Type": rg['type']
}
myDB.upsert("allalbums", newValueDict, controlValueDict)
for track in hybridrelease['Tracks']:
cleanname = helpers.clean_name(artist['artist_name'] + ' ' + rg['title'] + ' ' + track['title'])
controlValueDict = {"TrackID": track['id'],
"ReleaseID": rg['id']}
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"AlbumID": rg['id'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
}
match = myDB.action('SELECT Location, BitRate, Format from have WHERE CleanName=?',
[cleanname]).fetchone()
if not match:
match = myDB.action(
'SELECT Location, BitRate, Format from have WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?',
[artist['artist_name'], rg['title'], track['title']]).fetchone()
# if not match:
# match = myDB.action('SELECT Location, BitRate, Format from have WHERE TrackID=?', [track['id']]).fetchone()
if match:
newValueDict['Location'] = match['Location']
newValueDict['BitRate'] = match['BitRate']
newValueDict['Format'] = match['Format']
# myDB.action('UPDATE have SET Matched="True" WHERE Location=?', [match['Location']])
myDB.action('UPDATE have SET Matched=? WHERE Location=?',
(rg['id'], match['Location']))
myDB.upsert("alltracks", newValueDict, controlValueDict)
# Delete matched tracks from the have table
# myDB.action('DELETE from have WHERE Matched="True"')
# If there's no release in the main albums tables, add the default (hybrid)
# If there is a release, check the ReleaseID against the AlbumID to see if they differ (user updated)
# check if the album already exists
if not rg_exists:
releaseid = rg['id']
else:
releaseid = rg_exists['ReleaseID']
if not releaseid:
releaseid = rg['id']
album = myDB.action('SELECT * from allalbums WHERE ReleaseID=?', [releaseid]).fetchone()
controlValueDict = {"AlbumID": rg['id']}
newValueDict = {"ArtistID": album['ArtistID'],
"ArtistName": album['ArtistName'],
"AlbumTitle": album['AlbumTitle'],
"ReleaseID": album['ReleaseID'],
"AlbumASIN": album['AlbumASIN'],
"ReleaseDate": album['ReleaseDate'],
"Type": album['Type'],
"ReleaseCountry": album['ReleaseCountry'],
"ReleaseFormat": album['ReleaseFormat']
}
if rg_exists:
newValueDict['DateAdded'] = rg_exists['DateAdded']
newValueDict['Status'] = rg_exists['Status']
else:
today = helpers.today()
newValueDict['DateAdded'] = today
if headphones.CONFIG.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
elif album['ReleaseDate'] > today and headphones.CONFIG.AUTOWANT_UPCOMING:
newValueDict['Status'] = "Wanted"
# Sometimes "new" albums are added to musicbrainz after their release date, so let's try to catch these
# The first test just makes sure we have year-month-day
elif helpers.get_age(album['ReleaseDate']) and helpers.get_age(
today) - helpers.get_age(
album['ReleaseDate']) < 21 and headphones.CONFIG.AUTOWANT_UPCOMING:
newValueDict['Status'] = "Wanted"
else:
newValueDict['Status'] = "Skipped"
myDB.upsert("albums", newValueDict, controlValueDict)
tracks = myDB.action('SELECT * from alltracks WHERE ReleaseID=?',
[releaseid]).fetchall()
# This is used to see how many tracks you have from an album - to
# mark it as downloaded. Default is 80%, can be set in config as
# ALBUM_COMPLETION_PCT
total_track_count = len(tracks)
if total_track_count == 0:
logger.warning("Total track count is zero for Release ID " +
"'%s', skipping.", releaseid)
continue
for track in tracks:
controlValueDict = {"TrackID": track['TrackID'],
"AlbumID": rg['id']}
newValueDict = {"ArtistID": track['ArtistID'],
"ArtistName": track['ArtistName'],
"AlbumTitle": track['AlbumTitle'],
"AlbumASIN": track['AlbumASIN'],
"ReleaseID": track['ReleaseID'],
"TrackTitle": track['TrackTitle'],
"TrackDuration": track['TrackDuration'],
"TrackNumber": track['TrackNumber'],
"CleanName": track['CleanName'],
"Location": track['Location'],
"Format": track['Format'],
"BitRate": track['BitRate']
}
myDB.upsert("tracks", newValueDict, controlValueDict)
# Mark albums as downloaded if they have at least 80% (by default, configurable) of the album
have_track_count = len(
myDB.select('SELECT * from tracks WHERE AlbumID=? AND Location IS NOT NULL',
[rg['id']]))
marked_as_downloaded = False
if rg_exists:
if rg_exists['Status'] == 'Skipped' and (
(have_track_count / float(total_track_count)) >= (
headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0)):
myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?',
['Downloaded', rg['id']])
marked_as_downloaded = True
else:
if (have_track_count / float(total_track_count)) >= (
headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0):
myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?',
['Downloaded', rg['id']])
marked_as_downloaded = True
logger.info(
u"[%s] Seeing if we need album art for %s" % (artist['artist_name'], rg['title']))
cache.getThumb(AlbumID=rg['id'])
# Start a search for the album if it's new, hasn't been marked as
# downloaded and autowant_all is selected. This search is deferred,
# in case the search fails and halts the rest of the import.
if not rg_exists and not marked_as_downloaded and headphones.CONFIG.AUTOWANT_ALL:
album_searches.append(rg['id'])
else:
if skip_log == 0:
logger.info(u"[%s] No new releases, so no changes made to %s" % (
artist['artist_name'], rg['title']))
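# Fixed three-second pause between release groups, presumably to
# avoid hammering the MusicBrainz web service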
time.sleep(3)
finalize_update(artistid, artist['artist_name'], errors)
logger.info(u"Seeing if we need album art for: %s" % artist['artist_name'])
cache.getThumb(ArtistID=artistid)
logger.info(u"Fetching Metacritic reviews for: %s" % artist['artist_name'])
metacritic.update(artistid, artist['artist_name'], artist['releasegroups'])
if errors:
logger.info(
"[%s] Finished updating artist: %s but with errors, so not marking it as updated in the database" % (
artist['artist_name'], artist['artist_name']))
else:
myDB.action('DELETE FROM newartists WHERE ArtistName = ?', [artist['artist_name']])
logger.info(u"Updating complete for: %s" % artist['artist_name'])
# Start searching for newly added albums
if album_searches:
from headphones import searcher
logger.info("Start searching for %d albums.", len(album_searches))
for album_search in album_searches:
searcher.searchforalbum(albumid=album_search)
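The time.sleep(3) at the bottom of the release-group loop above acts as a simple throttle between successive MusicBrainz lookups (the service asks clients to keep to roughly one request per second). A stripped-down sketch of that throttling idiom, where items and handle are placeholders rather than headphones APIs:

import time

def process_with_throttle(items, handle, pause=3.0):
    """Run `handle` on each item, pausing between iterations.

    A constant inter-request delay is a blunt but dependable way to
    stay under a remote API's rate limit.
    """
    for item in items:
        handle(item)
        time.sleep(pause)  # fixed delay before the next request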
Example 100
Project: pyroute2 Source File: interface.py
def commit(self,
tid=None,
transaction=None,
commit_phase=1,
commit_mask=0xff,
newif=False):
'''
Commit transaction. In the case of exception all
changes applied during commit will be reverted.
'''
if not commit_phase & commit_mask:
return self
def invalidate():
# on failure, invalidate the interface and detach it
# from the parent
# 0. obtain lock on IPDB, to avoid deadlocks
# ... all the DB updates will wait
with self.ipdb.exclusive:
# 1. drop the IPRoute() link
self.nl = None
# 2. clean up ipdb
self.detach()
# 3. invalidate the interface
with self._direct_state:
for i in tuple(self.keys()):
del self[i]
# 4. the rest
self._mode = 'invalid'
error = None
added = None
removed = None
drop = True
init = None
debug = {'traceback': None,
'transaction': None,
'next_stage': None}
if tid:
transaction = self.global_tx[tid]
else:
if transaction:
drop = False
else:
transaction = self.current_tx
if transaction.partial:
transaction.errors = []
with self._write_lock:
# if the interface does not exist, create it first ;)
if self['ipdb_scope'] != 'system':
newif = True
self.set_target('ipdb_scope', 'system')
try:
# 8<----------------------------------------------------
# ACHTUNG: hack for old platforms
if self['address'] == '00:00:00:00:00:00':
with self._direct_state:
self['address'] = None
self['broadcast'] = None
# 8<----------------------------------------------------
init = self.pick()
try:
self.nl.link('add', **self)
except NetlinkError as x:
# File exists
if x.code == errno.EEXIST:
# A bit special case, could be one of two cases:
#
# 1. A race condition between two different IPDB
# processes
# 2. An attempt to create dummy0, gre0, bond0 when
# the corresponding module is not loaded. Being
# loaded, the module creates a default interface
# by itself, causing the request to fail
#
# The exception in that case can cause the DB
# inconsistence, since there can be queued not only
# the interface creation, but also IP address
# changes etc.
#
# So we ignore this particular exception and try to
# continue, as it is created by us.
pass
else:
raise
except Exception as e:
if transaction.partial:
transaction.errors.append(e)
raise PartialCommitException()
else:
# If link('add', ...) raises an exception, no netlink
# broadcast will be sent, and the object is unmodified.
# After the exception forwarding, the object is ready
# to repeat the commit() call.
raise
if transaction['ipdb_scope'] == 'create' and commit_phase > 1:
if self['index']:
wd = self.ipdb.watchdog(action='RTM_DELLINK',
ifname=self['ifname'])
with self._direct_state:
self['ipdb_scope'] = 'locked'
self.nl.link('delete', index=self['index'])
wd.wait()
self.load_dict(transaction)
return self
elif newif:
# Here we come only if a new interface is created
#
if commit_phase == 1 and not self.wait_target('ipdb_scope'):
invalidate()
raise CreateException()
# Re-populate transaction.ipaddr to have a proper IP target
#
# The reason behind the code is that a new interface in the
# "up" state will have automatic IPv6 addresses, that aren't
# reflected in the transaction. This may cause a false IP
# target mismatch and a commit failure.
#
# To avoid that, collect automatic addresses to the
# transaction manually, since it is not yet properly linked.
#
for addr in self.ipdb.ipaddr[self['index']]:
transaction['ipaddr'].add(addr)
# now we have our index and IP set and all other stuff
snapshot = self.pick()
# resolve all delayed ports
def resolve_ports(transaction, ports, callback, self, drop):
def error(x):
return KeyError('cannot resolve port %s' % x)
for port in tuple(ports):
ifindex = self._resolve_port(port)
if ifindex is None:
if transaction.partial:
transaction.errors.append(error(port))
else:
if drop:
self.drop(transaction.uid)
raise error(port)
else:
ports.remove(port)
with transaction._direct_state: # ????
callback(ifindex)
resolve_ports(transaction,
transaction._delay_add_port,
transaction.add_port,
self, drop)
resolve_ports(transaction,
transaction._delay_del_port,
transaction.del_port,
self, drop)
try:
removed, added = snapshot // transaction
run = transaction._run
nl = transaction.nl
# 8<---------------------------------------------
# Port vlans
if removed['vlans'] or added['vlans']:
if added['vlans']:
transaction['vlans'].add(1)
self['vlans'].set_target(transaction['vlans'])
for i in removed['vlans']:
if i != 1:
# remove vlan from the port
run(nl.vlan_filter, 'del',
index=self['index'],
vlan_info=self['vlans'][i])
for i in added['vlans']:
if i != 1:
# add vlan to the port
run(nl.vlan_filter, 'add',
index=self['index'],
vlan_info=transaction['vlans'][i])
self['vlans'].target.wait(SYNC_TIMEOUT)
if not self['vlans'].target.is_set():
raise CommitException('vlans target is not set')
# 8<---------------------------------------------
# Ports
if removed['ports'] or added['ports']:
self['ports'].set_target(transaction['ports'])
for i in removed['ports']:
# detach port
if i in self.ipdb.interfaces:
(self.ipdb.interfaces[i]
.set_target('master', None)
.mirror_target('master', 'link'))
run(nl.link, 'set', index=i, master=0)
else:
transaction.errors.append(KeyError(i))
for i in added['ports']:
# attach port
if i in self.ipdb.interfaces:
(self.ipdb.interfaces[i]
.set_target('master', self['index'])
.mirror_target('master', 'link'))
run(nl.link, 'set', index=i, master=self['index'])
else:
transaction.errors.append(KeyError(i))
self['ports'].target.wait(SYNC_TIMEOUT)
if not self['ports'].target.is_set():
raise CommitException('ports target is not set')
# wait for proper targets on ports
for i in list(added['ports']) + list(removed['ports']):
port = self.ipdb.interfaces[i]
target = port._local_targets['master']
target.wait(SYNC_TIMEOUT)
with port._write_lock:
del port._local_targets['master']
del port._local_targets['link']
if not target.is_set():
raise CommitException('master target failed')
if i in added['ports']:
if port.if_master != self['index']:
raise CommitException('master set failed')
else:
if port.if_master == self['index']:
raise CommitException('master unset failed')
# 8<---------------------------------------------
# Interface changes
request = IPLinkRequest()
for key in added:
if (key == 'net_ns_fd') or \
(key == 'net_ns_pid') or \
(key not in self._virtual_fields) and \
(key != 'kind'):
request[key] = added[key]
# apply changes only if there is something to apply
if any([request[item] is not None for item in request]):
request['index'] = self['index']
request['kind'] = self['kind']
if request.get('address', None) == '00:00:00:00:00:00':
request.pop('address')
request.pop('broadcast', None)
if tuple(filter(lambda x: x[:3] == 'br_', request)):
request['family'] = socket.AF_BRIDGE
run(nl.link,
(RTM_NEWLINK, NLM_F_REQUEST | NLM_F_ACK),
**request)
else:
run(nl.link, 'set', **request)
# hardcoded pause -- if the interface was moved
# across network namespaces
if ('net_ns_fd' in request) or ('net_ns_pid' in request):
while True:
# wait until the interface will disappear
# from the main network namespace
try:
for link in self.nl.get_links(self['index']):
self.ipdb.interfaces._new(link)
except NetlinkError as e:
if e.code == errno.ENODEV:
break
raise
except Exception:
raise
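# poll at 100 ms intervals; the ENODEV case above breaks the loop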
time.sleep(0.1)
if not transaction.partial:
transaction.wait_all_targets()
# 8<---------------------------------------------
# IP address changes
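# Up to three sync passes: diff the address sets, apply, re-check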
for _ in range(3):
ip2add = transaction['ipaddr'] - self['ipaddr']
ip2remove = self['ipaddr'] - transaction['ipaddr']
if not ip2add and not ip2remove:
break
self['ipaddr'].set_target(transaction['ipaddr'])
###
# Remove
#
# The promote_secondaries sysctl causes the kernel
# to add secondary addresses back after the primary
# address is removed.
#
# The library can not tell this from the result of
# an external program.
#
# One simple way to work that around is to remove
# secondaries first.
rip = sorted(ip2remove,
key=lambda x: self['ipaddr'][x]['flags'],
reverse=True)
# 8<--------------------------------------
for i in rip:
# When you remove a primary IP addr, all the
# subnetwork can be removed. In this case you
# will fail, but it is OK, no need to roll back
try:
run(nl.addr, 'delete', self['index'], i[0], i[1])
except NetlinkError as x:
# bypass only errno 99,
# 'Cannot assign address'
if x.code != errno.EADDRNOTAVAIL:
raise
except socket.error as x:
# bypass illegal IP requests
if isinstance(x.args[0], basestring) and \
x.args[0].startswith('illegal IP'):
continue
raise
###
# Add addresses
# 8<--------------------------------------
for i in ip2add:
# Try to fetch additional address attributes
try:
kwarg = dict([k for k
in transaction['ipaddr'][i].items()
if k[0] in ('broadcast',
'anycast',
'scope')])
except KeyError:
kwarg = None
# feed the address to the OS
run(nl.addr, 'add', self['index'], i[0], i[1],
**kwarg if kwarg else {})
# 8<--------------------------------------
# bond and bridge interfaces do not send
# IPv6 address updates when they are down
#
# besides that, bridge interfaces are
# down by default, so they never send
# address updates from the beginning
#
# so if we need, force address load
#
# FIXME: probably, we should handle other
# types as well
if self['kind'] in ('bond', 'bridge', 'veth'):
for addr in self.nl.get_addr(family=socket.AF_INET6):
self.ipdb.ipaddr._new(addr)
# 8<--------------------------------------
self['ipaddr'].target.wait(SYNC_TIMEOUT)
if self['ipaddr'].target.is_set():
break
else:
raise CommitException('ipaddr target is not set')
# 8<---------------------------------------------
# Iterate callback chain
for ch in self._commit_hooks:
# An exception will rollback the transaction
ch(self.dump(), snapshot.dump(), transaction.dump())
# 8<---------------------------------------------
# Interface removal
if (added.get('ipdb_scope') in ('shadow', 'remove')):
wd = self.ipdb.watchdog(action='RTM_DELLINK',
ifname=self['ifname'])
if added.get('ipdb_scope') in ('shadow', 'create'):
with self._direct_state:
self['ipdb_scope'] = 'locked'
self.nl.link('delete', index=self['index'])
wd.wait()
if added.get('ipdb_scope') == 'shadow':
with self._direct_state:
self['ipdb_scope'] = 'shadow'
if added['ipdb_scope'] == 'create':
self.load_dict(transaction)
if drop:
self.drop(transaction.uid)
return self
# 8<---------------------------------------------
except Exception as e:
error = e
# log the error environment
debug['traceback'] = traceback.format_exc()
debug['transaction'] = transaction
debug['next_stage'] = None
# something went wrong: roll the transaction back
if commit_phase == 1:
if newif:
drop = False
try:
self.commit(transaction=init if newif else snapshot,
commit_phase=2,
commit_mask=commit_mask,
newif=newif)
except Exception as i_e:
debug['next_stage'] = i_e
error = RuntimeError()
else:
# reload the whole database -- it can take a long time,
# but it is required since we have no idea what the
# result of the failure is
links = self.nl.get_links()
for link in links:
self.ipdb.interfaces._new(link)
links = self.nl.get_vlans()
for link in links:
self.ipdb.interfaces._new(link)
for addr in self.nl.get_addr():
self.ipdb.ipaddr._new(addr)
for key in ('ipaddr', 'ports', 'vlans'):
self[key].clear_target()
# raise partial commit exceptions
if transaction.partial and transaction.errors:
error = PartialCommitException('partial commit error')
# if it is not a rollback turn
if drop and commit_phase == 1:
# drop last transaction in any case
self.drop(transaction.uid)
# raise exception for failed transaction
if error is not None:
error.debug = debug
raise error
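# settle pause after a successful commit; length comes from config.commit_barrier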
time.sleep(config.commit_barrier)
# drop all collected errors, if any
self.errors = []
return self
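commit() leans on time.sleep in two ways: a 100 ms poll interval while waiting for an interface to vanish from the main namespace after a netns move, and a final settle pause of config.commit_barrier seconds. A generic version of that poll loop, with an explicit timeout added (the loop above instead relies on the kernel reporting ENODEV):

import time

def wait_until(condition, interval=0.1, timeout=10.0):
    """Poll `condition` every `interval` seconds until it returns True.

    Returns True on success, or False once `timeout` elapses.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(interval)  # short sleep between checks keeps CPU usage low
    return False

For instance, wait_until(lambda: 'dummy0' not in list_ifnames()) would express the same wait-for-the-link-to-disappear intent, assuming a hypothetical list_ifnames() helper that returns current interface names.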