Here are examples of the Python API copy.copy taken from open source projects, showing the common reasons to take a shallow copy: protecting a caller's data, snapshotting mutable state, and deriving a new collection without touching the original.
Example 51
Project: python-phonenumbers Source File: buildmetadatafromxml.py
def __init__(self, xterritory, short_data):
# Retrieve the REQUIRED attributes
id = xterritory.attrib['id']
self.o = PhoneMetadata(id, short_data=short_data, register=False)
self.o._mutable = True
if 'countryCode' in xterritory.attrib:
self.o.country_code = int(xterritory.attrib['countryCode'])
else:
self.o.country_code = None
# Retrieve the IMPLIED attributes
self.o.international_prefix = xterritory.get('internationalPrefix', None)
self.o.leading_digits = xterritory.get('leadingDigits', None)
self.o.preferred_international_prefix = xterritory.get('preferredInternationalPrefix', None)
self.o.national_prefix = xterritory.get('nationalPrefix', None)
self.o.national_prefix_for_parsing = _dews_re(xterritory.get('nationalPrefixForParsing', None))
self.o.national_prefix_transform_rule = xterritory.get('nationalPrefixTransformRule', None)
if self.o.national_prefix_transform_rule is not None:
# Replace '$1' etc with '\1' to match Python regexp group reference format
self.o.national_prefix_transform_rule = re.sub('\$', r'\\', self.o.national_prefix_transform_rule)
self.o.preferred_extn_prefix = xterritory.get('preferredExtnPrefix', None)
national_prefix_formatting_rule = xterritory.get('nationalPrefixFormattingRule', None)
national_prefix_optional_when_formatting = get_true_attrib(xterritory, 'nationalPrefixOptionalWhenFormatting')
carrier_code_formatting_rule = xterritory.get('carrierCodeFormattingRule', None)
# Post-processing for the territory-default formatting rules. These are used
# in NumberFormat elements that don't supply their own formatting rules.
if self.o.national_prefix is not None:
if self.o.national_prefix_for_parsing is None:
# Default to self.national_prefix when national_prefix_for_parsing not set
self.o.national_prefix_for_parsing = self.o.national_prefix
national_prefix_formatting_rule = _expand_formatting_rule(national_prefix_formatting_rule,
self.o.national_prefix)
carrier_code_formatting_rule = _expand_formatting_rule(carrier_code_formatting_rule,
self.o.national_prefix)
self.o.main_country_for_code = get_true_attrib(xterritory, 'mainCountryForCode')
self.o.leading_zero_possible = get_true_attrib(xterritory, 'leadingZeroPossible')
self.o.mobile_number_portable_region = get_true_attrib(xterritory, 'mobileNumberPortableRegion')
# Retrieve the various PhoneNumberDesc elements. The general_desc is
# first and most important; it will be used to fill out missing fields in
# many of the other PhoneNumberDesc elements.
self.o.general_desc = XPhoneNumberDesc(_get_unique_child(xterritory, 'generalDesc'),
fill_na=False, lengths_expected=False)
# As a special case, the possible lengths for the general_desc should be empty
# (they will be deduced below).
if self.o.general_desc.o.possible_length is not None or self.o.general_desc.o.possible_length_local_only is not None:
raise Exception("Found generalDesc for %s with unexpected possibleLength element" % self.o.general_desc.id)
# areaCodeOptional is in the XML but not used in the code.
self.o.area_code_optional = XPhoneNumberDesc(_get_unique_child(xterritory, 'areaCodeOptional'),
template=self.o.general_desc.o)
self.o.toll_free = XPhoneNumberDesc(_get_unique_child(xterritory, 'tollFree'),
template=self.o.general_desc.o)
self.o.premium_rate = XPhoneNumberDesc(_get_unique_child(xterritory, 'premiumRate'),
template=self.o.general_desc.o)
if not short_data:
self.o.fixed_line = XPhoneNumberDesc(_get_unique_child(xterritory, 'fixedLine'),
template=self.o.general_desc.o, fill_na=False)
self.o.mobile = XPhoneNumberDesc(_get_unique_child(xterritory, 'mobile'),
template=self.o.general_desc.o, fill_na=False)
self.o.pager = XPhoneNumberDesc(_get_unique_child(xterritory, 'pager'),
template=self.o.general_desc.o)
self.o.shared_cost = XPhoneNumberDesc(_get_unique_child(xterritory, 'sharedCost'),
template=self.o.general_desc.o)
self.o.personal_number = XPhoneNumberDesc(_get_unique_child(xterritory, 'personalNumber'),
template=self.o.general_desc.o)
self.o.voip = XPhoneNumberDesc(_get_unique_child(xterritory, 'voip'),
template=self.o.general_desc.o)
self.o.uan = XPhoneNumberDesc(_get_unique_child(xterritory, 'uan'),
template=self.o.general_desc.o)
self.o.voicemail = XPhoneNumberDesc(_get_unique_child(xterritory, 'voicemail'),
template=self.o.general_desc.o)
self.o.no_international_dialling = XPhoneNumberDesc(_get_unique_child(xterritory, 'noInternationalDialling'),
template=self.o.general_desc.o)
# Skip noInternationalDialling when combining possible length information
sub_descs = (self.o.area_code_optional, self.o.toll_free, self.o.premium_rate,
self.o.fixed_line, self.o.mobile, self.o.pager, self.o.shared_cost,
self.o.personal_number, self.o.voip, self.o.uan, self.o.voicemail)
all_descs = (self.o.area_code_optional, self.o.toll_free, self.o.premium_rate,
self.o.fixed_line, self.o.mobile, self.o.pager, self.o.shared_cost,
self.o.personal_number, self.o.voip, self.o.uan, self.o.voicemail,
self.o.no_international_dialling)
else:
self.o.standard_rate = XPhoneNumberDesc(_get_unique_child(xterritory, 'standardRate'),
template=self.o.general_desc.o)
self.o.short_code = XPhoneNumberDesc(_get_unique_child(xterritory, 'shortCode'),
template=self.o.general_desc.o)
self.o.carrier_specific = XPhoneNumberDesc(_get_unique_child(xterritory, 'carrierSpecific'),
template=self.o.general_desc.o)
self.o.emergency = XPhoneNumberDesc(_get_unique_child(xterritory, 'emergency'),
template=self.o.general_desc.o)
# For short number metadata, copy the lengths from the "short code" section only.
sub_descs = (self.o.short_code,)
all_descs = (self.o.area_code_optional, self.o.toll_free, self.o.premium_rate,
self.o.standard_rate, self.o.short_code, self.o.carrier_specific,
self.o.emergency)
# Build the possible length information for general_desc based on all the different types of number.
possible_lengths = set()
local_lengths = set()
for desc in sub_descs:
if desc.o is None:
continue
if desc.o.possible_length is not None and desc.o.possible_length != (-1,):
possible_lengths.update(desc.o.possible_length)
if desc.o.possible_length_local_only is not None and desc.o.possible_length_local_only != (-1, ):
local_lengths.update(desc.o.possible_length_local_only)
self.o.general_desc.o.possible_length = sorted(list(possible_lengths))
self.o.general_desc.o.possible_length_local_only = sorted(list(local_lengths))
if -1 in self.o.general_desc.o.possible_length:
raise Exception("Found -1 length in general_desc.possible_length")
if -1 in self.o.general_desc.o.possible_length_local_only:
raise Exception("Found -1 length in general_desc.possible_length_local_only")
# Now that the union of length information is available, trickle it back down to those types
# of number that didn't specify any length information (indicated by having those fields set
# to None). But only if they're not NA: types whose pattern is DATA_NA get empty length lists instead.
for desc in all_descs:
if desc.o is not None:
if desc.o.national_number_pattern == DATA_NA:
desc.o.possible_length = []
desc.o.possible_length_local_only = []
continue
if desc.o.possible_length is None:
desc.o.possible_length = copy.copy(self.o.general_desc.o.possible_length)
if desc.o.possible_length_local_only is None:
desc.o.possible_length_local_only = copy.copy(self.o.general_desc.o.possible_length_local_only)
# Look for available formats
self.has_explicit_intl_format = False
formats = _get_unique_child(xterritory, "availableFormats")
if formats is not None:
for xelt in formats.findall("numberFormat"):
# Create an XNumberFormat object, which contains a NumberFormat object
# or two, and which self-registers them with self.o
XNumberFormat(self,
xelt,
self.o.national_prefix,
national_prefix_formatting_rule,
national_prefix_optional_when_formatting,
carrier_code_formatting_rule)
if len(self.o.number_format) == 0:
raise Exception("No number formats found in available formats")
if not self.has_explicit_intl_format:
# Only a small number of regions need to specify the intlFormats
# in the XML. For the majority of countries the intlNumberFormat
# metadata is an exact copy of the national NumberFormat metadata.
# To minimize the size of the metadata file, we only keep
# intlNumberFormats that actually differ in some way to the
# national formats.
self.o.intl_number_format = []
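
Note: the copy.copy calls near the end of this example give each PhoneNumberDesc its own possible_length list instead of a reference to the shared general_desc list, so a later in-place edit to one desc cannot corrupt the template. A minimal sketch of the hazard being avoided (the names below are illustrative, not from the project):

import copy

general_lengths = [7, 8, 10]             # lengths deduced for general_desc
fixed_line = copy.copy(general_lengths)  # each desc gets its own list
fixed_line.append(11)                    # per-desc tweak
assert general_lengths == [7, 8, 10]     # template is untouched
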
Example 52
Project: p2ptv-pi Source File: Session.py
def __init__(self, scfg = None, ignore_singleton = False, on_error = lambda e: None, on_stop = lambda : None, app_http_handler = None, network_thread_daemon = True):
if not ignore_singleton:
if Session.__single:
raise RuntimeError, 'Session is singleton'
Session.__single = self
self.sesslock = RLock()
self.on_error = on_error
self.on_stop = on_stop
self.app_http_handler = app_http_handler
first_run = False
if scfg is None:
try:
state_dir = Session.get_default_state_dir()
cfgfilename = Session.get_default_config_filename(state_dir)
scfg = SessionStartupConfig.load(cfgfilename)
except:
log_exc()
scfg = SessionStartupConfig()
self.sessconfig = scfg.sessconfig
else:
self.sessconfig = copy.copy(scfg.sessconfig)
state_dir = self.sessconfig['state_dir']
if state_dir is None:
state_dir = Session.get_default_state_dir()
self.sessconfig['state_dir'] = state_dir
if not os.path.isdir(state_dir):
first_run = True
os.makedirs(state_dir)
collected_torrent_dir = self.sessconfig['torrent_collecting_dir']
if not collected_torrent_dir:
collected_torrent_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_TORRENTCOLL_DIR)
self.sessconfig['torrent_collecting_dir'] = collected_torrent_dir
collected_subtitles_dir = self.sessconfig.get('subtitles_collecting_dir', None)
if not collected_subtitles_dir:
collected_subtitles_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_SUBSCOLL_DIR)
self.sessconfig['subtitles_collecting_dir'] = collected_subtitles_dir
if not os.path.exists(collected_torrent_dir):
first_run = True
os.makedirs(collected_torrent_dir)
buffer_dir = self.sessconfig.get('buffer_dir', None)
if not buffer_dir:
buffer_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_BUFFER_DIR)
self.sessconfig['buffer_dir'] = buffer_dir
if not os.path.exists(buffer_dir):
first_run = True
os.makedirs(buffer_dir)
ads_dir = self.sessconfig.get('ads_dir', None)
if not ads_dir:
ads_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_ADS_DIR)
self.sessconfig['ads_dir'] = ads_dir
if not os.path.exists(ads_dir):
first_run = True
os.makedirs(ads_dir)
if 'ts_login' in self.sessconfig:
if first_run and len(self.sessconfig['ts_login']) == 0:
self.sessconfig['ts_login'] = 'test'
else:
self.sessconfig['ts_login'] = sessdefaults['ts_login']
if 'ts_password' in self.sessconfig:
if first_run and len(self.sessconfig['ts_password']) == 0:
self.sessconfig['ts_password'] = 'test'
else:
self.sessconfig['ts_password'] = sessdefaults['ts_password']
if 'ts_user_key' not in self.sessconfig:
self.sessconfig['ts_user_key'] = sessdefaults['ts_user_key']
if 'max_socket_connects' not in self.sessconfig:
self.sessconfig['max_socket_connects'] = sessdefaults['max_socket_connects']
if not self.sessconfig['peer_icon_path']:
self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'], STATEDIR_PEERICON_DIR)
if GOTM2CRYPTO:
permidmod.init()
pairfilename = os.path.join(self.sessconfig['state_dir'], 'ec.pem')
if self.sessconfig['eckeypairfilename'] is None:
self.sessconfig['eckeypairfilename'] = pairfilename
if os.access(self.sessconfig['eckeypairfilename'], os.F_OK):
self.keypair = permidmod.read_keypair(self.sessconfig['eckeypairfilename'])
else:
self.keypair = permidmod.generate_keypair()
pubfilename = os.path.join(self.sessconfig['state_dir'], 'ecpub.pem')
permidmod.save_keypair(self.keypair, pairfilename)
permidmod.save_pub_key(self.keypair, pubfilename)
else:
self.keypair = None
dlpstatedir = os.path.join(self.sessconfig['state_dir'], STATEDIR_DLPSTATE_DIR)
if not os.path.isdir(dlpstatedir):
os.mkdir(dlpstatedir)
dl_direct_pstatedir = os.path.join(self.sessconfig['state_dir'], STATEDIR_DLDIRECT_PSTATE_DIR)
if not os.path.isdir(dl_direct_pstatedir):
os.mkdir(dl_direct_pstatedir)
trackerdir = self.get_internal_tracker_dir()
if not os.path.isdir(trackerdir):
os.mkdir(trackerdir)
if self.sessconfig['tracker_dfile'] is None:
self.sessconfig['tracker_dfile'] = os.path.join(trackerdir, 'tracker.db')
if self.sessconfig['tracker_allowed_dir'] is None:
self.sessconfig['tracker_allowed_dir'] = trackerdir
if self.sessconfig['tracker_logfile'] is None:
if sys.platform == 'win32':
sink = 'nul'
else:
sink = '/dev/null'
self.sessconfig['tracker_logfile'] = sink
if self.sessconfig['superpeer_file'] is None:
self.sessconfig['superpeer_file'] = os.path.join(self.sessconfig['install_dir'], LIBRARYNAME, 'Core', 'superpeer.txt')
if 'crawler_file' not in self.sessconfig or self.sessconfig['crawler_file'] is None:
self.sessconfig['crawler_file'] = os.path.join(self.sessconfig['install_dir'], LIBRARYNAME, 'Core', 'Statistics', 'crawler.txt')
if self.sessconfig['overlay'] and self.sessconfig['download_help']:
if self.sessconfig['download_help_dir'] is None:
self.sessconfig['download_help_dir'] = os.path.join(get_default_dest_dir(), DESTDIR_COOPDOWNLOAD)
if not os.path.isdir(self.sessconfig['download_help_dir']):
os.makedirs(self.sessconfig['download_help_dir'])
if self.sessconfig['peer_icon_path'] is None:
self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'], STATEDIR_PEERICON_DIR)
if not os.path.isdir(self.sessconfig['peer_icon_path']):
os.mkdir(self.sessconfig['peer_icon_path'])
for key, defvalue in sessdefaults.iteritems():
if key not in self.sessconfig:
self.sessconfig[key] = defvalue
if 'live_aux_seeders' not in self.sessconfig:
self.sessconfig['live_aux_seeders'] = sessdefaults['live_aux_seeders']
if 'nat_detect' not in self.sessconfig:
self.sessconfig['nat_detect'] = sessdefaults['nat_detect']
if 'puncturing_internal_port' not in self.sessconfig:
self.sessconfig['puncturing_internal_port'] = sessdefaults['puncturing_internal_port']
if 'stun_servers' not in self.sessconfig:
self.sessconfig['stun_servers'] = sessdefaults['stun_servers']
if 'pingback_servers' not in self.sessconfig:
self.sessconfig['pingback_servers'] = sessdefaults['pingback_servers']
if 'mainline_dht' not in self.sessconfig:
self.sessconfig['mainline_dht'] = sessdefaults['mainline_dht']
self.http_seeds = {}
self.save_pstate_sessconfig()
self.uch = UserCallbackHandler(self)
self.lm = ACEStreamLaunchMany(network_thread_daemon)
self.lm.register(self, self.sesslock)
self.lm.start()
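
Note: here copy.copy(scfg.sessconfig) shallow-copies the caller-supplied config dict before the constructor fills in dozens of defaults, so the caller's SessionStartupConfig is not mutated. A minimal sketch of the pattern (hypothetical keys):

import copy

defaults = {'state_dir': None, 'nat_detect': 1}
sessconfig = copy.copy(defaults)        # independent top-level dict
sessconfig['state_dir'] = '/tmp/state'  # constructor-style fix-ups
assert defaults['state_dir'] is None    # caller's dict is unchanged

Being shallow, the copy still shares any nested lists or dicts with the original; that is acceptable here because the constructor only rebinds top-level keys.
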
Example 53
Project: django-calaccess-campaign-browser Source File: loadcalaccesscampaigncontributions.py
def transform_quarterly_contributions_csv(self):
self.log(" Marking duplicates")
self.log(" Dumping CSV sorted by unique identifier")
sql = """
SELECT
`FILING_ID`,
`AMEND_ID`,
`LINE_ITEM`,
`REC_TYPE`,
`FORM_TYPE`,
`TRAN_ID`,
`ENTITY_CD`,
`CTRIB_NAML`,
`CTRIB_NAMF`,
`CTRIB_NAMT`,
`CTRIB_NAMS`,
`CTRIB_CITY`,
`CTRIB_ST`,
`CTRIB_ZIP4`,
`CTRIB_EMP`,
`CTRIB_OCC`,
`CTRIB_SELF`,
`TRAN_TYPE`,
`RCPT_DATE`,
`DATE_THRU`,
`AMOUNT`,
`CUM_YTD`,
`CUM_OTH`,
`CTRIB_DSCR`,
`CMTE_ID`,
`TRES_NAML`,
`TRES_NAMF`,
`TRES_NAMT`,
`TRES_NAMS`,
`TRES_CITY`,
`TRES_ST`,
`TRES_ZIP4`,
`INTR_NAML`,
`INTR_NAMF`,
`INTR_NAMT`,
`INTR_NAMS`,
`INTR_CITY`,
`INTR_ST`,
`INTR_ZIP4`,
`INTR_EMP`,
`INTR_OCC`,
`INTR_SELF`,
`CAND_NAML`,
`CAND_NAMF`,
`CAND_NAMT`,
`CAND_NAMS`,
`OFFICE_CD`,
`OFFIC_DSCR`,
`JURIS_CD`,
`JURIS_DSCR`,
`DIST_NO`,
`OFF_S_H_CD`,
`BAL_NAME`,
`BAL_NUM`,
`BAL_JURIS`,
`SUP_OPP_CD`,
`MEMO_CODE`,
`MEMO_REFNO`,
`BAKREF_TID`,
`XREF_SCHNM`,
`XREF_MATCH`,
`INT_RATE`,
`INTR_CMTEID`
FROM %(raw_model)s
ORDER BY FILING_ID, TRAN_ID, AMEND_ID DESC
INTO OUTFILE '%(tmp_csv)s'
FIELDS TERMINATED BY ','
ENCLOSED BY '"'
LINES TERMINATED BY '\n'
""" % dict(
raw_model=RcptCd._meta.db_table,
tmp_csv=self.quarterly_tmp_csv,
)
self.cursor.execute(sql)
INHEADERS = [
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE",
"TRAN_ID",
"ENTITY_CD",
"CTRIB_NAML",
"CTRIB_NAMF",
"CTRIB_NAMT",
"CTRIB_NAMS",
"CTRIB_CITY",
"CTRIB_ST",
"CTRIB_ZIP4",
"CTRIB_EMP",
"CTRIB_OCC",
"CTRIB_SELF",
"TRAN_TYPE",
"RCPT_DATE",
"DATE_THRU",
"AMOUNT",
"CUM_YTD",
"CUM_OTH",
"CTRIB_DSCR",
"CMTE_ID",
"TRES_NAML",
"TRES_NAMF",
"TRES_NAMT",
"TRES_NAMS",
"TRES_CITY",
"TRES_ST",
"TRES_ZIP4",
"INTR_NAML",
"INTR_NAMF",
"INTR_NAMT",
"INTR_NAMS",
"INTR_CITY",
"INTR_ST",
"INTR_ZIP4",
"INTR_EMP",
"INTR_OCC",
"INTR_SELF",
"CAND_NAML",
"CAND_NAMF",
"CAND_NAMT",
"CAND_NAMS",
"OFFICE_CD",
"OFFIC_DSCR",
"JURIS_CD",
"JURIS_DSCR",
"DIST_NO",
"OFF_S_H_CD",
"BAL_NAME",
"BAL_NUM",
"BAL_JURIS",
"SUP_OPP_CD",
"MEMO_CODE",
"MEMO_REFNO",
"BAKREF_TID",
"XREF_SCHNM",
"XREF_MATCH",
"INT_RATE",
"INTR_CMTEID"
]
OUTHEADERS = copy.copy(INHEADERS)
OUTHEADERS.append("IS_DUPLICATE")
self.log(" Marking duplicates in a new CSV")
with open(self.quarterly_tmp_csv, 'rU') as fin:
fout = csv.DictWriter(
open(self.quarterly_target_csv, 'wb'),
fieldnames=OUTHEADERS
)
fout.writeheader()
last_uid = ''
for r in csv.DictReader(fin, fieldnames=INHEADERS):
r.pop(None, None)
uid = '%s-%s' % (r['FILING_ID'], r['TRAN_ID'])
if uid != last_uid:
r['IS_DUPLICATE'] = 0
last_uid = uid
else:
r['IS_DUPLICATE'] = 1
try:
fout.writerow(r)
except ValueError:
continue
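
Note: OUTHEADERS = copy.copy(INHEADERS) clones the header list so that appending the extra IS_DUPLICATE column does not alter the input schema that csv.DictReader still reads with. For a list, copy.copy(x) is equivalent to x[:] or list(x):

import copy

INHEADERS = ['FILING_ID', 'AMEND_ID', 'TRAN_ID']
OUTHEADERS = copy.copy(INHEADERS)
OUTHEADERS.append('IS_DUPLICATE')       # output-only column
assert 'IS_DUPLICATE' not in INHEADERS  # reader schema is intact
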
Example 54
Project: federatednode Source File: fednode.py
def main():
global DOCKER_CONFIG_PATH
setup_env()
args = parse_args()
# run utility commands (docker_clean) if specified
if args.command == 'docker_clean':
docker_containers = subprocess.check_output("{} docker ps -a -q".format(SUDO_CMD), shell=True).decode("utf-8").split('\n')
docker_images = subprocess.check_output("{} docker images -q".format(SUDO_CMD), shell=True).decode("utf-8").split('\n')
for container in docker_containers:
if not container:
continue
os.system("{} docker rm {}".format(SUDO_CMD, container))
for image in docker_images:
if not image:
continue
os.system("{} docker rmi {}".format(SUDO_CMD, image))
sys.exit(1)
# for all other commands
# if config doesn't exist, only the 'install' command may be run
config_existed = os.path.exists(FEDNODE_CONFIG_PATH)
config = configparser.SafeConfigParser()
if not config_existed:
if args.command != 'install':
print("config file {} does not exist. Please run the 'install' command first".format(FEDNODE_CONFIG_FILE))
sys.exit(1)
# write default config
config.add_section('Default')
config.set('Default', 'branch', args.branch)
config.set('Default', 'config', args.config)
write_config(config)
# load and read config
assert os.path.exists(FEDNODE_CONFIG_PATH)
config.read(FEDNODE_CONFIG_PATH)
build_config = config.get('Default', 'config')
docker_config_file = "docker-compose.{}.yml".format(build_config)
DOCKER_CONFIG_PATH = os.path.join(SCRIPTDIR, docker_config_file)
repo_branch = config.get('Default', 'branch')
os.environ['FEDNODE_RELEASE_TAG'] = 'latest' if repo_branch == 'master' else repo_branch
os.environ['HOSTNAME_BASE'] = socket.gethostname()
os.environ['MONGODB_HOST_INTERFACE'] = getattr(args, 'mongodb_interface', "127.0.0.1")
# perform action for the specified command
if args.command == 'install':
if config_existed:
print("Cannot install, as it appears a configuration already exists. Please run the 'uninstall' command first")
sys.exit(1)
# check port usage
for port in HOST_PORTS_USED[build_config]:
if is_port_open(port):
print("Cannot install, as it appears a process is already listening on host port {}".format(port))
sys.exit(1)
# check out the necessary source trees (don't use submodules due to detached HEAD and other problems)
REPOS = REPOS_BASE if build_config == 'base' else (REPOS_COUNTERBLOCK if build_config == 'counterblock' else REPOS_FULL)
for repo in REPOS:
repo_url = REPO_BASE_SSH.format(repo) if args.use_ssh_uris else REPO_BASE_HTTPS.format(repo)
repo_dir = os.path.join(SCRIPTDIR, "src", repo)
if not os.path.exists(repo_dir):
git_cmd = "git clone -b {} {} {}".format(repo_branch, repo_url, repo_dir)
if not IS_WINDOWS: # make sure to check out the code as the original user, so the permissions are right
os.system("{} -u {} bash -c \"{}\"".format(SUDO_CMD, SESSION_USER, git_cmd))
else:
os.system(git_cmd)
# make sure we have the newest image for each service
run_compose_cmd("pull --ignore-pull-failures")
# copy over the configs from .default to active versions, if they don't already exist
for default_config in glob.iglob(os.path.join(SCRIPTDIR, 'config', '**/*.default'), recursive=True):
active_config = default_config.replace('.default', '')
if not os.path.exists(active_config):
print("Generating config from defaults at {} ...".format(active_config))
shutil.copy2(default_config, active_config)
default_config_stat = os.stat(default_config)
if not IS_WINDOWS:
os.chown(active_config, default_config_stat.st_uid, default_config_stat.st_gid)
# create symlinks to the data volumes (for ease of use)
if not IS_WINDOWS:
data_dir = os.path.join(SCRIPTDIR, "data")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
for volume in VOLUMES_USED[build_config]:
symlink_path = os.path.join(data_dir, volume.replace('-data', ''))
volume_name = "{}_{}".format(PROJECT_NAME, volume)
mountpoint_path = get_docker_volume_path(volume_name)
if mountpoint_path is not None and not os.path.lexists(symlink_path):
os.symlink(mountpoint_path, symlink_path)
print("For convenience, symlinking {} to {}".format(mountpoint_path, symlink_path))
# launch
run_compose_cmd("up -d")
elif args.command == 'uninstall':
run_compose_cmd("down")
os.remove(FEDNODE_CONFIG_PATH)
elif args.command == 'start':
run_compose_cmd("start {}".format(' '.join(args.services)))
elif args.command == 'stop':
run_compose_cmd("stop {}".format(' '.join(args.services)))
elif args.command == 'restart':
run_compose_cmd("restart {}".format(' '.join(args.services)))
elif args.command == 'reparse':
run_compose_cmd("stop {}".format(args.service))
run_compose_cmd("run -e COMMAND=reparse {}".format(args.service))
elif args.command == 'tail':
run_compose_cmd("logs -f --tail={} {}".format(args.num_lines, ' '.join(args.services)))
elif args.command == 'logs':
run_compose_cmd("logs {}".format(' '.join(args.services)))
elif args.command == 'ps':
run_compose_cmd("ps")
elif args.command == 'exec':
if len(args.cmd) == 1 and re.match("['\"].*?['\"]", args.cmd[0]):
cmd = args.cmd
else:
cmd = '"{}"'.format(' '.join(args.cmd).replace('"', '\\"'))
os.system("{} docker exec -i -t federatednode_{}_1 bash -c {}".format(SUDO_CMD, args.service, cmd))
elif args.command == 'shell':
container_running = is_container_running(args.service)
if container_running:
os.system("{} docker exec -i -t federatednode_{}_1 bash".format(SUDO_CMD, args.service))
else:
print("Container is not running -- creating a transient container with a 'bash' shell entrypoint...")
run_compose_cmd("run --no-deps --rm --entrypoint bash {}".format(args.service))
elif args.command == 'update':
# validate
if args.services != ['', ]:
for service in args.services:
if service not in UPDATE_CHOICES:
print("Invalid service: {}".format(service))
sys.exit(1)
services_to_update = copy.copy(UPDATE_CHOICES) if not len(args.services) else args.services
git_has_updated = []
while services_to_update:
# update source code
service = services_to_update.pop(0)
service_base = service.replace('-testnet', '')
if service_base not in git_has_updated:
git_has_updated.append(service_base)
if service_base == 'counterparty': # special case
service_dirs = [os.path.join(SCRIPTDIR, "src", "counterparty-lib"), os.path.join(SCRIPTDIR, "src", "counterparty-cli")]
else:
service_dirs = [service_base,]
for service_dir in service_dirs:
service_dir_path = os.path.join(SCRIPTDIR, "src", service_dir)
if not os.path.exists(service_dir_path):
continue
service_branch = subprocess.check_output("cd {};git symbolic-ref --short -q HEAD;cd {}".format(service_dir_path, CURDIR), shell=True).decode("utf-8").strip()
if not service_branch:
print("Unknown service git branch name, or repo in detached state")
sys.exit(1)
git_cmd = "cd {}; git pull origin {}; cd {}".format(service_dir_path, service_branch, CURDIR)
if not IS_WINDOWS: # make sure to update the code as the original user, so the permissions are right
os.system("{} -u {} bash -c \"{}\"".format(SUDO_CMD, SESSION_USER, git_cmd))
else:
os.system(git_cmd)
# delete installed egg (to force egg recreate and deps re-check on next start)
if service_base in ('counterparty', 'counterblock', 'armory-utxsvr'):
for path in glob.glob(os.path.join(service_dir_path, "*.egg-info")):
print("Removing egg path {}".format(path))
if not IS_WINDOWS: # have to use root
os.system("{} bash -c \"rm -rf {}\"".format(SUDO_CMD, path))
else:
shutil.rmtree(path)
if service_base == 'counterwallet' and os.path.exists(os.path.join(SCRIPTDIR, "src", "counterwallet")): # special case
transifex_cfg_path = os.path.join(os.path.expanduser("~"), ".transifex")
if os.path.exists(transifex_cfg_path):
os.system("{} docker cp {} federatednode_counterwallet_1:/root/.transifex".format(SUDO_CMD, transifex_cfg_path))
os.system("{} docker exec -i -t federatednode_counterwallet_1 bash -c \"cd /counterwallet/src ".format(SUDO_CMD) +
"&& bower --allow-root update && cd /counterwallet && npm update && grunt build\"")
if not os.path.exists(transifex_cfg_path):
print("NOTE: Did not update locales because there is no .transifex file in your home directory")
print("If you want locales compiled, sign up for transifex and create this file to" +
" contain 'your_transifex_username:your_transifex_password'")
# and restart container
if not args.no_restart:
run_compose_cmd("restart {}".format(service))
elif args.command == 'rebuild':
run_compose_cmd("pull --ignore-pull-failures {}".format(' '.join(args.services)))
run_compose_cmd("up -d --build --force-recreate --no-deps {}".format(' '.join(args.services)))
Example 55
Project: cgstudiomap Source File: legends.py
def draw(self):
colorNamePairs = self.colorNamePairs
autoCP = isAuto(colorNamePairs)
if autoCP:
chart = getattr(colorNamePairs,'chart',getattr(colorNamePairs,'obj',None))
swatchMarker = None
autoCP = Auto(obj=chart)
n = chart._seriesCount
chartTexts = self._getTexts(colorNamePairs)
else:
swatchMarker = getattr(self,'swatchMarker',None)
if isAuto(swatchMarker):
chart = getattr(swatchMarker,'chart',getattr(swatchMarker,'obj',None))
swatchMarker = Auto(obj=chart)
n = len(colorNamePairs)
dx = self.dx
dy = self.dy
alignment = self.alignment
columnMaximum = self.columnMaximum
deltax = self.deltax
deltay = self.deltay
dxTextSpace = self.dxTextSpace
fontName = self.fontName
fontSize = self.fontSize
fillColor = self.fillColor
strokeWidth = self.strokeWidth
strokeColor = self.strokeColor
subCols = self.subCols
leading = fontSize*1.2
yGap = self.yGap
if not deltay:
deltay = max(dy,leading)+self.autoYPadding
ba = self.boxAnchor
maxWidth = self._calculateMaxBoundaries(colorNamePairs)
nCols = int((n+columnMaximum-1)/(columnMaximum*1.0))
xW = dx+dxTextSpace+self.autoXPadding
variColumn = self.variColumn
if variColumn:
width = sum([m[-1] for m in maxWidth])+xW*nCols
else:
deltax = max(maxWidth[-1]+xW,deltax)
width = nCols*deltax
maxWidth = nCols*[maxWidth]
thisx = self.x
thisy = self.y - self.dy
if ba not in ('ne','n','nw','autoy'):
height = self._calcHeight()
if ba in ('e','c','w'):
thisy += height/2.
else:
thisy += height
if ba not in ('nw','w','sw','autox'):
if ba in ('n','c','s'):
thisx -= width/2
else:
thisx -= width
upperlefty = thisy
g = Group()
ascent=getFont(fontName).face.ascent/1000.
if ascent==0: ascent=0.718 # default (from helvetica)
ascent *= fontSize # normalize
lim = columnMaximum - 1
callout = getattr(self,'callout',None)
scallout = getattr(self,'swatchCallout',None)
dividerLines = self.dividerLines
if dividerLines:
dividerWidth = self.dividerWidth
dividerColor = self.dividerColor
dividerDashArray = self.dividerDashArray
dividerOffsX = self.dividerOffsX
dividerOffsY = self.dividerOffsY
for i in xrange(n):
if autoCP:
col = autoCP
col.index = i
name = chartTexts[i]
else:
col, name = colorNamePairs[i]
if isAuto(swatchMarker):
col = swatchMarker
col.index = i
if isAuto(name):
name = getattr(swatchMarker,'chart',getattr(swatchMarker,'obj',None)).getSeriesName(i,'series %d' % i)
T = _getLines(name)
S = []
aS = S.append
j = int(i/(columnMaximum*1.0))
jOffs = maxWidth[j]
# thisy+dy/2 = y+leading/2
y = y0 = thisy+(dy-ascent)*0.5
if callout: callout(self,g,thisx,y,(col,name))
if alignment == "left":
x = thisx
xn = thisx+jOffs[-1]+dxTextSpace
elif alignment == "right":
x = thisx+dx+dxTextSpace
xn = thisx
else:
raise ValueError("bad alignment")
if not isSeq(name):
T = [T]
yd = y
for k,lines in enumerate(T):
y = y0
kk = k*2
x1 = x+jOffs[kk]
x2 = x+jOffs[kk+1]
sc = subCols[k,i]
anchor = sc.align
scdx = sc.dx
scdy = sc.dy
fN = getattr(sc,'fontName',fontName)
fS = getattr(sc,'fontSize',fontSize)
fC = getattr(sc,'fillColor',fillColor)
fL = getattr(sc,'leading',1.2*fontSize)
if fN==fontName:
fA = (ascent*fS)/fontSize
else:
fA = getFont(fontName).face.ascent/1000.
if fA==0: fA=0.718
fA *= fS
if anchor=='left':
anchor = 'start'
xoffs = x1
elif anchor=='right':
anchor = 'end'
xoffs = x2
elif anchor=='numeric':
xoffs = x2
else:
anchor = 'middle'
xoffs = 0.5*(x1+x2)
for t in lines:
aS(String(xoffs+scdx,y+scdy,t,fontName=fN,fontSize=fS,fillColor=fC, textAnchor = anchor))
y -= fL
yd = min(yd,y)
y += fL
for iy, a in ((y-max(fL-fA,0),'underlines'),(y+fA,'overlines')):
il = getattr(sc,a,None)
if il:
if not isinstance(il,(tuple,list)): il = (il,)
for l in il:
l = copy.copy(l)
l.y1 += iy
l.y2 += iy
l.x1 += x1
l.x2 += x2
aS(l)
x = xn
y = yd
leadingMove = 2*y0-y-thisy
if dividerLines:
xd = thisx+dx+dxTextSpace+jOffs[-1]+dividerOffsX[1]
yd = thisy+dy*0.5+dividerOffsY
if ((dividerLines&1) and i%columnMaximum) or ((dividerLines&2) and not i%columnMaximum):
g.add(Line(thisx+dividerOffsX[0],yd,xd,yd,
strokeColor=dividerColor, strokeWidth=dividerWidth, strokeDashArray=dividerDashArray))
if (dividerLines&4) and (i%columnMaximum==lim or i==(n-1)):
yd -= max(deltay,leadingMove)+yGap
g.add(Line(thisx+dividerOffsX[0],yd,xd,yd,
strokeColor=dividerColor, strokeWidth=dividerWidth, strokeDashArray=dividerDashArray))
# Make a 'normal' color swatch...
swatchX = x + getattr(self,'swdx',0)
swatchY = thisy + getattr(self,'swdy',0)
if isAuto(col):
chart = getattr(col,'chart',getattr(col,'obj',None))
c = chart.makeSwatchSample(getattr(col,'index',i),swatchX,swatchY,dx,dy)
elif isinstance(col, colors.Color):
if isSymbol(swatchMarker):
c = uSymbol2Symbol(swatchMarker,swatchX+dx/2.,swatchY+dy/2.,col)
else:
c = self._defaultSwatch(swatchX,swatchY,dx,dy,fillColor=col,strokeWidth=strokeWidth,strokeColor=strokeColor)
elif col is not None:
try:
c = copy.deepcopy(col)
c.x = swatchX
c.y = swatchY
c.width = dx
c.height = dy
except:
c = None
else:
c = None
if c:
g.add(c)
if scallout: scallout(self,g,thisx,y0,i,(col,name),c)
for s in S: g.add(s)
if self.colEndCallout and (i%columnMaximum==lim or i==(n-1)):
if alignment == "left":
xt = thisx
else:
xt = thisx+dx+dxTextSpace
yd = thisy+dy*0.5+dividerOffsY - (max(deltay,leadingMove)+yGap)
self.colEndCallout(self, g, thisx, xt, yd, jOffs[-1], jOffs[-1]+dx+dxTextSpace)
if i%columnMaximum==lim:
if variColumn:
thisx += jOffs[-1]+xW
else:
thisx = thisx+deltax
thisy = upperlefty
else:
thisy = thisy-max(deltay,leadingMove)-yGap
return g
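
Note: this example uses both flavors: l = copy.copy(l) takes a cheap per-row copy of a shared underline/overline template before shifting its coordinates, while copy.deepcopy(col) is used for the swatch, whose nested state must not be shared at all. A minimal sketch of the shallow case, with a stand-in for reportlab's Line:

import copy

class Line:
    def __init__(self, x1, y1, x2, y2):
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2

template = Line(0, 0, 0, 0)   # shared style template
l = copy.copy(template)       # per-row working copy
l.x1 += 10
l.x2 += 50
assert (template.x1, template.x2) == (0, 0)  # template unmoved
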
Example 56
def updateData(self, labels, foo, **args):
self.clear()
# initial var values
self.showKNNModel = 0
self.showCorrect = 1
self.__dict__.update(args)
length = len(labels)
self.dataMap = {} # dictionary with keys of form "x_i-y_i" with values (x_i, y_i, color, data)
self.XAnchor = self.createXAnchors(length)
self.YAnchor = self.createYAnchors(length)
self.shownAttributes = labels
polyvizLineCoordsX = []; polyvizLineCoordsY = [] # if class is discrete we will optimize drawing by storing computed values and adding less data curves to plot
# we must have at least 3 attributes to be able to show anything
if not self.haveData or len(labels) < 3:
self.updateLayout()
return
dataSize = len(self.rawData)
if self.dataHasClass: useDifferentColors = self.useDifferentColors # don't use colors if we don't have a class
else: useDifferentColors = 0
self.setAxisScale(xBottom, -1.20, 1.20, 1)
# store indices to shown attributes
indices = [self.attributeNameIndex[label] for label in labels]
# will we show different symbols?
useDifferentSymbols = self.useDifferentSymbols and self.dataHasDiscreteClass and len(self.dataDomain.classVar.values) < len(self.curveSymbols)
# ##########
# draw text at lines
for i in range(length):
# print attribute name
self.addMarker(labels[i], 0.6*(self.XAnchor[i]+ self.XAnchor[(i+1)%length]), 0.6*(self.YAnchor[i]+ self.YAnchor[(i+1)%length]), Qt.AlignHCenter | Qt.AlignVCenter, bold = 1)
if self.dataDomain[labels[i]].varType == orange.VarTypes.Discrete:
# print all possible attribute values
values = getVariableValuesSorted(self.dataDomain[labels[i]])
count = len(values)
k = 1.08
for j in range(count):
pos = (1.0 + 2.0*float(j)) / float(2*count)
self.addMarker(values[j], k*(1-pos)*self.XAnchor[i]+k*pos*self.XAnchor[(i+1)%length], k*(1-pos)*self.YAnchor[i]+k*pos*self.YAnchor[(i+1)%length], Qt.AlignHCenter | Qt.AlignVCenter)
else:
# min and max value
if self.tooltipValue == TOOLTIPS_SHOW_SPRINGS:
names = ["%.1f" % (0.0), "%.1f" % (1.0)]
elif self.tooltipValue == TOOLTIPS_SHOW_DATA:
names = ["%%.%df" % (self.dataDomain[labels[i]].numberOfDecimals) % (self.attrValues[labels[i]][0]), "%%.%df" % (self.dataDomain[labels[i]].numberOfDecimals) % (self.attrValues[labels[i]][1])]
self.addMarker(names[0],0.95*self.XAnchor[i]+0.15*self.XAnchor[(i+1)%length], 0.95*self.YAnchor[i]+0.15*self.YAnchor[(i+1)%length], Qt.AlignHCenter | Qt.AlignVCenter)
self.addMarker(names[1], 0.15*self.XAnchor[i]+0.95*self.XAnchor[(i+1)%length], 0.15*self.YAnchor[i]+0.95*self.YAnchor[(i+1)%length], Qt.AlignHCenter | Qt.AlignVCenter)
XAnchorPositions = numpy.zeros([length, dataSize], numpy.float)
YAnchorPositions = numpy.zeros([length, dataSize], numpy.float)
XAnchor = self.createXAnchors(length)
YAnchor = self.createYAnchors(length)
for i in range(length):
Xdata = XAnchor[i] * (1-self.noJitteringScaledData[indices[i]]) + XAnchor[(i+1)%length] * self.noJitteringScaledData[indices[i]]
Ydata = YAnchor[i] * (1-self.noJitteringScaledData[indices[i]]) + YAnchor[(i+1)%length] * self.noJitteringScaledData[indices[i]]
XAnchorPositions[i] = Xdata
YAnchorPositions[i] = Ydata
XAnchorPositions = numpy.swapaxes(XAnchorPositions, 0,1)
YAnchorPositions = numpy.swapaxes(YAnchorPositions, 0,1)
selectedData = numpy.take(self.scaledData, indices, axis = 0)
sum_i = numpy.add.reduce(selectedData)
# test if there are zeros in sum_i
if len(numpy.nonzero(sum_i)) < len(sum_i):
add = numpy.where(sum_i == 0, 1.0, 0.0)
sum_i += add
x_positions = numpy.sum(numpy.swapaxes(XAnchorPositions * numpy.swapaxes(selectedData, 0,1), 0,1), axis=0) * self.scaleFactor / sum_i
y_positions = numpy.sum(numpy.swapaxes(YAnchorPositions * numpy.swapaxes(selectedData, 0,1), 0,1), axis=0) * self.scaleFactor / sum_i
validData = self.getValidList(indices)
xPointsToAdd = {}
yPointsToAdd = {}
self.xLinesToAdd = {} # this is filled in addAnchorLine function
self.yLinesToAdd = {}
if self.showKNNModel == 1 and self.dataHasClass:
# variables and domain for the table
domain = orange.Domain([orange.FloatVariable("xVar"), orange.FloatVariable("yVar"), self.dataDomain.classVar])
table = orange.ExampleTable(domain)
# build an example table
for i in range(dataSize):
if validData[i]:
table.append(orange.Example(domain, [x_positions[i], y_positions[i], self.rawData[i].getclass()]))
kNNValues, probabilities = self.kNNOptimization.kNNClassifyData(table)
accuracy = copy(kNNValues)
measure = self.kNNOptimization.getQualityMeasure()
if self.dataDomain.classVar.varType == orange.VarTypes.Discrete:
if ((measure == CLASS_ACCURACY or measure == AVERAGE_CORRECT) and self.showCorrect) or (measure == BRIER_SCORE and not self.showCorrect):
kNNValues = [1.0 - val for val in kNNValues]
else:
if self.showCorrect:
kNNValues = [1.0 - val for val in kNNValues]
# fill and edge color palettes
bwColors = ColorPaletteBW(-1, 55, 255)
if self.dataHasContinuousClass:
preText = 'Mean square error : '
classColors = self.contPalette
else:
classColors = self.discPalette
if measure == CLASS_ACCURACY: preText = "Classification accuracy : "
elif measure == AVERAGE_CORRECT: preText = "Average correct classification : "
else: preText = "Brier score : "
for i in range(len(table)):
fillColor = bwColors.getRGB(kNNValues[i])
edgeColor = classColors.getRGB(self.originalData[self.dataClassIndex][i])
if not xPointsToAdd.has_key((fillColor, edgeColor, OWPoint.Ellipse, 1)):
xPointsToAdd[(fillColor, edgeColor, OWPoint.Ellipse, 1)] = []
yPointsToAdd[(fillColor, edgeColor, OWPoint.Ellipse, 1)] = []
xPointsToAdd[(fillColor, edgeColor, OWPoint.Ellipse, 1)].append(table[i][0].value)
yPointsToAdd[(fillColor, edgeColor, OWPoint.Ellipse, 1)].append(table[i][1].value)
self.addAnchorLine(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], fillColor, i, length)
# CONTINUOUS class
elif self.dataHasContinuousClass:
for i in range(dataSize):
if not validData[i]: continue
if useDifferentColors:
newColor = self.contPalette[self.noJitteringScaledData[self.dataClassIndex][i]]
else:
newColor = self.color(OWPalette.Data)
self.addCurve(str(i), newColor, newColor, self.pointWidth, xData = [x_positions[i]], yData = [y_positions[i]])
self.addTooltipKey(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], newColor, i)
self.addAnchorLine(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], (newColor.red(), newColor.green(), newColor.blue()), i, length)
# DISCRETE class or no class at all
else:
color = self.color(OWPalette.Data).getRgb()
symbol = self.curveSymbols[0]
for i in range(dataSize):
if not validData[i]: continue
if self.dataHasClass:
if self.useDifferentSymbols:
symbol = self.curveSymbols[int(self.originalData[self.dataClassIndex][i])]
if useDifferentColors:
color = self.discPalette.getRGB(self.originalData[self.dataClassIndex][i])
if not xPointsToAdd.has_key((color, color, symbol, 1)):
xPointsToAdd[(color, color, symbol, 1)] = []
yPointsToAdd[(color, color, symbol, 1)] = []
xPointsToAdd[(color, color, symbol, 1)].append(x_positions[i])
yPointsToAdd[(color, color, symbol, 1)].append(y_positions[i])
self.addAnchorLine(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], color, i, length)
self.addTooltipKey(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], QColor(*color), i)
# draw the points
for i, (fillColor, edgeColor, symbol, showFilled) in enumerate(xPointsToAdd.keys()):
xData = xPointsToAdd[(fillColor, edgeColor, symbol, showFilled)]
yData = yPointsToAdd[(fillColor, edgeColor, symbol, showFilled)]
self.addCurve(str(i), QColor(*fillColor), QColor(*edgeColor), self.pointWidth, symbol = symbol, xData = xData, yData = yData, showFilledSymbols = showFilled)
self.showAnchorLines()
self.xLinesToAdd = {}
self.yLinesToAdd = {}
# draw polygon
polygon_color = self.color(OWPalette.Axis)
self.addCurve("polygon", polygon_color, polygon_color, 0, OWCurve.Lines, symbol = OWPoint.NoSymbol, xData = list(self.XAnchor) + [self.XAnchor[0]], yData = list(self.YAnchor) + [self.YAnchor[0]], lineWidth = 2)
#################
# draw the legend
if self.dataHasDiscreteClass:
category = self.dataDomain.classVar.name
for index, value in enumerate(getVariableValuesSorted(self.dataDomain.classVar)):
if useDifferentColors:
color = self.discPalette[index]
else:
color = self.color(OWPalette.Data)
if self.useDifferentSymbols:
curveSymbol = self.curveSymbols[index]
else:
curveSymbol = self.curveSymbols[0]
self.legend().add_item(category, str(value), OWPoint(curveSymbol, color, self.point_width))
# show legend for continuous class
elif self.dataHasContinuousClass:
self.legend().add_color_gradient(self.dataDomain.classVar.name, [("%%.%df" % self.dataDomain.classVar.numberOfDecimals % v) for v in self.attrValues[self.dataDomain.classVar.name]])
self.replot()
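
Note: this example assumes from copy import copy; accuracy = copy(kNNValues) snapshots the classifier outputs before they are remapped (here via a list comprehension, but a defensive copy also protects against in-place transforms). A minimal sketch:

from copy import copy

kNNValues = [0.9, 0.4, 0.7]
accuracy = copy(kNNValues)                    # keep the raw values
kNNValues = [1.0 - val for val in kNNValues]  # later transformation
assert accuracy == [0.9, 0.4, 0.7]
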
Example 57
Project: gae-flask-todo Source File: datastructures.py
def test_basic_interface(self):
md = self.storage_class()
assert isinstance(md, dict)
mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
('a', 1), ('a', 3), ('d', 4), ('c', 3)]
md = self.storage_class(mapping)
# simple getitem gives the first value
self.assert_equal(md['a'], 1)
self.assert_equal(md['c'], 3)
with self.assert_raises(KeyError):
md['e']
self.assert_equal(md.get('a'), 1)
# list getitem
self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
self.assert_equal(md.getlist('d'), [3, 4])
# do not raise if key not found
self.assert_equal(md.getlist('x'), [])
# simple setitem overwrites all values
md['a'] = 42
self.assert_equal(md.getlist('a'), [42])
# list setitem
md.setlist('a', [1, 2, 3])
self.assert_equal(md['a'], 1)
self.assert_equal(md.getlist('a'), [1, 2, 3])
# verify that it does not change original lists
l1 = [1, 2, 3]
md.setlist('a', l1)
del l1[:]
self.assert_equal(md['a'], 1)
# setdefault, setlistdefault
self.assert_equal(md.setdefault('u', 23), 23)
self.assert_equal(md.getlist('u'), [23])
del md['u']
md.setlist('u', [-1, -2])
# delitem
del md['u']
with self.assert_raises(KeyError):
md['u']
del md['d']
self.assert_equal(md.getlist('d'), [])
# keys, values, items, lists
self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(md.items())),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.items(multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md))),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md, multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.lists())),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
self.assert_equal(list(sorted(iterlists(md))),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
# copy method
c = md.copy()
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# copy method 2
c = copy(md)
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# update with a multidict
od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
self.assert_equal(md.getlist('y'), [0])
# update with a regular dict
md = c
od = {'a': 4, 'y': 0}
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
self.assert_equal(md.getlist('y'), [0])
# pop, poplist, popitem, popitemlist
self.assert_equal(md.pop('y'), 0)
assert 'y' not in md
self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
assert 'a' not in md
self.assert_equal(md.poplist('missing'), [])
# remaining: b=2, c=3
popped = md.popitem()
assert popped in [('b', 2), ('c', 3)]
popped = md.popitemlist()
assert popped in [('b', [2]), ('c', [3])]
# type conversion
md = self.storage_class({'a': '4', 'b': ['2', '3']})
self.assert_equal(md.get('a', type=int), 4)
self.assert_equal(md.getlist('b', type=int), [2, 3])
# repr
md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
assert "('a', 1)" in repr(md)
assert "('a', 2)" in repr(md)
assert "('b', 3)" in repr(md)
# add and getlist
md.add('c', '42')
md.add('c', '23')
self.assert_equal(md.getlist('c'), ['42', '23'])
md.add('c', 'blah')
self.assert_equal(md.getlist('c', type=int), [42, 23])
# setdefault
md = self.storage_class()
md.setdefault('x', []).append(42)
md.setdefault('x', []).append(23)
self.assert_equal(md['x'], [42, 23])
# to dict
md = self.storage_class()
md['foo'] = 42
md.add('bar', 1)
md.add('bar', 2)
self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})
# popitem from empty dict
with self.assert_raises(KeyError):
self.storage_class().popitem()
with self.assert_raises(KeyError):
self.storage_class().popitemlist()
# key errors are of a special type
with self.assert_raises(BadRequestKeyError):
self.storage_class()[42]
# setlist works
md = self.storage_class()
md['foo'] = 42
md.setlist('foo', [1, 2])
self.assert_equal(md.getlist('foo'), [1, 2])
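
Note: the c = copy(md) call exercises the copy protocol on a MultiDict: copy.copy produces an equal but distinct mapping, so the test can mutate one without disturbing the other. The same contract on a plain dict:

from copy import copy

md = {'a': [1, 2, 3]}
c = copy(md)
assert c == md and c is not md  # equal contents, distinct object
c['b'] = [4]
assert 'b' not in md            # top-level additions stay local
assert c['a'] is md['a']        # but nested values are still shared
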
Example 58
Project: nodewatcher Source File: base.py
@transaction.atomic(savepoint=False)
def prepare_root_forms(regpoint, request, root=None, data=None, save=False, form_state=None, flags=0):
"""
Prepares a list of configuration forms for use on a regpoint root's
configuration page.
:param regpoint: Registration point name or instance
:param request: Request instance
:param root: Registration point root instance for which to generate forms
:param data: User-supplied POST data
:param save: Are we performing a save or rendering an initial form
"""
# Ensure that all registry forms, form processors and CGMs are registered.
loader.load_modules('forms', 'formprocessors', 'cgm')
if save and flags & FORM_ONLY_DEFAULTS:
raise ValueError("You cannot use save and FORM_ONLY_DEFAULTS at the same time!")
if isinstance(regpoint, basestring):
regpoint = registration.point(regpoint)
# Transform data into a mutable dictionary in case an immutable one is passed
data = copy.copy(data)
# Prepare context
context = RegistryFormContext(
regpoint=regpoint,
request=request,
root=root,
data=data,
save=save,
validation_errors=False,
pending_save_forms={},
pending_save_foreign_keys={},
form_state=form_state,
flags=flags,
)
# Parse form actions.
if data:
form_actions = json.loads(data.get('ACTIONS', '{}'))
else:
form_actions = {}
for action, options in form_actions.items():
if action == 'defaults':
context.form_state.set_using_defaults(options['value'])
elif action == 'simple_mode':
# Simple mode should also automatically enable defaults.
if options['value']:
context.form_state.set_using_defaults(True)
if flags & FORM_SET_DEFAULTS:
context.form_state.set_using_defaults(flags & FORM_DEFAULTS_ENABLED)
if flags & FORM_INITIAL and flags & FORM_ROOT_CREATE and context.form_state.is_using_defaults():
# Set simple mode to its configured default value.
context.form_state.set_using_simple_mode(
getattr(settings, 'REGISTRY_SIMPLE_MODE', {}).get(regpoint.name, {}).get('default', False)
)
# Prepare form processors.
form_processors = []
for form_processor in regpoint.get_form_processors():
form_processor = form_processor()
form_processor.preprocess(root)
form_processors.append(form_processor)
try:
sid = transaction.savepoint()
forms = RootRegistryRenderItem(context, prepare_forms(context))
if flags & (FORM_DEFAULTS | FORM_ONLY_DEFAULTS):
# Apply form actions before applying defaults.
for action, options in form_actions.items():
if action == 'append':
context.form_state.append_default_item(options['registry_id'], options['parent_id'])
elif action == 'remove':
context.form_state.remove_item(options['index'])
elif action == 'simple_mode':
context.form_state.set_using_simple_mode(options['value'])
# Apply form defaults.
context.form_state.apply_form_defaults(regpoint, flags & FORM_ROOT_CREATE)
if flags & FORM_ONLY_DEFAULTS:
# If only defaults application is requested, we should set defaults and then rollback
# the savepoint in any case; all validation errors are ignored.
transaction.savepoint_rollback(sid)
return context.form_state
# Process forms when saving and there are no validation errors
if save and root is not None and not context.validation_errors:
# Resolve form dependencies and save all forms
for layer, linear_forms in enumerate(toposort.topological_sort(context.pending_save_forms)):
for info in linear_forms:
form = info['form']
# Before saving the form perform the validation again so dependent
# fields can be recalculated
form._clean_fields()
form._clean_form()
form._post_clean()
if form.is_valid():
# Save the form and store the instance into partial configuration so
# dependent objects can reference the new instance. Before we save,
# we also store the form's index into the display_order attribute of
# the instances, so that we preserve order when loading back from db.
form.instance.display_order = info['index']
instance = form.save()
# Only overwrite instances at the top layer (forms which have no dependencies
# on anything else). Models with dependencies will already be updated when
# calling save.
if layer == 0 and info['registry_id'] in context.form_state:
context.form_state[info['registry_id']][info['index']] = instance
for form_id, field in context.pending_save_foreign_keys.get(info['form_id'], []):
setattr(
context.pending_save_forms[form_id]['form'].instance,
field,
instance
)
else:
context.validation_errors = True
# Execute any validation hooks.
for processor in form_processors:
try:
processor.postprocess(root)
except RegistryValidationError, e:
context.validation_errors = True
forms.add_error(e.message)
if not context.validation_errors:
# Persist metadata.
regpoint.set_root_metadata(root, context.form_state.get_metadata())
root.save()
transaction.savepoint_commit(sid)
if flags & FORM_CLEAR_STATE:
context.form_state.clear_session()
else:
transaction.savepoint_rollback(sid)
except RegistryValidationError:
transaction.savepoint_rollback(sid)
except (transaction.TransactionManagementError, django_db.DatabaseError):
# Do not perform a rollback in case of a database error as this will just raise another
# database error exception as the transaction has been aborted.
raise
except:
transaction.savepoint_rollback(sid)
raise
return forms if not save else (context.validation_errors, forms)
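
Note: data = copy.copy(data) turns possibly-immutable POST data into a working copy the form code may modify (Django's QueryDict, for instance, is immutable by default). copy.copy delegates to a __copy__ method when the class defines one, which is how such types can hand back something mutable. A hypothetical sketch, not Django's actual implementation:

import copy

class PostData(dict):
    # stand-in for an immutable request mapping
    def __copy__(self):
        return dict(self)   # hand back a mutable working copy

data = PostData(ACTIONS='{}')
working = copy.copy(data)
working['extra'] = 1
assert 'extra' not in data
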
Example 59
def findroot(ctx, f, x0, solver=Secant, tol=None, verbose=False, verify=True, **kwargs):
r"""
Find a solution to `f(x) = 0`, using *x0* as starting point or
interval for *x*.
Multidimensional overdetermined systems are supported.
You can specify them using a function or a list of functions.
If the found root does not satisfy `|f(x)|^2 \leq \mathrm{tol}`,
an exception is raised (this can be disabled with *verify=False*).
**Arguments**
*f*
one dimensional function
*x0*
starting point, several starting points or interval (depends on solver)
*tol*
the returned solution has an error smaller than this
*verbose*
print additional information for each iteration if true
*verify*
verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
*solver*
a generator for *f* and *x0* returning approximative solution and error
*maxsteps*
after how many steps the solver will cancel
*df*
first derivative of *f* (used by some solvers)
*d2f*
second derivative of *f* (used by some solvers)
*multidimensional*
force multidimensional solving
*J*
Jacobian matrix of *f* (used by multidimensional solvers)
*norm*
used vector norm (used by multidimensional solvers)
solver has to be callable with ``(f, x0, **kwargs)`` and return a generator
yielding pairs of approximative solution and estimated error (which is
expected to be positive).
You can use the following string aliases:
'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
'ridder', 'anewton', 'bisect'
See mpmath.calculus.optimization for their documentation.
**Examples**
The function :func:`~mpmath.findroot` locates a root of a given function using the
secant method by default. A simple example use of the secant method is to
compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> findroot(sin, 3)
3.14159265358979323846264338328
The secant method can be used to find complex roots of analytic functions,
although it must in that case generally be given a nonreal starting value
(or else it will never leave the real line)::
>>> mp.dps = 15
>>> findroot(lambda x: x**3 + 2*x + 1, j)
(0.226698825758202 + 1.46771150871022j)
A nice application is to compute nontrivial roots of the Riemann zeta
function with many digits (good initial values are needed for convergence)::
>>> mp.dps = 30
>>> findroot(zeta, 0.5+14j)
(0.5 + 14.1347251417346937904572519836j)
The secant method can also be used as an optimization algorithm, by passing
it a derivative of a function. The following example locates the positive
minimum of the gamma function::
>>> mp.dps = 20
>>> findroot(lambda x: diff(gamma, x), 1)
1.4616321449683623413
Finally, a useful application is to compute inverse functions, such as the
Lambert W function which is the inverse of `w e^w`, given the first
term of the solution's asymptotic expansion as the initial value. In basic
cases, this gives identical results to mpmath's built-in ``lambertw``
function::
>>> def lambert(x):
... return findroot(lambda w: w*exp(w) - x, log(1+x))
...
>>> mp.dps = 15
>>> lambert(1); lambertw(1)
0.567143290409784
0.567143290409784
>>> lambert(1000); lambertw(1000)
5.2496028524016
5.2496028524016
Multidimensional functions are also supported::
>>> f = [lambda x1, x2: x1**2 + x2,
... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
>>> findroot(f, (0, 0))
[-0.618033988749895]
[-0.381966011250105]
>>> findroot(f, (10, 10))
[ 1.61803398874989]
[-2.61803398874989]
You can verify this by solving the system manually.
Please note that the following (more general) syntax also works::
>>> def f(x1, x2):
... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
...
>>> findroot(f, (0, 0))
[-0.618033988749895]
[-0.381966011250105]
**Multiple roots**
For multiple roots all methods of the Newtonian family (including secant)
converge slowly. Consider this example::
>>> f = lambda x: (x - 1)**99
>>> findroot(f, 0.9, verify=False)
0.918073542444929
Even for a very close starting point the secant method converges very
slowly. Use ``verbose=True`` to illustrate this.
It is possible to modify Newton's method to make it converge regardless of
the root's multiplicity::
>>> findroot(f, -10, solver='mnewton')
1.0
This variant uses the first and second derivative of the function, which is
not very efficient.
Alternatively you can use an experimental Newtonian solver that keeps track
of the speed of convergence and accelerates it using Steffensen's method if
necessary::
>>> findroot(f, -10, solver='anewton', verbose=True)
x: -9.88888888888888888889
error: 0.111111111111111111111
converging slowly
x: -9.77890011223344556678
error: 0.10998877665544332211
converging slowly
x: -9.67002233332199662166
error: 0.108877778911448945119
converging slowly
accelerating convergence
x: -9.5622443299551077669
error: 0.107778003366888854764
converging slowly
x: 0.99999999999999999214
error: 10.562244329955107759
x: 1.0
error: 7.8598304758094664213e-18
ZeroDivisionError: canceled with x = 1.0
1.0
**Complex roots**
For complex roots it's recommended to use Muller's method, as it converges
very fast even for real starting points::
>>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
(0.727136084491197 + 0.934099289460529j)
**Intersection methods**
When you need to find a root in a known interval, it's highly recommended to
use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
Usually they converge faster and more reliably. However, they have problems
with multiple roots and usually need a sign change to find a root::
>>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
0.0
Be careful with symmetric functions::
>>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
Traceback (most recent call last):
...
ZeroDivisionError
It fails even for better starting points, because there is no sign change::
>>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1 > 2.1684e-19)
Try another starting point or tweak arguments.
"""
prec = ctx.prec
try:
ctx.prec += 20
# initialize arguments
if tol is None:
tol = ctx.eps * 2**10
kwargs['verbose'] = kwargs.get('verbose', verbose)
if 'd1f' in kwargs:
kwargs['df'] = kwargs['d1f']
kwargs['tol'] = tol
if isinstance(x0, (list, tuple)):
x0 = [ctx.convert(x) for x in x0]
else:
x0 = [ctx.convert(x0)]
if isinstance(solver, str):
try:
solver = str2solver[solver]
except KeyError:
raise ValueError('could not recognize solver')
# accept list of functions
if isinstance(f, (list, tuple)):
f2 = copy(f)
def tmp(*args):
return [fn(*args) for fn in f2]
f = tmp
# detect multidimensional functions
try:
fx = f(*x0)
multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
except TypeError:
fx = f(x0[0])
multidimensional = False
if 'multidimensional' in kwargs:
multidimensional = kwargs['multidimensional']
if multidimensional:
# only one multidimensional solver available at the moment
solver = MDNewton
if not 'norm' in kwargs:
norm = lambda x: ctx.norm(x, 'inf')
kwargs['norm'] = norm
else:
norm = kwargs['norm']
else:
norm = abs
# happily return starting point if it's a root
if norm(fx) == 0:
if multidimensional:
return ctx.matrix(x0)
else:
return x0[0]
# use solver
iterations = solver(ctx, f, x0, **kwargs)
if 'maxsteps' in kwargs:
maxsteps = kwargs['maxsteps']
else:
maxsteps = iterations.maxsteps
i = 0
for x, error in iterations:
if verbose:
print_('x: ', x)
print_('error:', error)
i += 1
if error < tol * max(1, norm(x)) or i >= maxsteps:
break
if not isinstance(x, (list, tuple, ctx.matrix)):
xl = [x]
else:
xl = x
if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
raise ValueError('Could not find root within given tolerance. '
'(%g > %g)\n'
'Try another starting point or tweak arguments.'
% (norm(f(*xl))**2, tol))
return x
finally:
ctx.prec = prec
Example 60
Project: Tickeys-linux Source File: markup.py
def _pre_render(self):
# split markup, words, and lines
# result: list of word with position and width/height
# during the first pass, we don't care about h/valign
self._cached_lines = lines = []
self._refs = {}
self._anchors = {}
clipped = False
w = h = 0
uw, uh = self.text_size
spush = self._push_style
spop = self._pop_style
opts = options = self.options
options['_ref'] = None
options['_anchor'] = None
options['script'] = 'normal'
shorten = options['shorten']
# If shorten, don't split lines to fit uw: the text will be flattened
# later when shortening, and a line broken mid-word would gain a spurious
# space mid-word when the lines are rejoined.
uw_temp = None if shorten else uw
xpad = options['padding_x']
uhh = (None if uh is not None and options['valign'][-1] != 'p' or
options['shorten'] else uh)
options['strip'] = options['strip'] or options['halign'][-1] == 'y'
for item in self.markup:
if item == '[b]':
spush('bold')
options['bold'] = True
self.resolve_font_name()
elif item == '[/b]':
spop('bold')
self.resolve_font_name()
elif item == '[i]':
spush('italic')
options['italic'] = True
self.resolve_font_name()
elif item == '[/i]':
spop('italic')
self.resolve_font_name()
elif item[:6] == '[size=':
item = item[6:-1]
try:
if item[-2:] in ('px', 'pt', 'in', 'cm', 'mm', 'dp', 'sp'):
size = dpi2px(item[:-2], item[-2:])
else:
size = int(item)
except ValueError:
# fall back to the current font size if the value can't be parsed
size = options['font_size']
spush('font_size')
options['font_size'] = size
elif item == '[/size]':
spop('font_size')
elif item[:7] == '[color=':
color = parse_color(item[7:-1])
spush('color')
options['color'] = color
elif item == '[/color]':
spop('color')
elif item[:6] == '[font=':
fontname = item[6:-1]
spush('font_name')
options['font_name'] = fontname
self.resolve_font_name()
elif item == '[/font]':
spop('font_name')
self.resolve_font_name()
elif item[:5] == '[sub]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'subscript'
elif item == '[/sub]':
spop('font_size')
spop('script')
elif item[:5] == '[sup]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'superscript'
elif item == '[/sup]':
spop('font_size')
spop('script')
elif item[:5] == '[ref=':
ref = item[5:-1]
spush('_ref')
options['_ref'] = ref
elif item == '[/ref]':
spop('_ref')
elif not clipped and item[:8] == '[anchor=':
options['_anchor'] = item[8:-1]
elif not clipped:
item = item.replace('&bl;', '[').replace(
'&br;', ']').replace('&amp;', '&')
opts = copy(options)
extents = self.get_cached_extents()
opts['space_width'] = extents(' ')[0]
w, h, clipped = layout_text(item, lines, (w, h),
(uw_temp, uhh), opts, extents, True, False)
if len(lines): # remove any trailing spaces from the last line
old_opts = self.options
self.options = copy(opts)
w, h, clipped = layout_text('', lines, (w, h), (uw_temp, uhh),
self.options, self.get_cached_extents(), True, True)
self.options = old_opts
if shorten:
options['_ref'] = None # no refs for you!
options['_anchor'] = None
w, h, lines = self.shorten_post(lines, w, h)
self._cached_lines = lines
# when valign is not top, for markup we layout everything (text_size[1]
# is temporarily set to None) and after layout cut to size if too tall
elif uh != uhh and h > uh and len(lines) > 1:
if options['valign'][-1] == 'm': # bottom
i = 0
while i < len(lines) - 1 and h > uh:
h -= lines[i].h
i += 1
del lines[:i]
else: # middle
i = 0
top = int(h / 2. + uh / 2.) # remove extra top portion
while i < len(lines) - 1 and h > top:
h -= lines[i].h
i += 1
del lines[:i]
i = len(lines) - 1 # remove remaining bottom portion
while i and h > uh:
h -= lines[i].h
i -= 1
del lines[i + 1:]
# now justify the text
if options['halign'][-1] == 'y' and uw is not None:
# XXX: update refs to justified pos
# when justifying, each line should've been stripped already
split = partial(re.split, re.compile('( +)'))
uww = uw - 2 * xpad
chr = type(self.text)
space = chr(' ')
empty = chr('')
for i in range(len(lines)):
line = lines[i]
words = line.words
# if there's nothing to justify, we're done
if (not line.w or int(uww - line.w) <= 0 or not len(words) or
line.is_last_line):
continue
done = False
parts = [None, ] * len(words) # contains words split by space
idxs = [None, ] * len(words) # indices of the space in parts
# break each word into spaces and add spaces until it's full
# do first round of split in case we don't need to split all
for w in range(len(words)):
word = words[w]
sw = word.options['space_width']
p = parts[w] = split(word.text)
idxs[w] = [v for v in range(len(p)) if
p[v].startswith(' ')]
# now we have the indices of the spaces in split list
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# there's not a single space in the line?
if not any(idxs):
continue
# now keep adding spaces to already split words until done
while not done:
for w in range(len(words)):
if not idxs[w]:
continue
word = words[w]
sw = word.options['space_width']
p = parts[w]
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# if not completely full, push last words to right edge
diff = int(uww - line.w)
if diff > 0:
# find the last word that had a space
for w in range(len(words) - 1, -1, -1):
if not idxs[w]:
continue
break
old_opts = self.options
self.options = word.options
word = words[w]
# split that word into left/right and push right till uww
l_text = empty.join(parts[w][:idxs[w][-1]])
r_text = empty.join(parts[w][idxs[w][-1]:])
left = LayoutWord(word.options,
self.get_extents(l_text)[0], word.lh, l_text)
right = LayoutWord(word.options,
self.get_extents(r_text)[0], word.lh, r_text)
left.lw = max(left.lw, word.lw + diff - right.lw)
self.options = old_opts
# now put words back together with right/left inserted
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
words[w] = right
words.insert(w, left)
else:
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
line.w = uww
w = max(w, uww)
self._internal_size = w, h
if uw:
w = uw
if uh:
h = uh
if h > 1 and w < 2:
w = 2
if w < 1:
w = 1
if h < 1:
h = 1
return int(w), int(h)
Example 61
Project: jirafs Source File: cmdline.py
def main():
term = Terminal()
if sys.version_info < (2, 7):
raise RuntimeError(
"Jirafs requires minimally version 2.7 of Python 2, or "
"any version of Python 3. Please upgrade your version of "
"python before using Jirafs."
)
if utils.get_git_version() < NormalizedVersion('1.8'):
raise RuntimeError(
"Jirafs requires minimally version 1.8 of Git. Please "
"upgrade your version of git before using Jirafs."
)
commands = utils.get_installed_commands()
parser = argparse.ArgumentParser(
description='Edit Jira issues locally from your filesystem',
add_help=False,
)
parser.add_argument(
'command',
type=six.text_type,
choices=commands.keys()
)
parser.add_argument(
'--subtasks',
action='store_true',
default=False
)
parser.add_argument(
'--log-level',
default=None,
dest='log_level',
)
parser.add_argument(
'--folder',
default=os.getcwd()
)
parser.add_argument(
'--no-subfolders',
action='store_true',
default=False,
)
parser.add_argument(
'--traceback',
action='store_true',
default=False,
)
args, extra = parser.parse_known_args()
if args.log_level is not None:
logging.basicConfig(level=logging.getLevelName(args.log_level))
command_name = args.command
cmd_class = commands[command_name]
# Subtasks
if args.subtasks:
cmd_class.RUN_FOR_SUBTASKS = True
started = time.time()
logger.debug(
'Command %s(%s) started',
command_name,
extra
)
jira = utils.lazy_get_jira()
try:
value = cmd_class.execute_command(
extra, jira=jira, path=args.folder, command_name=command_name
)
logger.debug(
'Command %s(%s) finished in %s seconds',
command_name,
extra,
(time.time() - started)
)
if value:
value.echo()
sys.exit(value.return_code)
except GitCommandError as e:
print(
u"{t.red}Error (code: {code}) while running git "
u"command.{t.normal}".format(
t=term,
code=e.returncode
)
)
print("")
print(u"{t.red}Command:{t.normal}{t.red}{t.bold}".format(t=term))
print(u" {cmd}".format(cmd=e.command))
print(u"{t.normal}".format(t=term))
print(u"{t.red}Output:{t.normal}{t.red}{t.bold}".format(t=term))
for line in e.output.decode('utf8').split('\n'):
print(u" %s" % line)
print(u"{t.normal}".format(t=term))
if args.traceback:
traceback.print_exc()
sys.exit(10)
except NotTicketFolderException:
if not getattr(cmd_class, 'TRY_SUBFOLDERS', False):
print(
u"{t.red}The command '{cmd}' must be ran from "
u"within an issue folder.{t.normal}".format(
t=term,
cmd=command_name
)
)
sys.exit(20)
elif args.no_subfolders:
sys.exit(20)
count_runs = 0
for folder in os.listdir(os.getcwd()):
full_path = os.path.join(
os.getcwd(),
folder,
)
if not os.path.isdir(full_path):
continue
try:
full_args = copy.copy(sys.argv)
if '--no-subfolders' not in full_args:
full_args.append('--no-subfolders')
result = subprocess.call(
' '.join([shlex_quote(a) for a in full_args]),
cwd=full_path,
shell=True
)
if result == 0:
count_runs += 1
except NotTicketFolderException:
pass
if count_runs == 0:
if args.traceback:
traceback.print_exc()
sys.exit(21)
except JIRAError as e:
print(
u"{t.red}Jirafs encountered an error while interacting with "
u"your JIRA instance: {t.normal}{t.red}{t.bold}{error}"
u"{t.normal}".format(
t=term,
error=str(e)
)
)
if args.traceback:
traceback.print_exc()
sys.exit(70)
except JiraInteractionFailed as e:
print(
u"{t.red}JIRA was unable to satisfy your "
u"request: {t.normal}{t.red}{t.bold}{error}{t.normal}".format(
t=term,
error=str(e)
)
)
if args.traceback:
traceback.print_exc()
sys.exit(80)
except JirafsError as e:
print(
u"{t.red}Jirafs encountered an error processing your "
u"request: {t.normal}{t.red}{t.bold}{error}{t.normal}".format(
t=term,
error=str(e)
)
)
if args.traceback:
traceback.print_exc()
sys.exit(90)
Example 62
Project: fontlab-scripts Source File: MarkFeatureGenerator.py
def readInstanceFile(instancesFilePath):
f = open(instancesFilePath, "rt")
data = f.read()
f.close()
lines = data.splitlines()
i = 0
parseError = 0
keyDict = copy.copy(kFixedFieldKeys)
numKeys = kNumFixedFields
numLines = len(lines)
instancesList = []
for i in range(numLines):
line = lines[i]
# Skip over blank lines
line2 = line.strip()
if not line2:
continue
# Get rid of all comments. If we find a key definition comment line, parse it.
commentIndex = line.find('#')
if commentIndex >= 0:
if line.startswith(kFieldsKey):
if instancesList:
print "ERROR: Header line (%s) must preceed a data line." % kFieldsKey
raise ParseError
# parse the line with the field names.
line = line[len(kFieldsKey):]
line = line.strip()
keys = line.split('\t')
keys = map(lambda name: name.strip(), keys)
numKeys = len(keys)
k = kNumFixedFields
while k < numKeys:
keyDict[k] = keys[k]
k += 1
continue
else:
line = line[:commentIndex]
continue
# Must be a data line.
fields = line.split('\t')
fields = map(lambda datum: datum.strip(), fields)
numFields = len(fields)
if (numFields != numKeys):
print "ERROR: In line %s, the number of fields %s does not match the number of key names %s (FamilyName, FontName, FullName, Weight, Coords, IsBold)." % (i+1, numFields, numKeys)
parseError = 1
continue
instanceDict = {}
# Build a dict from key to value. Some kinds of values need special processing.
for k in range(numFields):
key = keyDict[k]
field = fields[k]
if not field:
continue
if field in ["Default", "None", "FontBBox"]:
# FontBBox is no longer supported - I calculate the real
# instance fontBBox from the glyph metrics instead,
continue
if key == kFontName:
value = field
elif key in [kExtraGlyphs, kExceptionSuffixes]:
value = eval(field)
elif key in [kIsBoldKey, kIsItalicKey, kCoordsKey]:
try:
value = eval(field) # this works for all three fields.
if key == kIsBoldKey: # need to convert to Type 1 field key.
instanceDict[key] = value
# add kForceBold key.
key = kForceBold
if value == 1:
value = "true"
else:
value = "false"
elif key == kIsItalicKey:
if value == 1:
value = "true"
else:
value = "false"
elif key == kCoordsKey:
if type(value) == type(0):
value = (value,)
except (NameError, SyntaxError):
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
elif field[0] in ["[","{"]: # it is a Type 1 array value. Turn it into a list and verify that there's an even number of values for the alignment zones
value = field[1:-1].split() # Remove the begin and end brackets/braces, and make a list
if key in kAlignmentZonesKeys:
if len(value) % 2 != 0:
print "ERROR: In line %s, the %s field does not have an even number of values." % (i+1, key)
parseError = 1
continue
if key in kTopAlignZonesKeys: # The Type 1 spec only allows 7 top zones (7 pairs of values)
if len(value) > kMaxTopZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxTopZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kBotAlignZonesKeys: # The Type 1 spec only allows 5 bottom zones (5 pairs of values)
if len(value) > kMaxBotZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxBotZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kStdStemsKeys:
if len(value) > kMaxStdStemsSize:
print "ERROR: In line %s, the %s field can only have %d value." % (i+1, key, kMaxStdStemsSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
if key in kStemSnapKeys: # The Type 1 spec only allows 12 stem widths, including 1 standard stem
if len(value) > kMaxStemSnapSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxStemSnapSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
else:
# either a single number or a string.
if re.match(r"^[-.\d]+$", field):
value = field #it is a Type 1 number. Pass as is, as a string.
else:
value = field
instanceDict[key] = value
if (kStdHW in instanceDict and kStemSnapH not in instanceDict) or (kStdHW not in instanceDict and kStemSnapH in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdHW, kStemSnapH)
parseError = 1
elif (kStdHW in instanceDict and kStemSnapH in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapH][0] != instanceDict[kStdHW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapH, kStdHW)
parseError = 1
if (kStdVW in instanceDict and kStemSnapV not in instanceDict) or (kStdVW not in instanceDict and kStemSnapV in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdVW, kStemSnapV)
parseError = 1
elif (kStdVW in instanceDict and kStemSnapV in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapV][0] != instanceDict[kStdVW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapV, kStdVW)
parseError = 1
instancesList.append(instanceDict)
if parseError or len(instancesList) == 0:
raise(ParseError)
return instancesList
Example 63
Project: spitfire Source File: render_benchmark.py
def get_spitfire_tests():
if not spitfire:
return []
tmpl_src = """
<table>
#for $row in $table
<tr>
#for $column in $row.values()
<td>$column</td>
#end for
</tr>
#end for
</table>
"""
tmpl_search_list = [{'table': TABLE_DATA}]
default_opts = spitfire.compiler.options.default_options
o1_opts = spitfire.compiler.options.o1_options
o2_opts = spitfire.compiler.options.o2_options
o3_opts = spitfire.compiler.options.o3_options
def _spitfire_baked_opts(o):
o = copy.copy(o)
o.baked_mode = True
o.generate_unicode = False
return o
baked_opts = _spitfire_baked_opts(default_opts)
baked_o1_opts = _spitfire_baked_opts(o1_opts)
baked_o2_opts = _spitfire_baked_opts(o2_opts)
baked_o3_opts = _spitfire_baked_opts(o3_opts)
tmpl = spitfire.compiler.util.load_template(tmpl_src,
'tmpl',
analyzer_options=default_opts)
tmpl_o1 = spitfire.compiler.util.load_template(tmpl_src,
'tmpl_o1',
analyzer_options=o1_opts)
tmpl_o2 = spitfire.compiler.util.load_template(tmpl_src,
'tmpl_o2',
analyzer_options=o2_opts)
tmpl_o3 = spitfire.compiler.util.load_template(tmpl_src,
'tmpl_o3',
analyzer_options=o3_opts)
tmpl_baked = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked',
analyzer_options=baked_opts)
tmpl_baked_o1 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked_o1',
analyzer_options=baked_o1_opts)
tmpl_baked_o2 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked_o2',
analyzer_options=baked_o2_opts)
tmpl_baked_o3 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked_o3',
analyzer_options=baked_o3_opts)
tmpl_unfiltered = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered',
analyzer_options=default_opts,
compiler_options={'enable_filters': False})
tmpl_unfiltered_o1 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered_o1',
analyzer_options=o1_opts,
compiler_options={'enable_filters': False})
tmpl_unfiltered_o2 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered_o2',
analyzer_options=o2_opts,
compiler_options={'enable_filters': False})
tmpl_unfiltered_o3 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered_o3',
analyzer_options=o3_opts,
compiler_options={'enable_filters': False})
def test_spitfire():
"""Spitfire template"""
tmpl(search_list=tmpl_search_list).main()
def test_spitfire_o1():
"""Spitfire template -O1"""
tmpl_o1(search_list=tmpl_search_list).main()
def test_spitfire_o2():
"""Spitfire template -O2"""
tmpl_o2(search_list=tmpl_search_list).main()
def test_spitfire_o3():
"""Spitfire template -O3"""
tmpl_o3(search_list=tmpl_search_list).main()
def test_spitfire_baked():
"""Spitfire template baked"""
tmpl_baked(search_list=tmpl_search_list).main()
def test_spitfire_baked_o1():
"""Spitfire template baked -O1"""
tmpl_baked_o1(search_list=tmpl_search_list).main()
def test_spitfire_baked_o2():
"""Spitfire template baked -O2"""
tmpl_baked_o2(search_list=tmpl_search_list).main()
def test_spitfire_baked_o3():
"""Spitfire template baked -O3"""
tmpl_baked_o3(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered():
"""Spitfire template unfiltered"""
tmpl_unfiltered(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered_o1():
"""Spitfire template unfiltered -O1"""
tmpl_unfiltered_o1(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered_o2():
"""Spitfire template unfiltered -O2"""
tmpl_unfiltered_o2(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered_o3():
"""Spitfire template unfiltered -O3"""
tmpl_unfiltered_o3(search_list=tmpl_search_list).main()
return [
test_spitfire,
test_spitfire_o1,
test_spitfire_o2,
test_spitfire_o3,
test_spitfire_baked,
test_spitfire_baked_o1,
test_spitfire_baked_o2,
test_spitfire_baked_o3,
test_spitfire_unfiltered,
test_spitfire_unfiltered_o1,
test_spitfire_unfiltered_o2,
test_spitfire_unfiltered_o3,
]
Example 64
Project: python-documentcloud Source File: test_all.py
def test_private_actions(self):
"""
Test all the stuff that requires a login.
"""
# Get an editable document
obj_id = self.get_editable_document(self.version)
obj = self.private_client.documents.get(obj_id)
# Make sure `data` attribute will only accept a dictionary.
obj.data = dict(foo='bar')
self.assertRaises(TypeError, obj.set_data, "string")
self.assertRaises(TypeError, obj.set_data, 666)
self.assertRaises(TypeError, obj.set_data, obj)
# Test whether we can put random noise to all the editable fields.
title = get_random_string()
source = get_random_string()
description = get_random_string()
data = {get_random_string(): get_random_string()}
if obj.resources.related_article == 'http://documents.latimes.com':
related_article = 'http://documentcloud.org'
else:
related_article = 'http://documents.latimes.com'
if obj.resources.published_url == 'http://documents.latimes.com':
published_url = 'http://documentcloud.org'
else:
published_url = 'http://documents.latimes.com'
obj.title = title
obj.source = source
obj.description = description
obj.data = data
obj.resources.related_article = related_article
obj.resources.published_url = published_url
# Save the changes up to DocumentCloud
obj.put()
# Pull the object again and verify the changes stuck
obj = self.private_client.documents.get(obj_id)
self.assertEqual(obj.title, title)
self.assertEqual(obj.source, source)
self.assertEqual(obj.description, description)
self.assertEqual(obj.data, data)
self.assertEqual(obj.resources.related_article, related_article)
self.assertEqual(obj.resources.published_url, published_url)
# Test reserved namespaces to make sure they're protected
black_list = [
'person', 'organization', 'place', 'term', 'email', 'phone',
'city', 'state', 'country', 'title', 'description', 'source',
'account', 'group', 'project', 'projectid', 'document', 'access',
'filter',
]
for key in black_list:
self.assertRaises(ValueError, setattr, obj, "data", {key: 'foo'})
obj.data = dict(boom='bap')
# Test to make sure non-strings can't get into the data dictionary
with self.assertRaises(TypeError):
obj.data = dict(a=1)
with self.assertRaises(TypeError):
obj.data = {1: 'a'}
obj.data = dict(boom='bap')
with self.assertRaises(TypeError):
obj.data[1] = 2
# Resources
self.assertEqual(obj.published_url, obj.resources.published_url)
self.assertEqual(obj.related_article, obj.resources.related_article)
# And their shortcuts
obj.published_url = 'http://latimes.com'
obj.related_article = 'http://palewi.re'
self.assertEqual(obj.published_url, obj.resources.published_url)
self.assertEqual(obj.related_article, obj.resources.related_article)
# Test whether the save method properly aliases `put`.
title = get_random_string()
obj.title = title
obj.save()
obj = self.private_client.documents.get(obj_id)
self.assertEqual(obj.title, title)
# Test whether you can save an attribute with some weird encoding
before_title = copy(obj.title)
before_description = copy(obj.description)
obj.title = random.choice(list(PANGRAMS.keys()))
obj.description = random.choice(list(PANGRAMS.keys()))
obj.put()
obj.title = before_title
obj.description = before_description
obj.put()
# Upload
title = get_random_string()
obj = self.private_client.documents.upload(
os.path.join(os.path.dirname(__file__), "test.pdf"),
title,
description='Blah blah',
related_article='http://www.latimes.com',
data=dict(like_this='like+that', boom='bap'),
)
self.assertTrue(isinstance(obj, Document))
self.assertEqual(obj.title, title)
self.assertEqual(obj.description, 'Blah blah')
self.assertEqual(obj.related_article, 'http://www.latimes.com')
self.assertEqual(
obj.data,
{u'like_this': u'like+that', u'boom': u'bap'}
)
# Delete
obj.delete()
self.assertRaises(
DoesNotExistError,
self.private_client.documents.get,
obj.id
)
# Test upload with bad keyword
title = '001 - Test upload (%s)' % get_random_string()
self.assertRaises(
ValueError,
self.private_client.documents.upload,
os.path.join(os.path.dirname(__file__), "test.pdf"),
title,
description='Blah blah',
related_article='http://www.latimes.com',
# Access is a reserved keyword so this should fail
data=dict(access='this', boom='bap'),
)
# Upload with a file object, not a path
title = get_random_string()
obj = self.private_client.docuements.upload(
open(os.path.join(os.path.dirname(__file__), "test.pdf"), "rb"),
title,
)
self.assertTrue(isinstance(obj, Document))
self.assertEqual(obj.title, title)
obj.delete()
# Ensure that documents with non-English characters can be uploaded
pdf = os.path.join(os.path.dirname(__file__), "español.pdf")
obj = self.private_client.documents.upload(open(pdf, 'rb'))
self.assertTrue(isinstance(obj, Document))
obj.delete()
# Test virtual file upload and delete
path = os.path.join(os.path.dirname(__file__), "español.pdf")
real_file = open(path, 'rb')
if six.PY3:
virtual_file = io.BytesIO(real_file.read())
else:
virtual_file = io.StringIO(real_file.read())
obj = self.private_client.documents.upload(
virtual_file,
title='Espanola!'
)
self.assertTrue(isinstance(obj, Document))
obj.delete()
# Test secure upload
title = get_random_string()
obj = self.private_client.documents.upload(
os.path.join(os.path.dirname(__file__), "test.pdf"),
title,
secure=True,
)
self.assertTrue(isinstance(obj, Document))
obj.delete()
# Upload everything in this directory.
obj_list = self.private_client.documents.upload_directory(
'./',
source='Los Angeles Times',
published_url='http://www.latimes.com',
)
self.assertEqual(len(obj_list), 2)
self.assertTrue(isinstance(obj_list[0], Document))
self.assertEqual(obj_list[0].source, 'Los Angeles Times')
self.assertEqual(obj_list[0].published_url, 'http://www.latimes.com')
[i.delete() for i in obj_list]
# Test URL upload
url = 'http://ord.legistar.com/Chicago/attachments/e3a0cbcb-044d-4ec3-9848-23c5692b1943.pdf'
obj = self.private_client.documents.upload(url)
obj.delete()
Example 65
Project: retriever Source File: parse_script_to_json.py
def parse_script_to_json(script_file, location=SCRIPT_DIR):
definition = open(os.path.join(location, script_file) + ".script", 'r')
values = {}
tables = []
last_table = {}
replace = []
keys_to_ignore = ["template"]
urls = {}
values["retriever"] = "True"
values["version"] = 1.0
values["retriever_minimum_version"] = "2.0.dev"
for line in [str(line).strip() for line in definition]:
if line and ':' in line and not line[0] == '#':
split_line = [a.strip() for a in line.split(":")]
key = split_line[0].lower()
value = ':'.join(split_line[1:])
if key == "name":
values["title"] = value
elif key == "shortname":
values["name"] = value
elif key == "description":
values["description"] = value
elif key == "tags":
values["keywords"] = [v.strip() for v in value.split(",")]
elif key == "url" or key == "ref":
values["homepage"] = value
elif key == "citation":
values["citation"] = value
elif key == "replace":
# could be made a dict
replace = [(v.split(',')[0].strip(), v.split(',')[1].strip())
for v in [val for val in value.split(';')]]
elif key == "table":
last_table = {}
last_table["name"] = value.split(',')[0].strip()
last_table["url"] = ','.join(value.split(',')[1:]).strip()
last_table["schema"] = {}
last_table["dialect"] = {}
tables.append(last_table)
urls[last_table["name"]] = last_table["url"]
if replace:
last_table["dialect"]["replace_columns"] = replace
elif key == "*column":
if last_table:
vs = [v.strip() for v in value.split(',')]
if "fields" not in last_table["schema"]:
last_table["schema"]["fields"] = []
column = {}
column['name'] = vs[0]
column['type'] = vs[1]
if len(vs) > 2:
column['size'] = vs[2]
last_table["schema"]["fields"].append(copy(column))
elif key == "*nulls":
if last_table:
nulls = [eval(v) for v in [val.strip()
for val in value.split(',')]]
last_table["dialect"]["nulls"] = nulls
elif key == "*ct_column":
if last_table:
last_table["schema"]["ct_column"] = value
elif key == "*ct_names":
if last_table:
last_table["schema"]["ct_names"] = [v.strip() for v in
value.split(',')]
elif key[0] == "*":
# attribute that should be applied to the most recently
# declared table
key = key[1:]
if last_table:
try:
e = eval(value)
except:
e = str(value)
last_table["dialect"][key] = str(e)
else:
values[key] = str(value)
values["resources"] = tables
values["urls"] = urls
if 'name' not in values:
try:
values['name'] = values['title']
except:
pass
# drop ignored keys without mutating the dict mid-iteration
for key in keys_to_ignore:
values.pop(key, None)
with open(os.path.join(location, values['name']) + '.json', 'w') as json_file:
json_str = json.dumps(values, sort_keys=True, indent=4,
separators=(',', ': '))
json_file.write(json_str + '\n')
definition.close()
Example 66
Project: python-sdc-client Source File: _client.py
def add_dashboard_panel(self, dashboard, name, panel_type, metrics, scope=None, sort_by=None, limit=None, layout=None):
panel_configuration = {
'name': name,
'showAs': None,
'showAsType': None,
'metrics': [],
'gridConfiguration': {
'col': 1,
'row': 1,
'size_x': 12,
'size_y': 6
}
}
if panel_type == 'timeSeries':
#
# In case of a time series, the current dashboard implementation
# requires the timestamp to be explicitly specified as "key".
# However, this function uses the same abstraction of the data API
# that doesn't require to specify a timestamp key (you only need to
# specify time window and sampling)
#
metrics = copy.copy(metrics)
metrics.insert(0, {'id': 'timestamp'})
#
# Convert list of metrics to format used by Sysdig Cloud
#
property_names = {}
for i, metric in enumerate(metrics):
property_name = 'v' if 'aggregations' in metric else 'k'
property_names[metric['id']] = property_name + str(i)
panel_configuration['metrics'].append({
'metricId': metric['id'],
'aggregation': metric['aggregations']['time'] if 'aggregations' in metric else None,
'groupAggregation': metric['aggregations']['group'] if 'aggregations' in metric else None,
'propertyName': property_name + str(i)
})
#
# Convert scope to format used by Sysdig Cloud
#
if scope is not None:
filter_expressions = scope.strip(' \t\n\r?!.').split(" and ")
filters = []
for filter_expression in filter_expressions:
values = filter_expression.strip(' \t\n\r?!.').split("=")
if len(values) != 2:
return [False, "invalid scope format"]
filters.append({
'metric': values[0].strip(' \t\n\r?!.'),
'op': '=',
'value': values[1].strip(' \t\n\r"?!.'),
'filters': None
})
if len(filters) > 0:
panel_configuration['filter'] = {
'filters': {
'logic': 'and',
'filters': filters
}
}
#
# Configure panel type
#
if panel_type == 'timeSeries':
panel_configuration['showAs'] = 'timeSeries'
panel_configuration['showAsType'] = 'line'
if limit is not None:
panel_configuration['paging'] = {
'from': 0,
'to': limit - 1
}
elif panel_type == 'number':
panel_configuration['showAs'] = 'summary'
panel_configuration['showAsType'] = 'summary'
elif panel_type == 'top':
panel_configuration['showAs'] = 'top'
panel_configuration['showAsType'] = 'bars'
if sort_by is None:
panel_configuration['sorting'] = [{
'id': 'v0',
'mode': 'desc'
}]
else:
panel_configuration['sorting'] = [{
'id': property_names[sort_by['metric']],
'mode': sort_by['mode']
}]
if limit is None:
panel_configuration['paging'] = {
'from': 0,
'to': 10
}
else:
panel_configuration['paging'] = {
'from': 0,
'to': limit - 1
}
#
# Configure layout
#
if layout is not None:
panel_configuration['gridConfiguration'] = layout
#
# Clone existing dashboard...
#
dashboard_configuration = copy.deepcopy(dashboard)
dashboard_configuration['id'] = None
#
# ... and add the new panel
#
dashboard_configuration['items'].append(panel_configuration)
#
# Update dashboard
#
res = requests.put(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}))
if not self.__checkResponse(res):
return [False, self.lasterr]
else:
return [True, res.json()]
Example 67
Project: bundlewrap Source File: __init__.py
def apply(
self,
autoskip_selector="",
my_soft_locks=(),
other_peoples_soft_locks=(),
interactive=False,
interactive_default=True,
):
self.node.repo.hooks.item_apply_start(
self.node.repo,
self.node,
self,
)
keys_to_fix = None
status_code = None
status_before = None
status_after = None
start_time = datetime.now()
if self.covered_by_autoskip_selector(autoskip_selector):
io.debug(_(
"autoskip matches {item} on {node}"
).format(item=self.id, node=self.node.name))
status_code = self.STATUS_SKIPPED
keys_to_fix = [_("cmdline")]
if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks):
status_code = self.STATUS_SKIPPED
keys_to_fix = [_("soft locked")]
if self.triggered and not self.has_been_triggered and status_code is None:
io.debug(_(
"skipping {item} on {node} because it wasn't triggered"
).format(item=self.id, node=self.node.name))
status_code = self.STATUS_SKIPPED
keys_to_fix = [_("not triggered")]
if self.cached_unless_result and status_code is None:
io.debug(_(
"'unless' for {item} on {node} succeeded, not fixing"
).format(item=self.id, node=self.node.name))
status_code = self.STATUS_SKIPPED
keys_to_fix = ["unless"]
if self._faults_missing_for_attributes and status_code is None:
if self.error_on_missing_fault:
self._raise_for_faults()
else:
io.debug(_(
"skipping {item} on {node} because it is missing faults "
"for these attributes: {attrs} "
"(most of the time this means you're missing "
"a required key in your .secrets.cfg)"
).format(
attrs=", ".join(sorted(self._faults_missing_for_attributes)),
item=self.id,
node=self.node.name,
))
status_code = self.STATUS_SKIPPED
keys_to_fix = [_("Fault unavailable")]
if status_code is None:
try:
status_before = self.cached_status
except FaultUnavailable:
if self.error_on_missing_fault:
self._raise_for_faults()
else:
io.debug(_(
"skipping {item} on {node} because it is missing Faults "
"(most of the time this means you're missing "
"a required key in your .secrets.cfg)"
).format(
item=self.id,
node=self.node.name,
))
status_code = self.STATUS_SKIPPED
keys_to_fix = [_("Fault unavailable")]
else:
if status_before.correct:
status_code = self.STATUS_OK
if status_code is None:
keys_to_fix = self.display_keys(
copy(self.cached_cdict),
copy(status_before.sdict),
status_before.keys_to_fix[:],
)
if not interactive:
with io.job(_(" {node} {bundle} {item} fixing...").format(
bundle=self.bundle.name,
item=self.id,
node=self.node.name,
)):
self.fix(status_before)
else:
if status_before.must_be_created:
question_text = _("Doesn't exist. Will be created.")
elif status_before.must_be_deleted:
question_text = _("Found on node. Will be removed.")
else:
cdict, sdict = self.display_dicts(
copy(self.cached_cdict),
copy(status_before.sdict),
keys_to_fix,
)
question_text = self.ask(cdict, sdict, keys_to_fix)
question = wrap_question(
self.id,
question_text,
_("Fix {}?").format(bold(self.id)),
prefix="{x} {node} ".format(
node=bold(self.node.name),
x=blue("?"),
),
)
answer = io.ask(
question,
interactive_default,
epilogue="{x} {node}".format(
node=bold(self.node.name),
x=blue("?"),
),
)
if answer:
with io.job(_(" {node} {bundle} {item} fixing...").format(
bundle=self.bundle.name,
item=self.id,
node=self.node.name,
)):
self.fix(status_before)
else:
status_code = self.STATUS_SKIPPED
keys_to_fix = [_("interactive")]
if status_code is None:
status_after = self.get_status(cached=False)
status_code = self.STATUS_FIXED if status_after.correct else self.STATUS_FAILED
if status_code == self.STATUS_SKIPPED:
# can't use else for this because status_before is None
changes = keys_to_fix
elif status_before.must_be_created:
changes = True
elif status_before.must_be_deleted:
changes = False
elif status_code == self.STATUS_FAILED:
changes = self.display_keys(
self.cached_cdict.copy(),
status_after.sdict.copy(),
status_after.keys_to_fix[:],
)
else:
changes = keys_to_fix
self.node.repo.hooks.item_apply_end(
self.node.repo,
self.node,
self,
duration=datetime.now() - start_time,
status_code=status_code,
status_before=status_before,
status_after=status_after,
)
return (status_code, changes)
Example 68
Project: openerp-7.0 Source File: report_aeroo.py
def create_aeroo_report(self, cr, uid, ids, data, report_xml, context=None, output='odt'):
""" Returns an aeroo report generated with aeroolib
"""
pool = pooler.get_pool(cr.dbname)
if not context:
context={}
context = context.copy()
if self.name=='report.printscreen.list':
context['model'] = data['model']
context['ids'] = ids
print_id = context.get('print_id', False)
aeroo_print = self.active_prints[print_id] # Aeroo print object
aeroo_print.subreports = []
#self.oo_subreports[print_id] = []
objects = self.getObjects_mod(cr, uid, ids, report_xml.report_type, context) or []
oo_parser = self.parser(cr, uid, self.name2, context=context)
oo_parser.localcontext.update(context)
oo_parser.set_context(objects, data, ids, report_xml.report_type)
self.set_xml_data_fields(objects, oo_parser) # Get/Set XML
oo_parser.localcontext['data'] = data
oo_parser.localcontext['user_lang'] = context.get('lang', False)
if len(objects)>0:
oo_parser.localcontext['o'] = objects[0]
xfunc = ExtraFunctions(cr, uid, report_xml.id, oo_parser.localcontext)
oo_parser.localcontext.update(xfunc.functions)
#company_id = objects and 'company_id' in objects[0]._table._columns.keys() and \
# objects[0].company_id and objects[0].company_id.id or False # for object company usage
company_id = False
style_io=self.get_styles_file(cr, uid, report_xml, company=company_id, context=context)
if report_xml.tml_source in ('file', 'database'):
if not report_xml.report_sxw_content or report_xml.report_sxw_content=='False':
raise osv.except_osv(_('Error!'), _('No template found!'))
file_data = base64.decodestring(report_xml.report_sxw_content)
else:
file_data = self.get_other_template(cr, uid, data, oo_parser)
if not file_data and not report_xml.report_sxw_content:
self.logger("End process %s (%s), elapsed time: %s" % (self.name, self.table, time.time() - aeroo_print.start_time), logging.INFO) # debug mode
return False, output
#elif file_data:
# template_io = StringIO()
# template_io.write(file_data or report_xml.report_sxw_content)
# basic = Template(source=template_io, styles=style_io)
else:
if report_xml.preload_mode == 'preload' and hasattr(self, 'serializer'):
serializer = copy.copy(self.serializer)
serializer.apply_style(style_io)
template_io = serializer.template
else:
template_io = StringIO()
template_io.write(file_data or base64.decodestring(report_xml.report_sxw_content) )
serializer = OOSerializer(template_io, oo_styles=style_io)
try:
basic = Template(source=template_io, serializer=serializer)
except Exception, e:
self._raise_exception(e, print_id)
#if not file_data:
# return False, output
#basic = Template(source=template_io, serializer=serializer)
aeroo_ooo = context.get('aeroo_ooo', False)
oo_parser.localcontext['include_subreport'] = self._subreport(cr, uid, aeroo_print, output='odt', aeroo_ooo=aeroo_ooo, context=context)
oo_parser.localcontext['include_document'] = self._include_document(aeroo_ooo, print_id)
deferred = context.get('deferred_process')
oo_parser.localcontext['progress_update'] = deferred and deferred.progress_update or (lambda:True)
####### Add counter functions to localcontext #######
oo_parser.localcontext.update({'def_inc':self._def_inc(aeroo_print),
'get_inc':self._get_inc(aeroo_print),
'prev':self._prev(aeroo_print),
'next':self._next(aeroo_print)})
user_name = pool.get('res.users').browse(cr, uid, uid, {}).name
model_id = pool.get('ir.model').search(cr, uid, [('model','=',context.get('active_model', data['model']) or data['model'])])[0]
model_name = pool.get('ir.model').browse(cr, uid, model_id).name
#basic = Template(source=None, filepath=odt_path)
basic.Serializer.add_title(model_name)
basic.Serializer.add_creation_user(user_name)
module_info = load_information_from_description_file('report_aeroo')
version = module_info['version']
basic.Serializer.add_generator_info('Aeroo Lib/%s Aeroo Reports/%s' % (aeroolib.__version__, version))
basic.Serializer.add_custom_property('Aeroo Reports %s' % version, 'Generator')
basic.Serializer.add_custom_property('OpenERP %s' % release.version, 'Software')
basic.Serializer.add_custom_property(module_info['website'], 'URL')
basic.Serializer.add_creation_date(time.strftime('%Y-%m-%dT%H:%M:%S'))
try:
data = basic.generate(**oo_parser.localcontext).render().getvalue()
except osv.except_osv, e:
raise
except Exception, e:
self._raise_exception(e, print_id)
######### OpenOffice extras #########
DC = netsvc.Service._services.get('openoffice')
#if (output!=report_xml.in_format[3:] or self.oo_subreports.get(print_id)):
if output!=report_xml.in_format[3:] or aeroo_print.subreports:
if aeroo_ooo and DC:
try:
data = self._generate_doc(DC, data, report_xml, print_id)
except Exception, e:
self.logger(_("OpenOffice.org related error!")+'\n'+str(e), logging.ERROR)
if DC._restart_ooo():
# We try again
try:
data = self._generate_doc(DC, data, report_xml, print_id)
except Exception, e:
self.logger(_("OpenOffice.org related error!")+'\n'+str(e), logging.ERROR)
if not report_xml.fallback_false:
output=report_xml.in_format[3:]
elif not report_xml.fallback_false:
output=report_xml.in_format[3:]
aeroo_print.subreports = []
else:
if report_xml.fallback_false:
if not aeroo_ooo:
raise osv.except_osv(_('OpenOffice.org related error!'), _('Module "report_aeroo_ooo" not installed.'))
elif not DC:
raise osv.except_osv(_('OpenOffice.org related error!'), _('Can not connect to OpenOffice.org.'))
else:
self.logger(_("PDF generator temporarily offline, please wait a minute"), logging.WARNING)
output=report_xml.in_format[3:]
elif output in ('pdf', 'doc', 'xls'):
output=report_xml.in_format[3:]
#####################################
if report_xml.content_fname:
output = report_xml.content_fname
self.logger("End process %s (%s), elapsed time: %s" % (self.name, self.table, time.time() - aeroo_print.start_time), logging.INFO) # debug mode
return data, output
Example 69
Project: VisTrails Source File: opm.py
def create_opm(workflow, version, log, reg):
id_scope = IdScope()
processes = []
# conn_artifacts = {}
artifacts = []
dependencies = []
accounts = []
depth_accounts = {}
file_artifacts = {}
db_artifacts = {}
def do_create_process(workflow, item_exec, account, module_processes):
process = create_process(item_exec, account, id_scope)
print 'adding process', process.db_id,
if hasattr(item_exec, 'db_module_name'):
print item_exec.db_module_name
elif hasattr(item_exec, 'db_group_name'):
print item_exec.db_group_name
processes.append(process)
module = workflow.db_modules_id_index[item_exec.db_module_id]
module_processes[module.db_id] = (module, process)
def get_package(reg, pkg_identifier, pkg_version=''):
if pkg_version:
try:
return reg.db_packages_identifier_index[(pkg_identifier,
pkg_version)]
except:
print (("Warning: Version '%s' package '%s' "
"is not in the registry") %
(pkg_version, pkg_identifier))
# spin and get current package
for pkg in reg.db_packages:
if pkg.db_identifier == pkg_identifier:
break
pkg = None
return pkg
def process_exec(item_exec, workflow, account, upstream_lookup,
downstream_lookup, depth, conn_artifacts=None,
function_artifacts=None, module_processes=None,
in_upstream_artifacts={}, in_downstream_artifacts={},
add_extras=False):
print 'in_upstream:', [(n, x.db_id)
for n, x_list in in_upstream_artifacts.iteritems() for x in x_list]
print 'in_downstream:', [(n, x.db_id)
for n, x_list in in_downstream_artifacts.iteritems() for x in x_list]
# FIXME merge conn_artifacts and function_artifacts
# problem is that a conn_artifact is OUTPUT while function_artifact
# is INPUT
if conn_artifacts is None:
conn_artifacts = {}
if function_artifacts is None:
function_artifacts = {}
if module_processes is None:
module_processes = {}
# while item_exec.vtType == DBLoopExec.vtType:
# item_exec = item_exec.db_item_execs[0]
(module, process) = module_processes[item_exec.db_module_id]
def process_connection(conn):
source = conn.db_ports_type_index['source']
source_t = (source.db_moduleId, source.db_name)
in_cache = False
print '!!! processing', source_t
if source_t in conn_artifacts:
artifact = conn_artifacts[source_t]
in_cache = True
else:
# key off source module and port name
# get descriptor from registry and then port_spec
# store port_spec as artifact
if source.db_moduleId < 0:
dest = conn.db_ports_type_index['destination']
module = source.db_module
else:
module = workflow.db_modules_id_index[source.db_moduleId]
print module.db_name, module.db_id
pkg = get_package(reg, module.db_package, module.db_version)
if not module.db_namespace:
module_namespace = ''
else:
module_namespace = module.db_namespace
module_desc = \
pkg.db_module_descriptors_name_index[(module.db_name,
module_namespace,
'')]
# FIXME make work for module port_specs, too
# for example, a PythonSource with a given port in
# module.db_portSpecs
port_spec = None
spec_t = (source.db_name, 'output')
if spec_t in module.db_portSpecs_name_index:
port_spec = module.db_portSpecs_name_index[spec_t]
while port_spec is None and \
module_desc.db_id != reg.db_root_descriptor_id:
if spec_t in module_desc.db_portSpecs_name_index:
port_spec = module_desc.db_portSpecs_name_index[spec_t]
base_id = module_desc.db_base_descriptor_id
# inefficient spin through db_packages but we do
# not have the descriptors_by_id index that exists
# on core.module_registry.ModuleRegistry here
module_desc = None
for pkg in reg.db_packages:
if base_id in pkg.db_module_descriptors_id_index:
module_desc = \
pkg.db_module_descriptors_id_index[base_id]
break
if module_desc is None:
raise KeyError("Cannot find base descriptor id %d" %
base_id)
# pkg = get_package(reg, module_desc.db_package,
# module_desc.db_package_version)
# module_desc = pkg.db_module_descriptors_id_index[base_id]
if port_spec is None:
port_spec = module_desc.db_portSpecs_name_index[spec_t]
print module_desc.db_name
artifact = \
create_artifact_from_port_spec(port_spec, account, id_scope)
artifacts.append(artifact)
print 'adding conn_artifact', artifact.db_id, source_t, \
source.db_moduleName
conn_artifacts[source_t] = artifact
return (artifact, in_cache)
def process_map(module, found_input_ports, found_output_ports):
print "*** Processing Map"
if depth+1 in depth_accounts:
account = depth_accounts[depth+1]
else:
account = create_account(depth+1, id_scope)
accounts.append(account)
depth_accounts[depth+1] = account
# need to have process that extracts artifacts for each iteration
input_list_artifact = found_input_ports['InputList']
result_artifact = found_output_ports.get('Result', None)
# if InputPort or OutputPort is a Connection we cannot do anything
if (found_input_ports['InputPort'].vtType == DBConnection.vtType or
found_input_ports['OutputPort'].vtType == DBConnection.vtType):
return
input_port_list = \
literal_eval(found_input_ports['InputPort'].db_parameters[0].db_val)
output_port = \
found_input_ports['OutputPort'].db_parameters[0].db_val
s_process = create_process_manual('Split', account, id_scope)
processes.append(s_process)
dependencies.append(create_used(s_process,
input_list_artifact,
account,
id_scope))
# need to have process that condenses artifacts from each iteration
if result_artifact is not None:
j_process = create_process_manual('Join', account, id_scope)
processes.append(j_process)
for loop_exec in item_exec.db_loop_execs:
for loop_iteration in loop_exec.db_loop_iterations:
loop_up_artifacts = {}
loop_down_artifacts = {}
for input_name in input_port_list:
port_spec = DBPortSpec(id=-1,
name=input_name,
type='output')
s_artifact = \
create_artifact_from_port_spec(port_spec, account,
id_scope)
artifacts.append(s_artifact)
dependencies.append(create_was_generated_by(s_artifact,
s_process,
account,
id_scope))
if input_name not in loop_up_artifacts:
loop_up_artifacts[input_name] = []
loop_up_artifacts[input_name].append(s_artifact)
# process output_port
if loop_iteration.db_completed == 1:
port_spec = DBPortSpec(id=-1,
name=output_port,
type='output')
o_artifact = \
create_artifact_from_port_spec(port_spec, account,
id_scope)
artifacts.append(o_artifact)
if output_port not in loop_down_artifacts:
loop_down_artifacts[output_port] = []
loop_down_artifacts[output_port].append(o_artifact)
if result_artifact is not None:
dependencies.append(create_used(j_process, o_artifact,
account, id_scope))
# now process a loop_exec
for child_exec in loop_iteration.db_item_execs:
do_create_process(workflow, child_exec, account,
module_processes)
for child_exec in loop_iteration.db_item_execs:
process_exec(child_exec, workflow, account, upstream_lookup,
downstream_lookup, depth+1, conn_artifacts,
function_artifacts, module_processes,
loop_up_artifacts, loop_down_artifacts, True)
# need to set Return artifact and connect j_process to it
if result_artifact is not None:
dependencies.append(create_was_generated_by(result_artifact,
j_process,
account,
id_scope))
def process_module_loop(module, found_input_ports, found_output_ports):
print "*** Processing Module with loops"
if depth+1 in depth_accounts:
account = depth_accounts[depth+1]
else:
account = create_account(depth+1, id_scope)
accounts.append(account)
depth_accounts[depth+1] = account
# need to have process that extracts artifacts for each iteration
result_artifacts = [a for r in found_output_ports
if found_output_ports[r] is not None
for a in found_output_ports[r]]
s_process = create_process_manual('Split', account, id_scope)
processes.append(s_process)
for input_name in found_input_ports:
dependencies.append(create_used(s_process,
found_input_ports[input_name],
account,
id_scope))
# need to have process that condenses artifacts from each iteration
if result_artifacts:
j_process = create_process_manual('Join', account, id_scope)
processes.append(j_process)
for loop_exec in item_exec.db_loop_execs:
for loop_iteration in loop_exec.db_loop_iterations:
loop_up_artifacts = {}
loop_down_artifacts = {}
for input_name in found_input_ports:
port_spec = DBPortSpec(id=-1,
name=input_name,
type='output')
s_artifact = \
create_artifact_from_port_spec(port_spec, account,
id_scope)
artifacts.append(s_artifact)
dependencies.append(create_was_generated_by(s_artifact,
s_process,
account,
id_scope))
if input_name not in loop_up_artifacts:
loop_up_artifacts[input_name] = []
loop_up_artifacts[input_name].append(s_artifact)
# process output_port
if loop_iteration.db_completed == 1:
for output_name in found_output_ports:
port_spec = DBPortSpec(id=-1,
name=output_name,
type='output')
o_artifact = \
create_artifact_from_port_spec(port_spec, account,
id_scope)
artifacts.append(o_artifact)
if output_name not in loop_down_artifacts:
loop_down_artifacts[output_name] = []
loop_down_artifacts[output_name].append(o_artifact)
if result_artifacts:
dependencies.append(create_used(j_process, o_artifact,
account, id_scope))
# now process a loop_exec
for child_exec in loop_iteration.db_item_execs:
do_create_process(workflow, child_exec, account,
module_processes)
for child_exec in loop_iteration.db_item_execs:
process_exec(child_exec, workflow, account, upstream_lookup,
downstream_lookup, depth+1, conn_artifacts,
function_artifacts, module_processes,
loop_up_artifacts, loop_down_artifacts, True)
# need to set Return artifacts and connect j_process to it
for result_artifact in result_artifacts:
dependencies.append(create_was_generated_by(result_artifact,
j_process,
account,
id_scope))
def process_group(module, found_input_ports, found_output_ports):
# identify depth and create new account if necessary
# recurse with new account
# need to link to upstream and downstream correctly
workflow = module.db_workflow
# run the whole upstream construction, etc, using this exec
# and the group's workflow
if depth+1 in depth_accounts:
account = depth_accounts[depth+1]
else:
account = create_account(depth+1, id_scope)
accounts.append(account)
depth_accounts[depth+1] = account
process_workflow(workflow, item_exec, account,
out_upstream_artifacts,
out_downstream_artifacts, depth+1)
def process_port_module(module, found_input_ports, found_output_ports):
port_name = found_input_ports['name'].db_parameters[0].db_val
if module.db_name == 'InputPort':
if port_name in in_upstream_artifacts:
for artifact in in_upstream_artifacts[port_name]:
dependencies.append(create_used(process, artifact,
account, id_scope))
elif module.db_name == 'OutputPort':
if port_name in in_downstream_artifacts:
for artifact in in_downstream_artifacts[port_name]:
dependencies.append(create_was_generated_by(artifact,
process,
account,
id_scope))
def process_if_module(module, found_input_ports, found_output_ports):
print 'processing IFFFF'
# need to decide which path was taken?
# check which module was executed, then know which branch was
# taken?
true_conn = found_input_ports['TruePort']
false_conn = found_input_ports['FalsePort']
true_id = true_conn.db_ports_type_index['source'].db_moduleId
false_id = false_conn.db_ports_type_index['source'].db_moduleId
print '$$ TRUE ID:', true_id
print '$$ FALSE ID:', false_id
for x,y in module_processes.iteritems():
print x, ':', y
if true_id in module_processes:
cond_process = module_processes[true_id][1]
elif false_id in module_processes:
cond_process = module_processes[false_id][1]
else:
raise RuntimeError("cannot process if")
# FIXME: assume true for now
# eventually need to check which module_id was execed for this
# current item exec
dependencies.append(create_was_triggered_by(cond_process,
process,
account,
id_scope))
if add_extras:
print '***adding extras'
out_upstream_artifacts = copy.copy(in_upstream_artifacts)
out_downstream_artifacts = copy.copy(in_downstream_artifacts)
for port_name, artifact_list in in_upstream_artifacts.iteritems():
for artifact in artifact_list:
dependencies.append(create_used(process, artifact,
account, id_scope))
for port_name, artifact_list in in_downstream_artifacts.iteritems():
for artifact in artifact_list:
# conn_artifacts[(port_name, 'output')] = artifact
dependencies.append(create_was_generated_by(artifact,
process,
account,
id_scope))
else:
out_upstream_artifacts = {}
out_downstream_artifacts = {}
ctrl_flow_pkg = 'org.vistrails.vistrails.control_flow'
basic_pkg = get_vistrails_basic_pkg_id()
all_special_ports = {'%s:Map' % ctrl_flow_pkg:
[{'InputPort': False,
'OutputPort': False,
'InputList': True,
'FunctionPort': False},
{'Result': True},
process_map],
'%s:Group' % basic_pkg:
[{},
{},
process_group],
'%s:InputPort' % basic_pkg:
[{'name': False,
'spec': False,
'old_name': False},
{},
process_port_module],
'%s:OutputPort' % basic_pkg:
[{'name': False,
'spec': False,
'old_name': False},
{},
process_port_module],
'%s:If' % ctrl_flow_pkg:
[{'TruePort': False,
'FalsePort': False},
{},
process_if_module],
}
module_desc_str = module.db_package + ':' + module.db_name
special_ports = all_special_ports.get(module_desc_str, [{}, {}, None])
found_input_ports = {}
found_output_ports = {}
# process used_files annotations
# process generated_tables annotations:
for annotation in item_exec.db_annotations:
def process_db_tuple(db_tuple):
db_tuple = (str(db_tuple[0]),) + db_tuple[1:]
if db_tuple not in db_artifacts:
artifact = create_artifact_from_db_tuple(db_tuple,
account,
id_scope)
artifacts.append(artifact)
db_artifacts[db_tuple] = artifact
else:
artifact = db_artifacts[db_tuple]
if int(artifact.db_accounts[0].db_id[4:]) > \
int(account.db_id[4:]):
artifact.db_accounts[0] = account
return artifact
if annotation.db_key == 'used_files':
used_files = literal_eval(annotation.db_value)
for fname in used_files:
if fname not in file_artifacts:
artifact = create_artifact_from_filename(fname,
account,
id_scope)
artifacts.append(artifact)
file_artifacts[fname] = artifact
else:
artifact = file_artifacts[fname]
if int(artifact.db_accounts[0].db_id[4:]) > \
int(account.db_id[4:]):
artifact.db_accounts[0] = account
dependencies.append(create_used(process, artifact,
account, id_scope))
elif annotation.db_key == 'generated_tables':
generated_tables = literal_eval(annotation.db_value)
for db_tuple in generated_tables:
artifact = process_db_tuple(db_tuple)
dependencies.append(create_was_generated_by(artifact,
process,
account,
id_scope))
elif annotation.db_key == 'used_tables':
used_tables = literal_eval(annotation.db_value)
for db_tuple in used_tables:
artifact = process_db_tuple(db_tuple)
dependencies.append(create_used(process, artifact,
account, id_scope))
# process functions
for function in module.db_functions:
# FIXME let found_input_ports, found_output_ports store lists?
if function.db_name in special_ports[0]:
if not special_ports[0][function.db_name]:
found_input_ports[function.db_name] = function
continue
function_t = (module.db_id, function.db_name)
if function_t in function_artifacts:
artifact = function_artifacts[function_t]
if int(artifact.db_accounts[0].db_id[4:]) > \
int(account.db_id[4:]):
artifact.db_accounts[0] = account
else:
artifact = create_artifact_from_function(function,
account,
id_scope)
print 'adding artifact', artifact.db_id
artifacts.append(artifact)
function_artifacts[function_t] = artifact
if function.db_name in special_ports[0]:
found_input_ports[function.db_name] = artifact
if function.db_name not in out_upstream_artifacts:
out_upstream_artifacts[function.db_name] = []
out_upstream_artifacts[function.db_name].append(artifact)
dependencies.append(create_used(process, artifact, account,
id_scope))
# process connections
if module.db_id in upstream_lookup:
for conns in upstream_lookup[module.db_id].itervalues():
for conn in conns:
dest = conn.db_ports_type_index['destination']
if dest.db_name in special_ports[0]:
if not special_ports[0][dest.db_name]:
found_input_ports[dest.db_name] = conn
continue
(artifact, in_cache) = process_connection(conn)
if dest.db_name in special_ports[0]:
found_input_ports[dest.db_name] = artifact
if dest.db_name not in out_upstream_artifacts:
out_upstream_artifacts[dest.db_name] = []
out_upstream_artifacts[dest.db_name].append(artifact)
print 'adding dependency (pa)', process.db_id, \
artifact.db_id
dependencies.append(create_used(process, artifact,
account, id_scope))
if item_exec.db_completed == 1:
if module.db_id in downstream_lookup:
# check if everything completed successfully for this?
for conns in downstream_lookup[module.db_id].itervalues():
for conn in conns:
source = conn.db_ports_type_index['source']
if source.db_name in special_ports[1]:
if not special_ports[1][source.db_name]:
found_output_ports[source.db_name] = conn
continue
dest = conn.db_ports_type_index['destination']
dest_module = \
workflow.db_modules_id_index[dest.db_moduleId]
dest_desc_str = dest_module.db_package + ':' + \
dest_module.db_name
dest_special_ports = all_special_ports.get(dest_desc_str,
[{}, {}, None])
if dest.db_name in dest_special_ports[0] and \
not dest_special_ports[0][dest.db_name]:
print 'skipping', dest.db_name
continue
(artifact, in_cache) = process_connection(conn)
if not in_cache:
if source.db_name in special_ports[1]:
found_output_ports[source.db_name] = artifact
if source.db_name not in out_downstream_artifacts:
out_downstream_artifacts[source.db_name] = []
out_downstream_artifacts[source.db_name].append(artifact)
print 'adding dependency (ap)', artifact.db_id, \
process.db_id
dependencies.append(create_was_generated_by(artifact,
process,
account,
id_scope))
if special_ports[2] is not None:
special_ports[2](module, found_input_ports, found_output_ports)
elif item_exec.db_loop_execs:
# A normal module that is looping internally
# Probably an automatic list loop
process_module_loop(module, in_upstream_artifacts, out_upstream_artifacts)
def process_workflow(workflow, parent_exec, account, upstream_artifacts={},
downstream_artifacts={}, depth=0, top_version=False):
# create process for each module_exec
# for each module, find parameters and upstream connections
# tie them in
# each connection's source port is
# associated with a transient data item
# use wasDerivedBy and used relationships to tie things together
# check run-time annotations?
print 'processing workflow', parent_exec
upstream_lookup = {}
downstream_lookup = {}
for connection in workflow.db_connections:
source = connection.db_ports_type_index['source']
if source.db_moduleId not in downstream_lookup:
downstream_lookup[source.db_moduleId] = {}
if source.db_name not in downstream_lookup[source.db_moduleId]:
downstream_lookup[source.db_moduleId][source.db_name] = []
downstream_lookup[source.db_moduleId][source.db_name].append(connection)
dest = connection.db_ports_type_index['destination']
if dest.db_moduleId not in upstream_lookup:
upstream_lookup[dest.db_moduleId] = {}
if dest.db_name not in upstream_lookup[dest.db_moduleId]:
upstream_lookup[dest.db_moduleId][dest.db_name] = []
upstream_lookup[dest.db_moduleId][dest.db_name].append(connection)
conn_artifacts = {}
function_artifacts = {}
module_processes = {}
print ' upstream_lookup:'
lookup = upstream_lookup
for id, name_list in lookup.iteritems():
print ' ', id, ':', name_list.keys()
print ' downstream_lookup:'
lookup = downstream_lookup
for id, name_list in lookup.iteritems():
print ' ', id, ':', name_list.keys()
# print ' upstream_lookup:', upstream_lookup
# print ' downstream_lookup:', downstream_lookup
if top_version:
for workflow_exec in parent_exec.db_workflow_execs:
if workflow_exec.db_parent_version != version:
continue
conn_artifacts = {}
function_artifacts = {}
module_processes = {}
upstream_artifacts = {}
downstream_artifacts = {}
for item_exec in workflow_exec.db_item_execs:
do_create_process(workflow, item_exec, account,
module_processes)
for item_exec in workflow_exec.db_item_execs:
process_exec(item_exec, workflow, account,
upstream_lookup, downstream_lookup,
depth, conn_artifacts, function_artifacts,
module_processes,
upstream_artifacts, downstream_artifacts)
else:
for item_exec in parent_exec.db_item_execs:
do_create_process(workflow, item_exec, account,
module_processes)
for item_exec in parent_exec.db_item_execs:
process_exec(item_exec, workflow, account, upstream_lookup,
downstream_lookup, depth, conn_artifacts,
function_artifacts, module_processes,
upstream_artifacts, downstream_artifacts)
account_id = id_scope.getNewId(DBOpmAccount.vtType)
account = DBOpmAccount(id='acct' + str(account_id),
value=str(0))
accounts.append(account)
depth_accounts[0] = account
process_workflow(workflow, log, account, {}, {}, 0, True)
#print processes
#print dependencies
max_depth = max(depth_accounts)
def add_finer_depths(objs, exclude_groups=False, exclude_deps=False,
p_ids=set()):
new_p_ids = []
for obj in objs:
can_update=True
if exclude_groups:
if obj.db_value.db_value.vtType == DBGroupExec.vtType:
new_p_ids.append(obj.db_id)
can_update = False
elif obj.db_value.db_value.vtType == DBModuleExec.vtType and \
len(obj.db_value.db_value.db_loop_execs) > 0:
new_p_ids.append(obj.db_id)
can_update = False
if exclude_deps:
if ((obj.vtType == DBOpmWasGeneratedBy.vtType and
obj.db_cause.db_id in p_ids) or
(obj.vtType == DBOpmUsed.vtType and
obj.db_effect.db_id in p_ids)):
can_update = False
if can_update:
min_depth = int(obj.db_accounts[0].db_id[4:])
for i in xrange(min_depth+1, max_depth+1):
obj.db_add_account(DBOpmAccountId(id='acct' + str(i)))
return new_p_ids
# FIXME: also exclude group dependencies (used, wasGeneratedBy)...
p_ids = add_finer_depths(processes, True)
print p_ids
add_finer_depths(artifacts)
add_finer_depths(dependencies, False, True, set(p_ids))
overlaps = []
for i in xrange(max_depth+1):
for j in xrange(i+1, max_depth+1):
ids = [DBOpmAccountId(id='acct' + str(i)),
DBOpmAccountId(id='acct' + str(j))]
overlaps.append(DBOpmOverlaps(opm_account_ids=ids))
opm_graph = DBOpmGraph(accounts=DBOpmAccounts(accounts=accounts,
opm_overlapss=overlaps),
processes=DBOpmProcesses(processs=processes),
artifacts=\
DBOpmArtifacts(artifacts=artifacts),
dependencies=\
DBOpmDependencies(dependencys=dependencies),
)
return opm_graph
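In the add_extras branch above, out_upstream_artifacts and out_downstream_artifacts start as shallow copies of the incoming dictionaries. A minimal sketch of what that gives you (the names below are illustrative, not taken from VisTrails): the copy gets its own key set, but the list values are still shared, so appending to a list reached through an existing key is visible from both dictionaries, while newly added keys stay local to the copy.
import copy

in_upstream = {'value': ['artifact-1']}          # port name -> list of artifacts
out_upstream = copy.copy(in_upstream)            # new dict, shared list values

out_upstream['result'] = []                      # new key: only in the copy
out_upstream['result'].append('artifact-2')
out_upstream['value'].append('artifact-3')       # shared list: original sees this too

print(in_upstream)    # {'value': ['artifact-1', 'artifact-3']}
print(out_upstream)   # {'value': ['artifact-1', 'artifact-3'], 'result': ['artifact-2']}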
0
Example 70
Project: frappe Source File: doctype.py
def validate_fields(meta):
"""Validate doctype fields. Checks
1. There are no illegal characters in fieldnames
2. If fieldnames are unique.
3. Fields that do have database columns are not mandatory.
4. `Link` and `Table` options are valid.
5. **Hidden** and **Mandatory** are not set simultaneously.
7. `Check` type field has default as 0 or 1.
8. `Dynamic Links` are correctly defined.
9. Precision is set in numeric fields and is between 1 & 6.
10. Fold is not at the end (if set).
11. `search_fields` are valid.
12. `title_field` and title field pattern are valid.
13. `unique` check is only valid for Data, Link and Read Only fieldtypes.
14. `unique` cannot be checked if there exist non-unique values.
:param meta: `frappe.model.meta.Meta` object to check."""
def check_illegal_characters(fieldname):
validate_column_name(fieldname)
def check_unique_fieldname(fieldname):
duplicates = filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields))
if len(duplicates) > 1:
frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates)))
def check_illegal_mandatory(d):
if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd:
frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype))
def check_link_table_options(d):
if d.fieldtype in ("Link", "Table"):
if not d.options:
frappe.throw(_("Options requried for Link or Table type field {0} in row {1}").format(d.label, d.idx))
if d.options=="[Select]" or d.options==d.parent:
return
if d.options != d.parent:
options = frappe.db.get_value("DocType", d.options, "name")
if not options:
frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx))
else:
# fix case
d.options = options
def check_hidden_and_mandatory(d):
if d.hidden and d.reqd and not d.default:
frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))
def check_width(d):
if d.fieldtype == "Currency" and cint(d.width) < 100:
frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))
def check_in_list_view(d):
if d.in_list_view and (d.fieldtype in not_allowed_in_list_view):
frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))
def check_dynamic_link_options(d):
if d.fieldtype=="Dynamic Link":
doctype_pointer = filter(lambda df: df.fieldname==d.options, fields)
if not doctype_pointer or (doctype_pointer[0].fieldtype not in ("Link", "Select")) \
or (doctype_pointer[0].fieldtype=="Link" and doctype_pointer[0].options!="DocType"):
frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))
def check_illegal_default(d):
if d.fieldtype == "Check" and d.default and d.default not in ('0', '1'):
frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'"))
if d.fieldtype == "Select" and d.default and (d.default not in d.options.split("\n")):
frappe.throw(_("Default for {0} must be an option").format(d.fieldname))
def check_precision(d):
if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6):
frappe.throw(_("Precision should be between 1 and 6"))
def check_unique_and_text(d):
if meta.issingle:
d.unique = 0
d.search_index = 0
if getattr(d, "unique", False):
if d.fieldtype not in ("Data", "Link", "Read Only"):
frappe.throw(_("Fieldtype {0} for {1} cannot be unique").format(d.fieldtype, d.label))
if not d.get("__islocal"):
try:
has_non_unique_values = frappe.db.sql("""select `{fieldname}`, count(*)
from `tab{doctype}` group by `{fieldname}` having count(*) > 1 limit 1""".format(
doctype=d.parent, fieldname=d.fieldname))
except MySQLdb.OperationalError, e:
if e.args and e.args[0]==1054:
# ignore if missing column, else raise
# this happens in case of Custom Field
pass
else:
raise
else:
# else of try block
if has_non_unique_values and has_non_unique_values[0][0]:
frappe.throw(_("Field '{0}' cannot be set as Unique as it has non-unique values").format(d.label))
if d.search_index and d.fieldtype in ("Text", "Long Text", "Small Text", "Code", "Text Editor"):
frappe.throw(_("Fieldtype {0} for {1} cannot be indexed").format(d.fieldtype, d.label))
def check_fold(fields):
fold_exists = False
for i, f in enumerate(fields):
if f.fieldtype=="Fold":
if fold_exists:
frappe.throw(_("There can be only one Fold in a form"))
fold_exists = True
if i < len(fields)-1:
nxt = fields[i+1]
if nxt.fieldtype != "Section Break":
frappe.throw(_("Fold must come before a Section Break"))
else:
frappe.throw(_("Fold can not be at the end of the form"))
def check_search_fields(meta):
"""Throw exception if `search_fields` don't contain valid fields."""
if not meta.search_fields:
return
fieldname_list = [d.fieldname for d in fields]
for fieldname in (meta.search_fields or "").split(","):
fieldname = fieldname.strip()
if fieldname not in fieldname_list:
frappe.throw(_("Search field {0} is not valid").format(fieldname))
def check_title_field(meta):
"""Throw exception if `title_field` isn't a valid fieldname."""
if not meta.get("title_field"):
return
fieldname_list = [d.fieldname for d in fields]
if meta.title_field not in fieldname_list:
frappe.throw(_("Title field must be a valid fieldname"), InvalidFieldNameError)
def _validate_title_field_pattern(pattern):
if not pattern:
return
for fieldname in re.findall("{(.*?)}", pattern, re.UNICODE):
if fieldname.startswith("{"):
# edge case when double curlies are used for escape
continue
if fieldname not in fieldname_list:
frappe.throw(_("{{{0}}} is not a valid fieldname pattern. It should be {{field_name}}.").format(fieldname),
InvalidFieldNameError)
df = meta.get("fields", filters={"fieldname": meta.title_field})[0]
if df:
_validate_title_field_pattern(df.options)
_validate_title_field_pattern(df.default)
def check_image_field(meta):
'''check image_field exists and is of type "Attach Image"'''
if not meta.image_field:
return
df = meta.get("fields", {"fieldname": meta.image_field})
if not df:
frappe.throw(_("Image field must be a valid fieldname"), InvalidFieldNameError)
if df[0].fieldtype != 'Attach Image':
frappe.throw(_("Image field must be of type Attach Image"), InvalidFieldNameError)
def check_timeline_field(meta):
if not meta.timeline_field:
return
fieldname_list = [d.fieldname for d in fields]
if meta.timeline_field not in fieldname_list:
frappe.throw(_("Timeline field must be a valid fieldname"), InvalidFieldNameError)
df = meta.get("fields", {"fieldname": meta.timeline_field})[0]
if df.fieldtype not in ("Link", "Dynamic Link"):
frappe.throw(_("Timeline field must be a Link or Dynamic Link"), InvalidFieldNameError)
fields = meta.get("fields")
not_allowed_in_list_view = list(copy.copy(no_value_fields))
if meta.istable:
not_allowed_in_list_view.remove('Button')
for d in fields:
if not d.permlevel: d.permlevel = 0
if not d.fieldname:
frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
d.fieldname = d.fieldname.lower()
check_illegal_characters(d.fieldname)
check_unique_fieldname(d.fieldname)
check_illegal_mandatory(d)
check_link_table_options(d)
check_dynamic_link_options(d)
check_hidden_and_mandatory(d)
check_in_list_view(d)
check_illegal_default(d)
check_unique_and_text(d)
check_fold(fields)
check_search_fields(meta)
check_title_field(meta)
check_timeline_field(meta)
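The copy.copy call near the bottom of validate_fields protects a shared module-level constant: no_value_fields is copied (and wrapped in list()) before 'Button' is removed for child tables, so the original sequence is never mutated. A small sketch of the pattern, using a made-up stand-in for no_value_fields rather than the real frappe constant:
import copy

# Hypothetical stand-in for frappe's module-level no_value_fields sequence.
no_value_fields = ('Section Break', 'Column Break', 'HTML', 'Button')

not_allowed_in_list_view = list(copy.copy(no_value_fields))
not_allowed_in_list_view.remove('Button')        # per-call change, constant untouched

print(no_value_fields)             # ('Section Break', 'Column Break', 'HTML', 'Button')
print(not_allowed_in_list_view)    # ['Section Break', 'Column Break', 'HTML']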
0
Example 71
Project: cgat Source File: Experiment.py
def Start(parser=None,
argv=sys.argv,
quiet=False,
no_parsing=False,
add_csv_options=False,
add_database_options=False,
add_pipe_options=True,
add_cluster_options=False,
add_output_options=False,
return_parser=False):
"""set up an experiment.
The :py:func:`Start` method will set up a file logger and add some
default and some optional options to the command line parser. It
will then parse the command line and set up input/output
redirection and start a timer for benchmarking purposes.
The default options added by this method are:
``-v/--verbose``
the :term:`loglevel`
``timeit``
turn on benchmarking information and save to file
``timeit-name``
name to use for timing information,
``timeit-header``
output header for timing information.
``seed``
the random seed. If given, the python random
number generator will be initialized with this
seed.
Optional options added are:
add_csv_options
``dialect``
csv_dialect. the default is ``excel-tab``, defaulting to
:term:`tsv` formatted files.
add_database_options
``-C/--connection``
psql connection string
``-U/--user``
psql user name
add_cluster_options
``--use-cluster``
use cluster
``--cluster-priority``
cluster priority to request
``--cluster-queue-manager``
cluster queue manager to use
``--cluster-queue``
cluster queue to use
``--cluster-num-jobs``
number of jobs to submit to the cluster at the same time
``--cluster-memory-resource``
name of the cluster memory resource (SGE specific option)
``--cluster-memory-default``
default amount of memory allocated per job.
``--cluster-options``
additional options to the cluster for each job.
add_output_options
``-P/--output-filename-pattern``
Pattern to use for output filenames.
Arguments
---------
param parser : :py:class:`E.OptionParser`
instance with command line options.
argv : list
command line options to parse. Defaults to
:py:data:`sys.argv`
quiet : bool
set :term:`loglevel` to 0 - no logging
no_parsing : bool
do not parse command line options
return_parser : bool
return the parser object, no parsing. Useful for inspecting
the command line options of a script without running it.
add_csv_options : bool
add common options for parsing :term:`tsv` separated files.
add_database_options : bool
add common options for connecting to various databases.
add_pipe_options : bool
add common options for redirecting input/output
add_cluster_options : bool
add common options for scripts submitting jobs to the cluster
add_output_options : bool
add common options for working with multiple output files
Returns
-------
tuple
(:py:class:`E.OptionParser` object, list of positional
arguments)
"""
if not parser:
parser = OptionParser(
version="%prog version: $Id$")
global global_options, global_args, global_starting_time
# save default values given by user
user_defaults = copy.copy(parser.defaults)
global_starting_time = time.time()
group = OptionGroup(parser, "Script timing options")
group.add_option("--timeit", dest='timeit_file', type="string",
help="store timeing information in file [%default].")
group.add_option("--timeit-name", dest='timeit_name', type="string",
help="name in timing file for this class of jobs "
"[%default].")
group.add_option("--timeit-header", dest='timeit_header',
action="store_true",
help="add header for timing information [%default].")
parser.add_option_group(group)
group = OptionGroup(parser, "Common options")
group.add_option("--random-seed", dest='random_seed', type="int",
help="random seed to initialize number generator "
"with [%default].")
group.add_option("-v", "--verbose", dest="loglevel", type="int",
help="loglevel [%default]. The higher, the more output.")
group.add_option("-?", dest="short_help", action="callback",
callback=callbackShortHelp,
help="output short help (command line options only.")
parser.add_option_group(group)
if quiet:
parser.set_defaults(loglevel=0)
else:
parser.set_defaults(loglevel=1)
parser.set_defaults(
timeit_file=None,
timeit_name='all',
timeit_header=None,
random_seed=None,
)
if add_csv_options:
parser.add_option("--csv-dialect", dest="csv_dialect", type="string",
help="csv dialect to use [%default].")
parser.set_defaults(
csv_dialect="excel-tab",
csv_lineterminator="\n",
)
if add_cluster_options:
group = OptionGroup(parser, "cluster options")
group.add_option("--no-cluster", "--local", dest="without_cluster",
action="store_true",
help="do no use cluster - run locally [%default].")
group.add_option("--cluster-priority", dest="cluster_priority",
type="int",
help="set job priority on cluster [%default].")
group.add_option("--cluster-queue-manager", dest="cluster_queue_manager",
type="string",
help="set cluster queue manager [%default].")
group.add_option("--cluster-queue", dest="cluster_queue",
type="string",
help="set cluster queue [%default].")
group.add_option("--cluster-memory-resource", dest="cluster_memory_resource",
type="string",
help="set cluster memory resource [%default].")
group.add_option("--cluster-memory-default", dest="cluster_memory_default",
type="string",
help="set cluster memory default [%default].")
group.add_option("--cluster-num-jobs", dest="cluster_num_jobs",
type="int",
help="number of jobs to submit to the queue execute "
"in parallel [%default].")
group.add_option("--cluster-parallel",
dest="cluster_parallel_environment",
type="string",
help="name of the parallel environment to use "
"[%default].")
group.add_option("--cluster-options", dest="cluster_options",
type="string",
help="additional options for cluster jobs, passed "
"on to queuing system [%default].")
parser.set_defaults(without_cluster=False,
cluster_queue_manager=None,
cluster_queue=None,
cluster_memory_resource=None,
cluster_memory_default=None,
cluster_priority=None,
cluster_num_jobs=None,
cluster_parallel_environment=None,
cluster_options=None)
parser.add_option_group(group)
if add_output_options or add_pipe_options:
group = OptionGroup(parser, "Input/output options")
if add_output_options:
group.add_option(
"-P", "--output-filename-pattern",
dest="output_filename_pattern", type="string",
help="OUTPUT filename pattern for various methods "
"[%default].")
group.add_option("-F", "--force-output", dest="output_force",
action="store_true",
help="force over-writing of existing files.")
parser.set_defaults(output_filename_pattern="%s",
output_force=False)
if add_pipe_options:
group.add_option("-I", "--stdin", dest="stdin", type="string",
help="file to read stdin from [default = stdin].",
metavar="FILE")
group.add_option("-L", "--log", dest="stdlog", type="string",
help="file with logging information "
"[default = stdout].",
metavar="FILE")
group.add_option("-E", "--error", dest="stderr", type="string",
help="file with error information "
"[default = stderr].",
metavar="FILE")
group.add_option("-S", "--stdout", dest="stdout", type="string",
help="file where output is to go "
"[default = stdout].",
metavar="FILE")
parser.set_defaults(stderr=sys.stderr)
parser.set_defaults(stdout=sys.stdout)
parser.set_defaults(stdlog=sys.stdout)
parser.set_defaults(stdin=sys.stdin)
parser.add_option_group(group)
if add_database_options:
group = OptionGroup(parser, "Database connection options")
group.add_option(
"--database-backend", dest="database_backend", type="choice",
choices=("sqlite", "mysql", "postgres"),
help="database backend [%default].")
group.add_option(
"--database-host", dest="database_host", type="string",
help="database host [%default].")
group.add_option(
"--database-name", dest="database_name", type="string",
help="name of the database [%default].")
group.add_option(
"--database-username", dest="database_username", type="string",
help="database username [%default].")
group.add_option(
"--database-password", dest="database_password", type="string",
help="database password [%default].")
group.add_option(
"--database-port", dest="database_port", type="int",
help="database port [%default].")
parser.set_defaults(
database_backend="sqlite",
database_name="csvdb",
database_host="",
database_port=3306,
database_username="",
database_password="")
parser.add_option_group(group)
# restore user defaults
parser.defaults.update(user_defaults)
if return_parser:
return parser
if not no_parsing:
(global_options, global_args) = parser.parse_args(argv[1:])
if global_options.random_seed is not None:
random.seed(global_options.random_seed)
if add_pipe_options:
if global_options.stdout != sys.stdout:
global_options.stdout = openFile(global_options.stdout, "w")
if global_options.stderr != sys.stderr:
if global_options.stderr == "stderr":
global_options.stderr = global_options.stderr
else:
global_options.stderr = openFile(global_options.stderr, "w")
if global_options.stdlog != sys.stdout:
global_options.stdlog = openFile(global_options.stdlog, "a")
if global_options.stdin != sys.stdin:
global_options.stdin = openFile(global_options.stdin, "r")
else:
global_options.stderr = sys.stderr
global_options.stdout = sys.stdout
global_options.stdlog = sys.stdout
global_options.stdin = sys.stdin
if global_options.loglevel >= 1:
global_options.stdlog.write(getHeader() + "\n")
global_options.stdlog.write(getParams(global_options) + "\n")
global_options.stdlog.flush()
# configure logging
# map from 0-10 to logging scale
# 0: quiet
# 1: little verbosity
# >1: increased verbosity
if global_options.loglevel == 0:
lvl = logging.ERROR
elif global_options.loglevel == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
if global_options.stdout == global_options.stdlog:
format = '# %(asctime)s %(levelname)s %(message)s'
else:
format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(
level=lvl,
format=format,
stream=global_options.stdlog)
# set up multi-line logging
# Note that .handlers is not part of the API, might change
# Solution is to configure handlers explicitly.
for handler in logging.getLogger().handlers:
handler.setFormatter(MultiLineFormatter(format))
return global_options, global_args
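The snapshot at the top of Start, user_defaults = copy.copy(parser.defaults), paired with parser.defaults.update(user_defaults) near the end, is a save/restore idiom: the function adds its own set_defaults calls in between, but any default the caller had already set wins again. A self-contained sketch of the idiom with made-up option values (optparse's OptionParser really does expose a defaults dict):
import copy
from optparse import OptionParser

parser = OptionParser()
parser.set_defaults(loglevel=5)               # default chosen by the caller

user_defaults = copy.copy(parser.defaults)    # snapshot before adding our own
parser.set_defaults(loglevel=1, timeit_file=None)

parser.defaults.update(user_defaults)         # restore: caller's values win again
print(parser.defaults)                        # {'loglevel': 5, 'timeit_file': None}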
0
Example 72
Project: UMI-tools Source File: Utilities.py
def Start(parser=None,
argv=sys.argv,
quiet=False,
add_pipe_options=True,
return_parser=False):
"""set up an experiment.
The :py:func:`Start` method will set up a file logger and add some
default and some optional options to the command line parser. It
will then parse the command line and set up input/output
redirection and start a timer for benchmarking purposes.
The default options added by this method are:
``-v/--verbose``
the :term:`loglevel`
``timeit``
turn on benchmarking information and save to file
``timeit-name``
name to use for timing information,
``timeit-header``
output header for timing information.
``seed``
the random seed. If given, the python random
number generator will be initialized with this
seed.
Optional options added are:
Arguments
---------
param parser : :py:class:`U.OptionParser`
instance with command line options.
argv : list
command line options to parse. Defaults to
:py:data:`sys.argv`
quiet : bool
set :term:`loglevel` to 0 - no logging
return_parser : bool
return the parser object, no parsing. Useful for inspecting
the command line options of a script without running it.
add_pipe_options : bool
add common options for redirecting input/output
Returns
-------
tuple
(:py:class:`U.OptionParser` object, list of positional
arguments)
"""
if not parser:
parser = OptionParser(
version="%prog version: $Id$")
global global_options, global_args, global_starting_time
# save default values given by user
user_defaults = copy.copy(parser.defaults)
global_starting_time = time.time()
group = OptionGroup(parser, "Script timing options")
group.add_option("--timeit", dest='timeit_file', type="string",
help="store timeing information in file [%default].")
group.add_option("--timeit-name", dest='timeit_name', type="string",
help="name in timing file for this class of jobs "
"[%default].")
group.add_option("--timeit-header", dest='timeit_header',
action="store_true",
help="add header for timing information [%default].")
parser.add_option_group(group)
group = OptionGroup(parser, "Common options")
group.add_option("--random-seed", dest='random_seed', type="int",
help="random seed to initialize number generator "
"with [%default].")
group.add_option("-v", "--verbose", dest="loglevel", type="int",
help="loglevel [%default]. The higher, the more output.")
group.add_option("-?", dest="short_help", action="callback",
callback=callbackShortHelp,
help="output short help (command line options only.")
parser.add_option_group(group)
if quiet:
parser.set_defaults(loglevel=0)
else:
parser.set_defaults(loglevel=1)
parser.set_defaults(
timeit_file=None,
timeit_name='all',
timeit_header=None,
random_seed=None,
)
if add_pipe_options:
group = OptionGroup(parser, "Input/output options")
group.add_option("-I", "--stdin", dest="stdin", type="string",
help="file to read stdin from [default = stdin].",
metavar="FILE")
group.add_option("-L", "--log", dest="stdlog", type="string",
help="file with logging information "
"[default = stdout].",
metavar="FILE")
group.add_option("-E", "--error", dest="stderr", type="string",
help="file with error information "
"[default = stderr].",
metavar="FILE")
group.add_option("-S", "--stdout", dest="stdout", type="string",
help="file where output is to go "
"[default = stdout].",
metavar="FILE")
group.add_option("--log2stderr", dest="log2stderr",
action="store_true", help="send logging information"
" to stderr [default = False].")
parser.set_defaults(stderr=sys.stderr)
parser.set_defaults(stdout=sys.stdout)
parser.set_defaults(stdlog=sys.stdout)
parser.set_defaults(stdin=sys.stdin)
parser.set_defaults(log2stderr=False)
parser.add_option_group(group)
# restore user defaults
parser.defaults.update(user_defaults)
if return_parser:
return parser
global_options, global_args = parser.parse_args(argv[1:])
if global_options.random_seed is not None:
random.seed(global_options.random_seed)
if add_pipe_options:
if global_options.stdout != sys.stdout:
global_options.stdout = openFile(global_options.stdout, "w")
if global_options.stderr != sys.stderr:
if global_options.stderr == "stderr":
global_options.stderr = global_options.stderr
else:
global_options.stderr = openFile(global_options.stderr, "w")
if global_options.stdlog != sys.stdout:
global_options.stdlog = openFile(global_options.stdlog, "a")
elif global_options.log2stderr:
global_options.stdlog = global_options.stderr
if global_options.stdin != sys.stdin:
global_options.stdin = openFile(global_options.stdin, "r")
else:
global_options.stderr = sys.stderr
global_options.stdout = sys.stdout
global_options.stdin = sys.stdin
if global_options.log2stderr:
global_options.stdlog = sys.stderr
else:
global_options.stdlog = sys.stdout
if global_options.loglevel >= 1:
global_options.stdlog.write(getHeader() + "\n")
global_options.stdlog.write(getParams(global_options) + "\n")
global_options.stdlog.flush()
# configure logging
# map from 0-10 to logging scale
# 0: quiet
# 1: little verbosity
# >1: increased verbosity
if global_options.loglevel == 0:
lvl = logging.ERROR
elif global_options.loglevel == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
if global_options.stdout == global_options.stdlog:
format = '# %(asctime)s %(levelname)s %(message)s'
else:
format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(
level=lvl,
format=format,
stream=global_options.stdlog)
# set up multi-line logging
# Note that .handlers is not part of the API, might change
# Solution is to configure handlers explicitly.
for handler in logging.getLogger().handlers:
handler.setFormatter(MultiLineFormatter(format))
return global_options, global_args
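This Start uses the same snapshot/restore idiom as the cgat version above; one detail worth noting is why a shallow copy is the right depth. The defaults dict ends up holding handles such as sys.stdin and sys.stdout, which copy.copy simply aliases, whereas copy.deepcopy would try to duplicate the file objects and fail. A small illustration under that assumption:
import copy
import sys

defaults = {'loglevel': 1, 'stdout': sys.stdout}

shallow = copy.copy(defaults)
print(shallow['stdout'] is sys.stdout)    # True: the handle is shared, not duplicated

try:
    copy.deepcopy(defaults)               # file objects cannot be deep-copied
except TypeError as exc:
    print('deepcopy failed:', exc)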
0
Example 73
Project: VisTrails Source File: spreadsheet_execute.py
def assignPipelineCellLocations(pipeline, sheetName,
row, col, cellIds=None,
minRowCount=None, minColCount=None):
reg = get_module_registry()
spreadsheet_cell_desc = reg.get_descriptor_by_name(spreadsheet_pkg,
'SpreadsheetCell')
output_module_desc = reg.get_descriptor_by_name(
'org.vistrails.vistrails.basic', 'OutputModule')
create_module = VistrailController.create_module_static
create_function = VistrailController.create_function_static
create_connection = VistrailController.create_connection_static
pipeline = copy.copy(pipeline)
root_pipeline = pipeline
if cellIds is None:
inspector = PipelineInspector()
inspector.inspect_spreadsheet_cells(pipeline)
inspector.inspect_ambiguous_modules(pipeline)
cellIds = inspector.spreadsheet_cells
def fix_cell_module(pipeline, mId):
# Delete connections to 'Location' input port
conns_to_delete = []
for c in pipeline.connection_list:
if c.destinationId == mId and c.destination.name == 'Location':
conns_to_delete.append(c.id)
for c_id in conns_to_delete:
pipeline.delete_connection(c_id)
# a hack to first get the id_scope to the local pipeline scope
# then make them negative by hacking the getNewId method
# all of this is reset at the end of this block
id_scope = pipeline.tmp_id
orig_getNewId = pipeline.tmp_id.__class__.getNewId
def getNewId(self, objType):
return -orig_getNewId(self, objType)
pipeline.tmp_id.__class__.getNewId = getNewId
# Add a sheet reference with a specific name
sheetReference = create_module(id_scope, spreadsheet_pkg,
"SheetReference")
sheetNameFunction = create_function(id_scope, sheetReference,
"SheetName", [str(sheetName)])
# ["%s %d" % (sheetPrefix, sheet)])
sheetReference.add_function(sheetNameFunction)
if minRowCount is not None:
minRowFunction = create_function(id_scope, sheetReference,
"MinRowCount", [str(minRowCount)])
# [str(rowCount*vRCount)])
sheetReference.add_function(minRowFunction)
if minColCount is not None:
minColFunction = create_function(id_scope, sheetReference,
"MinColumnCount",
[str(minColCount)])
# [str(colCount*vCCount)])
sheetReference.add_function(minColFunction)
# Add a cell location module with a specific row and column
cellLocation = create_module(id_scope, spreadsheet_pkg,
"CellLocation")
rowFunction = create_function(id_scope, cellLocation, "Row", [str(row)])
# [str(row*vRCount+vRow+1)])
colFunction = create_function(id_scope, cellLocation, "Column",
[str(col)])
# [str(col*vCCount+vCol+1)])
cellLocation.add_function(rowFunction)
cellLocation.add_function(colFunction)
# Then connect the SheetReference to the CellLocation
sheet_conn = create_connection(id_scope, sheetReference, "value",
cellLocation, "SheetReference")
# Then connect the CellLocation to the spreadsheet cell
cell_module = pipeline.get_module_by_id(mId)
cell_conn = create_connection(id_scope, cellLocation, "value",
cell_module, "Location")
pipeline.add_module(sheetReference)
pipeline.add_module(cellLocation)
pipeline.add_connection(sheet_conn)
pipeline.add_connection(cell_conn)
# replace the getNewId method
pipeline.tmp_id.__class__.getNewId = orig_getNewId
def fix_output_module(pipeline, mId):
# Remove all connections to 'configuration' input port
conns_to_delete = []
for c in pipeline.connection_list:
if (c.destinationId == mId and
c.destination.name == 'configuration'):
conns_to_delete.append(c.id)
for c_id in conns_to_delete:
pipeline.delete_connection(c_id)
m = pipeline.modules[mId]
# Remove all functions on 'configuration' input port
funcs_to_delete = []
for f in m.functions:
if f.name == 'configuration':
funcs_to_delete.append(f.real_id)
for f_id in funcs_to_delete:
m.delete_function_by_real_id(f_id)
# a hack to first get the id_scope to the local pipeline scope
# then make them negative by hacking the getNewId method
# all of this is reset at the end of this block
id_scope = pipeline.tmp_id
orig_getNewId = pipeline.tmp_id.__class__.getNewId
def getNewId(self, objType):
return -orig_getNewId(self, objType)
pipeline.tmp_id.__class__.getNewId = getNewId
config = {'row': row - 1, 'col': col - 1}
if minRowCount is not None:
config['sheetRowCount'] = minRowCount
if minColCount is not None:
config['sheetColCount'] = minColCount
if sheetName is not None:
config['sheetName']= sheetName
config = {'spreadsheet': config}
config_function = create_function(id_scope, m,
'configuration', [repr(config)])
m.add_function(config_function)
# replace the getNewId method
pipeline.tmp_id.__class__.getNewId = orig_getNewId
for id_list in cellIds:
cell_pipeline = pipeline
# find at which depth we need to be working
if isinstance(id_list, (int, long)):
mId = id_list
m = cell_pipeline.modules[mId]
else:
id_iter = iter(id_list)
mId = next(id_iter)
m = cell_pipeline.modules[mId]
for mId in id_iter:
cell_pipeline = m.pipeline
m = cell_pipeline.modules[mId]
if reg.is_descriptor_subclass(m.module_descriptor,
spreadsheet_cell_desc):
fix_cell_module(cell_pipeline, mId)
elif reg.is_descriptor_subclass(m.module_descriptor,
output_module_desc):
fix_output_module(cell_pipeline, mId)
return root_pipeline
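assignPipelineCellLocations rebinds its local name with pipeline = copy.copy(pipeline) before adding SheetReference/CellLocation modules, so the caller's pipeline object is left alone. copy.copy delegates to a __copy__ method when the class defines one, which lets a class decide how deep its "shallow" copy goes; the class below is a toy illustration of that protocol, not VisTrails' Pipeline:
import copy

class Pipeline(object):
    """Toy container, only to illustrate the __copy__ hook."""
    def __init__(self, modules=None):
        self.modules = list(modules) if modules is not None else []

    def __copy__(self):
        # copy.copy() calls this; we choose to duplicate the module list.
        return Pipeline(self.modules)

original = Pipeline(['SpreadsheetCell'])
working = copy.copy(original)             # rebind a working name, as above
working.modules.append('CellLocation')

print(original.modules)    # ['SpreadsheetCell']   -- caller's object untouched
print(working.modules)     # ['SpreadsheetCell', 'CellLocation']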
0
Example 74
Project: nzbToMedia Source File: autoProcessTV.py
def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):
cfg = dict(core.CFG[section][inputCategory])
host = cfg["host"]
port = cfg["port"]
ssl = int(cfg.get("ssl", 0))
web_root = cfg.get("web_root", "")
protocol = "https://" if ssl else "http://"
if not server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)):
logger.error("Server did not respond. Exiting", section)
return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]
# auto-detect correct fork
fork, fork_params = autoFork(section, inputCategory)
username = cfg.get("username", "")
password = cfg.get("password", "")
apikey = cfg.get("apikey", "")
delete_failed = int(cfg.get("delete_failed", 0))
nzbExtractionBy = cfg.get("nzbExtractionBy", "Downloader")
process_method = cfg.get("process_method")
remote_path = int(cfg.get("remote_path", 0))
wait_for = int(cfg.get("wait_for", 2))
force = int(cfg.get("force", 0))
delete_on = int(cfg.get("delete_on", 0))
ignore_subs = int(cfg.get("ignore_subs", 0))
extract = int(cfg.get("extract", 0))
if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name.
dirName = os.path.split(os.path.normpath(dirName))[0]
SpecificPath = os.path.join(dirName, str(inputName))
cleanName = os.path.splitext(SpecificPath)
if cleanName[1] == ".nzb":
SpecificPath = cleanName[0]
if os.path.isdir(SpecificPath):
dirName = SpecificPath
# Attempt to create the directory if it doesn't exist and ignore any
# error stating that it already exists. This fixes a bug where SickRage
# won't process the directory because it doesn't exist.
try:
os.makedirs(dirName) # Attempt to create the directory
except OSError as e:
# Re-raise the error if it wasn't about the directory not existing
if e.errno != errno.EEXIST:
raise
if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"):
if inputName:
process_all_exceptions(inputName, dirName)
inputName, dirName = convert_to_ascii(inputName, dirName)
# Now check if tv files exist in destination.
if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
if listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract:
logger.debug('Checking for archives to extract in directory: {0}'.format(dirName))
core.extractFiles(dirName)
inputName, dirName = convert_to_ascii(inputName, dirName)
if listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed.
flatten(dirName)
# Check video files for corruption
status = int(failed)
good_files = 0
num_files = 0
for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
num_files += 1
if transcoder.isVideoGood(video, status):
good_files += 1
import_subs(video)
if num_files > 0:
if good_files == num_files and not status == 0:
logger.info('Found Valid Videos. Setting status Success')
status = 0
failed = 0
if good_files < num_files and status == 0:
logger.info('Found corrupt videos. Setting status Failed')
status = 1
failed = 1
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')
if failureLink:
failureLink += '&corrupt=true'
elif clientAgent == "manual":
logger.warning("No media files found in directory {0} to manually process.".format(dirName), section)
return [0, ""] # Success (as far as this script is concerned)
elif nzbExtractionBy == "Destination":
logger.info("Check for media files ignored because nzbExtractionBy is set to Destination.")
if int(failed) == 0:
logger.info("Setting Status Success.")
status = 0
failed = 0
else:
logger.info("Downloader reported an error during download or verification. Processing this as a failed download.")
status = 1
failed = 1
else:
logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section)
status = 1
failed = 1
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')
if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads
result, newDirName = transcoder.Transcode_directory(dirName)
if result == 0:
logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section)
dirName = newDirName
chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8)
logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section)
if chmod_directory:
logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section)
core.rchmod(dirName, chmod_directory)
else:
logger.error("FAILED: Transcoding failed for files in {0}".format(dirName), section)
return [1, "{0}: Failed to post-process - Transcoding failed".format(section)]
# configure SB params to pass
fork_params['quiet'] = 1
fork_params['proc_type'] = 'manual'
if inputName is not None:
fork_params['nzbName'] = inputName
for param in copy.copy(fork_params):
if param == "failed":
fork_params[param] = failed
if param in ["dirName", "dir", "proc_dir"]:
fork_params[param] = dirName
if remote_path:
fork_params[param] = remoteDir(dirName)
if param == "process_method":
if process_method:
fork_params[param] = process_method
else:
del fork_params[param]
if param == "force":
if force:
fork_params[param] = force
else:
del fork_params[param]
if param == "delete_on":
if delete_on:
fork_params[param] = delete_on
else:
del fork_params[param]
if param == "ignore_subs":
if ignore_subs:
fork_params[param] = ignore_subs
else:
del fork_params[param]
# delete any unused params so we don't pass them to SB by mistake
[fork_params.pop(k) for k, v in fork_params.items() if v is None]
if status == 0:
logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section)
else:
core.FAILED = True
if failureLink:
reportNzb(failureLink, clientAgent)
if 'failed' in fork_params:
logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section)
elif section == "NzbDrone":
logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(fork), section)
return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader.
else:
logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section)
if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName:
logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
rmDir(dirName)
return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader.
url = None
if section == "SickBeard":
url = "{0}{1}:{2}{3}/home/postprocess/processEpisode".format(protocol, host, port, web_root)
elif section == "NzbDrone":
url = "{0}{1}:{2}{3}/api/command".format(protocol, host, port, web_root)
url2 = "{0}{1}:{2}{3}/api/config/downloadClient".format(protocol, host, port, web_root)
headers = {"X-Api-Key": apikey}
# params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'}
if remote_path:
logger.debug("remote_path: {0}".format(remoteDir(dirName)), section)
data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id}
else:
logger.debug("path: {0}".format(dirName), section)
data = {"name": "DownloadedEpisodesScan", "path": dirName, "downloadClientId": download_id}
if not download_id:
data.pop("downloadClientId")
data = json.dumps(data)
try:
if section == "SickBeard":
logger.debug("Opening URL: {0} with params: {1}".format(url, fork_params), section)
s = requests.Session()
login = "{0}{1}:{2}{3}/login".format(protocol, host, port, web_root)
login_params = {'username': username, 'password': password}
s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60))
r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800))
elif section == "NzbDrone":
logger.debug("Opening URL: {0} with data: {1}".format(url, data), section)
r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error("Unable to open URL: {0}".format(url), section)
return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]
if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error("Server returned status {0}".format(r.status_code), section)
return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
Success = False
Started = False
if section == "SickBeard":
for line in r.iter_lines():
if line:
logger.postprocess("{0}".format(line), section)
if "Moving file from" in line:
inputName = os.path.split(line)[1]
if "Processing succeeded" in line or "Successfully processed" in line:
Success = True
elif section == "NzbDrone":
try:
res = json.loads(r.content)
scan_id = int(res['id'])
logger.debug("Scan started with id: {0}".format(scan_id), section)
Started = True
except Exception as e:
logger.warning("No scan id was returned due to: {0}".format(e), section)
scan_id = None
Started = False
if status != 0 and delete_failed and not os.path.dirname(dirName) == dirName:
logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
rmDir(dirName)
if Success:
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif section == "NzbDrone" and Started:
n = 0
params = {}
url = "{0}/{1}".format(url, scan_id)
while n < 6: # set up wait_for minutes to see if command completes..
time.sleep(10 * wait_for)
command_status = self.command_complete(url, params, headers, section)
if command_status and command_status in ['completed', 'failed']:
break
n += 1
if command_status:
logger.debug("The Scan command return status: {0}".format(command_status), section)
if not os.path.exists(dirName):
logger.debug("The directory {0} has been removed. Renaming was successful.".format(dirName), section)
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif command_status and command_status in ['completed']:
logger.debug("The Scan command has completed successfully. Renaming was successful.", section)
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif command_status and command_status in ['failed']:
logger.debug("The Scan command has failed. Renaming was not successful.", section)
# return [1, "%s: Failed to post-process %s" % (section, inputName) ]
if self.CDH(url2, headers, section=section):
logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
else:
logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section)
return [1, "{0}: Failed to post-process {1}".format(section, inputName)]
else:
return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] # We did not receive Success confirmation.
0
Example 75
Project: Tickeys-linux Source File: markup.py
def shorten_post(self, lines, w, h, margin=2):
''' Shortens the text to a single line according to the label options.
This function operates on a text that has already been laid out because
for markup, parts of text can have different size and options.
If :attr:`text_size` [0] is None, the lines are returned unchanged.
Otherwise, the lines are converted to a single line fitting within the
constrained width, :attr:`text_size` [0].
:params:
`lines`: list of `LayoutLine` instances describing the text.
`w`: int, the width of the text in lines, including padding.
`h`: int, the height of the text in lines, including padding.
`margin` int, the additional space left on the sides. This is in
addition to :attr:`padding_x`.
:returns:
3-tuple of (xw, h, lines), where w, and h is similar to the input
and contains the resulting width / height of the text, including
padding. lines, is a list containing a single `LayoutLine`, which
contains the words for the line.
'''
def n(line, c):
''' A function similar to text.find, except it's an iterator that
returns successive occurrences of string c in list line. line is
not a string, but a list of LayoutWord instances that we walk
from left to right returning the indices of c in the words as we
encounter them. Note that the options can be different among the
words.
:returns:
3-tuple: the index of the word in line, the index of the
occurrence in word, and the extents (width) of the combined
words until this occurrence, not including the occurrence char.
If no more are found it returns (-1, -1, total_w) where total_w
is the full width of all the words.
'''
total_w = 0
for w in range(len(line)):
word = line[w]
if not word.lw:
continue
f = partial(word.text.find, c)
i = f()
while i != -1:
self.options = word.options
yield w, i, total_w + self.get_extents(word.text[:i])[0]
i = f(i + 1)
self.options = word.options
total_w += self.get_extents(word.text)[0]
yield -1, -1, total_w # this should never be reached, really
def p(line, c):
''' Similar to the `n` function, except it returns occurrences of c
from right to left in the list, line, similar to rfind.
'''
total_w = 0
offset = 0 if len(c) else 1
for w in range(len(line) - 1, -1, -1):
word = line[w]
if not word.lw:
continue
f = partial(word.text.rfind, c)
i = f()
while i != -1:
self.options = word.options
yield (w, i, total_w +
self.get_extents(word.text[i + 1:])[0])
if i:
i = f(0, i - offset)
else:
if not c:
self.options = word.options
yield (w, -1, total_w +
self.get_extents(word.text)[0])
break
self.options = word.options
total_w += self.get_extents(word.text)[0]
yield -1, -1, total_w # this should never be reached, really
def n_restricted(line, uw, c):
''' Similar to the function `n`, except it only returns the first
occurrence and it's not an iterator. Furthermore, if the first
occurrence doesn't fit within width uw, it returns the index of
whatever amount of text will still fit in uw.
:returns:
similar to the function `n`, except it's a 4-tuple, with the
last element a boolean, indicating if we had to clip the text
to fit in uw (True) or if the whole text until the first
occurrence fitted in uw (False).
'''
total_w = 0
if not len(line):
return 0, 0, 0
for w in range(len(line)):
word = line[w]
f = partial(word.text.find, c)
self.options = word.options
extents = self.get_cached_extents()
i = f()
if i != -1:
ww = extents(word.text[:i])[0]
if i != -1 and total_w + ww <= uw: # found and it fits
return w, i, total_w + ww, False
elif i == -1:
ww = extents(word.text)[0]
if total_w + ww <= uw: # wasn't found and all fits
total_w += ww
continue
i = len(word.text)
# now just find whatever amount of the word does fit
e = 0
while e != i and total_w + extents(word.text[:e])[0] <= uw:
e += 1
e = max(0, e - 1)
return w, e, total_w + extents(word.text[:e])[0], True
return -1, -1, total_w, False
def p_restricted(line, uw, c):
''' Similar to `n_restricted`, except it returns the first
occurrence starting from the right, like `p`.
'''
total_w = 0
if not len(line):
return 0, 0, 0
for w in range(len(line) - 1, -1, -1):
word = line[w]
f = partial(word.text.rfind, c)
self.options = word.options
extents = self.get_cached_extents()
i = f()
if i != -1:
ww = extents(word.text[i + 1:])[0]
if i != -1 and total_w + ww <= uw: # found and it fits
return w, i, total_w + ww, False
elif i == -1:
ww = extents(word.text)[0]
if total_w + ww <= uw: # wasn't found and all fits
total_w += ww
continue
# now just find whatever amount of the word does fit
s = len(word.text) - 1
while s >= 0 and total_w + extents(word.text[s:])[0] <= uw:
s -= 1
return w, s, total_w + extents(word.text[s + 1:])[0], True
return -1, -1, total_w, False
textwidth = self.get_cached_extents()
uw = self.text_size[0]
if uw is None:
return w, h, lines
old_opts = copy(self.options)
uw = max(0, int(uw - old_opts['padding_x'] * 2 - margin))
chr = type(self.text)
ssize = textwidth(' ')
c = old_opts['split_str']
line_height = old_opts['line_height']
xpad, ypad = old_opts['padding_x'], old_opts['padding_y']
dir = old_opts['shorten_from'][0]
# flatten lines into single line
line = []
last_w = 0
for l in range(len(lines)):
# concatenate (non-empty) inside lines with a space
this_line = lines[l]
if last_w and this_line.w and not this_line.line_wrap:
line.append(LayoutWord(old_opts, ssize[0], ssize[1], chr(' ')))
last_w = this_line.w or last_w
for word in this_line.words:
if word.lw:
line.append(word)
# if that fits, just return the flattened line
lw = sum([word.lw for word in line])
if lw <= uw:
lh = max([word.lh for word in line] + [0]) * line_height
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line)]
# find the size of ellipsis that'll fit
elps_s = textwidth('...')
if elps_s[0] > uw: # even ellipsis didn't fit...
s = textwidth('..')
if s[0] <= uw:
return (s[0] + 2 * xpad, s[1] * line_height + 2 * ypad,
[LayoutLine(0, 0, s[0], s[1], 1, 0, [LayoutWord(old_opts,
s[0], s[1], '..')])])
else:
s = textwidth('.')
return (s[0] + 2 * xpad, s[1] * line_height + 2 * ypad,
[LayoutLine(0, 0, s[0], s[1], 1, 0, [LayoutWord(old_opts,
s[0], s[1], '.')])])
elps = LayoutWord(old_opts, elps_s[0], elps_s[1], '...')
uw -= elps_s[0]
# now find the first left and right words that fit
w1, e1, l1, clipped1 = n_restricted(line, uw, c)
w2, s2, l2, clipped2 = p_restricted(line, uw, c)
if dir != 'l': # center or right
line1 = None
if clipped1 or clipped2 or l1 + l2 > uw:
# if either was clipped or both don't fit, just take first
if len(c):
self.options = old_opts
old_opts['split_str'] = ''
res = self.shorten_post(lines, w, h, margin)
self.options['split_str'] = c
return res
line1 = line[:w1]
last_word = line[w1]
last_text = last_word.text[:e1]
self.options = last_word.options
s = self.get_extents(last_text)
line1.append(LayoutWord(last_word.options, s[0], s[1],
last_text))
elif (w1, e1) == (-1, -1): # this shouldn't occur
line1 = line
if line1:
line1.append(elps)
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line1)]
# now we know that both the first and last word fit, and that
# there's at least one instances of the split_str in the line
if (w1, e1) != (w2, s2): # more than one split_str
if dir == 'r':
f = n(line, c) # iterator
assert next(f)[:-1] == (w1, e1) # first word should match
ww1, ee1, l1 = next(f)
while l2 + l1 <= uw:
w1, e1 = ww1, ee1
ww1, ee1, l1 = next(f)
if (w1, e1) == (w2, s2):
break
else: # center
f = n(line, c) # iterator
f_inv = p(line, c) # iterator
assert next(f)[:-1] == (w1, e1)
assert next(f_inv)[:-1] == (w2, s2)
while True:
if l1 <= l2:
ww1, ee1, l1 = next(f) # hypothesize that next fit
if l2 + l1 > uw:
break
w1, e1 = ww1, ee1
if (w1, e1) == (w2, s2):
break
else:
ww2, ss2, l2 = next(f_inv)
if l2 + l1 > uw:
break
w2, s2 = ww2, ss2
if (w1, e1) == (w2, s2):
break
else: # left
line1 = [elps]
if clipped1 or clipped2 or l1 + l2 > uw:
# if either was clipped or both don't fit, just take last
if len(c):
self.options = old_opts
old_opts['split_str'] = ''
res = self.shorten_post(lines, w, h, margin)
self.options['split_str'] = c
return res
first_word = line[w2]
first_text = first_word.text[s2 + 1:]
self.options = first_word.options
s = self.get_extents(first_text)
line1.append(LayoutWord(first_word.options, s[0], s[1],
first_text))
line1.extend(line[w2 + 1:])
elif (w1, e1) == (-1, -1): # this shouldn't occur
line1 = line
if len(line1) != 1:
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line1)]
# now we know that both the first and last word fit, and that
# there's at least one instances of the split_str in the line
if (w1, e1) != (w2, s2): # more than one split_str
f_inv = p(line, c) # iterator
assert next(f_inv)[:-1] == (w2, s2) # last word should match
ww2, ss2, l2 = next(f_inv)
while l2 + l1 <= uw:
w2, s2 = ww2, ss2
ww2, ss2, l2 = next(f_inv)
if (w1, e1) == (w2, s2):
break
# now add back the left half
line1 = line[:w1]
last_word = line[w1]
last_text = last_word.text[:e1]
self.options = last_word.options
s = self.get_extents(last_text)
if len(last_text):
line1.append(LayoutWord(last_word.options, s[0], s[1], last_text))
line1.append(elps)
# now add back the right half
first_word = line[w2]
first_text = first_word.text[s2 + 1:]
self.options = first_word.options
s = self.get_extents(first_text)
if len(first_text):
line1.append(LayoutWord(first_word.options, s[0], s[1],
first_text))
line1.extend(line[w2 + 1:])
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line1)]
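The size bookkeeping above reduces to one small measure step: a candidate line's width is the sum of its words' advances, and its height is the tallest word scaled by the line-height factor, with padding added on both axes. A minimal sketch of that step (LayoutWord here is a hypothetical stand-in for the record used above, not the library's actual class):

from collections import namedtuple

# Hypothetical stand-in for the layout word records used above.
LayoutWord = namedtuple('LayoutWord', 'options lw lh text')

def measure_line(words, line_height=1.0, xpad=0, ypad=0):
    # Width: sum of word advances. Height: tallest word times the
    # line-height multiplier. Padding is applied on both sides.
    lw = sum(w.lw for w in words)
    lh = max((w.lh for w in words), default=0) * line_height
    return lw + 2 * xpad, lh + 2 * ypad

line = [LayoutWord(None, 40, 12, 'Hello'), LayoutWord(None, 8, 12, '...')]
print(measure_line(line, line_height=1.2, xpad=4, ypad=2))  # (56, 18.4)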
Example 76
Project: htm-challenge Source File: NeuroskyConnector.py
def processDataRow(self, \
extended_code_level, \
code, \
length, \
data_values, \
timestamp):
'''CODE Definitions Table
Single-Byte CODEs
Extended                      (Byte)
Code Level   [CODE]  [LENGTH]  Data Value Meaning
----------   ------  --------  ------------------
0            0x02    -         POOR_SIGNAL Quality (0-255)
0            0x04    -         ATTENTION eSense (0 to 100)
0            0x05    -         MEDITATION eSense (0 to 100)
0            0x16    -         Blink Strength (0-255). Sent only
                               when a Blink event occurs.
Multi-Byte CODEs
Extended                      (Byte)
Code Level   [CODE]  [LENGTH]  Data Value Meaning
----------   ------  --------  ------------------
0            0x80    2         RAW Wave Value: a single big-endian
                               16-bit two's-complement signed value
                               (high-order byte followed by
                               low-order byte) (-32768 to 32767)
0            0x83    24        ASIC_EEG_POWER: eight big-endian
                               3-byte unsigned integer values
                               representing delta, theta, low-alpha,
                               high-alpha, low-beta, high-beta,
                               low-gamma, and mid-gamma EEG band
                               power values
Any          0x55    -         NEVER USED (reserved for [EXCODE])
Any          0xAA    -         NEVER USED (reserved for [SYNC])
MindWave CODEs
Extended                      (Byte)
Code Level   [CODE]  [LENGTH]  Data Value Meaning
----------   ------  --------  ------------------
0            0xD0    3         Headset Connect Success
0            0xD1    2         Headset Not Found
0            0xD2    3         Headset Disconnected
0            0xD3    0         Request Denied
0            0xD4    1         Standby/Scan Mode'''
packet_update = {}
packet_update['timestamp'] = timestamp
#self.packet_count += 1
#self.parent.plugin_session.packet_count += 1
self.parent.incrementPacketCount()
if extended_code_level == 0:
if code == '02':
poor_signal_quality = int(data_values, 16)
if self.DEBUG > 1:
print # Empty line at the beginning of most packets
print "poorSignalLevel:",
print poor_signal_quality
self.current_signal = copy.copy(poor_signal_quality)
packet_update['poorSignalLevel'] = poor_signal_quality
elif code == '04':
attention = int(data_values, 16)
if self.DEBUG > 1:
print "attention:",
print attention
self.current_attention = copy.copy(attention)
#if (attention > self.detection_threshold):
#self.current_detection = 1
#else:
#self.current_detection = 0
packet_update['eSense'] = {}
packet_update['eSense']['attention'] = attention
elif code == '05':
meditation = int(data_values, 16)
if self.DEBUG > 1:
print "meditation:",
print meditation
packet_update['eSense'] = {}
packet_update['eSense']['meditation'] = meditation
elif code == '16':
blink_strength = int(data_values, 16)
if self.DEBUG > 1:
print "blinkStrength:",
print blink_strength
packet_update['blinkStrength'] = blink_strength
elif code == '80':
#self.packet_count -= 1 # We don't count raw EEG packets for Interface
raw_wave_value = data_values
if self.DEBUG > 3:
print "Raw EEG:",
print raw_wave_value
raw_eeg_value = self.processRawEEGValue(data_values)
if self.DEBUG > 2:
print "Raw EEG Value:",
print raw_eeg_value
packet_update['rawEeg'] = raw_eeg_value
elif code == '83':
asic_eeg_power = data_values
if self.DEBUG > 2:
print "ASIC_EEG_POWER:",
print asic_eeg_power
eegPower = self.processAsicEegPower(data_values)
if self.DEBUG > 1:
for key in EEG_POWER_BAND_ORDER:
print "%s: %i" % (key, eegPower[key])
packet_update['eegPower'] = {}
for key in eegPower.keys():
packet_update['eegPower'][key] = eegPower[key]
elif code == 'd0':
if self.DEBUG:
print "INFO: ThinkGear Headset Connect Success"
#self.session_start_timestamp = time.time()
self.parent.resetSessionStartTime()
#print "okay"
#self.packet_count = 0
#self.parent.plugin_session.packet_count = 0
self.parent.setPacketCount(0)
#self.bad_packets = 0
#self.parent.plugin_session.bad_packets = 0
self.parent.setBadPackets(0)
elif code == 'd1':
#current_time = time.time()
current_time = int(time.time() * 1000000)
if current_time - self.auto_connect_timestamp > \
THINKGEAR_DEVICE_AUTOCONNECT_INTERVAL:
if self.DEBUG:
print "INFO: ThinkGear device not found. Writing auto-connect packet."
self.auto_connect_timestamp = current_time
self.device.device.write('\xc2')
#self.device.device.write('\xc0\xe4\x68')
elif code == 'd2':
#current_time = time.time()
current_time = int(time.time() * 1000000)
if current_time - self.auto_connect_timestamp > \
THINKGEAR_DEVICE_AUTOCONNECT_INTERVAL:
if self.DEBUG:
print "INFO: ThinkGear device disconnected. Writing auto-connect packet."
self.auto_connect_timestamp = current_time
self.device.device.write('\xc2')
#self.device.device.write('\xc0\xe4\x68')
elif code == 'd3':
#current_time = time.time()
current_time = int(time.time() * 1000000)
if current_time - self.auto_connect_timestamp > \
THINKGEAR_DEVICE_AUTOCONNECT_INTERVAL:
if self.DEBUG:
print "INFO: ThinkGear device request denied. Writing auto-connect packet."
self.auto_connect_timestamp = current_time
self.device.device.write('\xc2')
#self.device.device.write('\xc0\xe4\x68')
elif code == 'd4':
#current_time = time.time()
current_time = int(time.time() * 1000000)
if current_time - self.auto_connect_timestamp > \
THINKGEAR_DEVICE_AUTOCONNECT_INTERVAL:
if self.DEBUG:
print "INFO: ThinkGear device in standby/scan mode. Writing auto-connect packet."
self.auto_connect_timestamp = current_time
self.device.device.write('\xc2')
#self.device.device.write('\xc0\xe4\x68')
else:
#self.bad_packets += 1
#self.parent.plugin_session.bad_packets += 1
self.parent.incrementBadPackets()
if self.DEBUG:
print "ERROR: data payload row code not matched:",
print code
return(packet_update)
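A side note on the copy.copy calls above: poor_signal_quality and attention are plain ints, and for immutable atomic types copy.copy simply returns its argument, so these copies are harmless no-ops kept, presumably, for uniformity. A quick demonstration:

import copy

signal = 200
assert copy.copy(signal) is signal      # ints are immutable: same object back

# A shallow copy only does real work on mutable containers:
packet = {'poorSignalLevel': signal}
assert copy.copy(packet) is not packet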
Example 77
def readInstanceFile(instancesFilePath):
f = open(instancesFilePath, "rt")
data = f.read()
f.close()
lines = data.splitlines()
i = 0
parseError = 0
keyDict = copy.copy(kFixedFieldKeys)
numKeys = kNumFixedFields
numLines = len(lines)
instancesList = []
for i in range(numLines):
line = lines[i]
# Skip over blank lines
line2 = line.strip()
if not line2:
continue
# Get rid of all comments. If we find a key definition comment line, parse it.
commentIndex = line.find('#')
if commentIndex >= 0:
if line.startswith(kFieldsKey):
if instancesList:
print "ERROR: Header line (%s) must preceed a data line." % kFieldsKey
raise ParseError
# parse the line with the field names.
line = line[len(kFieldsKey):]
line = line.strip()
keys = line.split('\t')
keys = map(lambda name: name.strip(), keys)
numKeys = len(keys)
k = kNumFixedFields
while k < numKeys:
keyDict[k] = keys[k]
k +=1
continue
else:
line = line[:commentIndex]
continue
# Must be a data line.
fields = line.split('\t')
fields = map(lambda datum: datum.strip(), fields)
numFields = len(fields)
if (numFields != numKeys):
print "ERROR: In line %s, the number of fields %s does not match the number of key names %s (FamilyName, FontName, FullName, Weight, Coords, IsBold)." % (i+1, numFields, numKeys)
parseError = 1
continue
instanceDict= {}
#Build a dict from key to value. Some kinds of values need special processing.
for k in range(numFields):
key = keyDict[k]
field = fields[k]
if not field:
continue
if field in ["Default", "None", "FontBBox"]:
# FontBBox is no longer supported - I calculate the real
# instance fontBBox from the glyph metrics instead.
continue
if key == kFontName:
value = field
elif key in [kExtraGlyphs, kExceptionSuffixes]:
value = eval(field)
elif key in [kIsBoldKey, kIsItalicKey, kCoordsKey]:
try:
value = eval(field) # this works for all three fields.
if key == kIsBoldKey: # need to convert to Type 1 field key.
instanceDict[key] = value
# add kForceBold key.
key = kForceBold
if value == 1:
value = "true"
else:
value = "false"
elif key == kIsItalicKey:
if value == 1:
value = "true"
else:
value = "false"
elif key == kCoordsKey:
if type(value) == type(0):
value = (value,)
except (NameError, SyntaxError):
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
elif field[0] in ["[","{"]: # it is a Type 1 array value. Turn it into a list and verify that there's an even number of values for the alignment zones
value = field[1:-1].split() # Remove the begin and end brackets/braces, and make a list
if key in kAlignmentZonesKeys:
if len(value) % 2 != 0:
print "ERROR: In line %s, the %s field does not have an even number of values." % (i+1, key)
parseError = 1
continue
if key in kTopAlignZonesKeys: # The Type 1 spec only allows 7 top zones (7 pairs of values)
if len(value) > kMaxTopZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxTopZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kBotAlignZonesKeys: # The Type 1 spec only allows 5 bottom zones (5 pairs of values)
if len(value) > kMaxBotZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxBotZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kStdStemsKeys:
if len(value) > kMaxStdStemsSize:
print "ERROR: In line %s, the %s field can only have %d value." % (i+1, key, kMaxStdStemsSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
if key in kStemSnapKeys: # The Type 1 spec only allows 12 stem widths, including 1 standard stem
if len(value) > kMaxStemSnapSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxStemSnapSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, True) # True = all values must be positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
else:
# either a single number or a string.
if re.match(r"^[-.\d]+$", field):
value = field #it is a Type 1 number. Pass as is, as a string.
else:
value = field
instanceDict[key] = value
if (kStdHW in instanceDict and kStemSnapH not in instanceDict) or (kStdHW not in instanceDict and kStemSnapH in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdHW, kStemSnapH)
parseError = 1
elif (kStdHW in instanceDict and kStemSnapH in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapH][0] != instanceDict[kStdHW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapH, kStdHW)
parseError = 1
if (kStdVW in instanceDict and kStemSnapV not in instanceDict) or (kStdVW not in instanceDict and kStemSnapV in instanceDict):
print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdVW, kStemSnapV)
parseError = 1
elif (kStdVW in instanceDict and kStemSnapV in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
if instanceDict[kStemSnapV][0] != instanceDict[kStdVW][0]:
print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapV, kStdVW)
parseError = 1
instancesList.append(instanceDict)
if parseError or len(instancesList) == 0:
raise(ParseError)
return instancesList
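The keyDict = copy.copy(kFixedFieldKeys) near the top is the classic copy-before-mutate pattern: each parsed file extends its own key map without touching the shared module-level default. Reduced to its essentials (the names and default keys here are illustrative assumptions):

import copy

kFixedFieldKeys = {0: 'FamilyName', 1: 'FontName'}   # assumed module default

def parse_header(extra_keys):
    key_dict = copy.copy(kFixedFieldKeys)            # per-call shallow copy
    for i, name in enumerate(extra_keys, start=len(key_dict)):
        key_dict[i] = name                           # mutates the copy only
    return key_dict

parse_header(['Weight', 'Coords'])
assert len(kFixedFieldKeys) == 2                     # shared default untouched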
Example 78
Project: xhtml2pdf Source File: paragraph.py
def test():
doc = SimpleDocTemplate("test.pdf")
story = []
style = Style(fontName="Helvetica", textIndent=24.0)
fn = style["fontName"]
fs = style["fontSize"]
sampleText1 = createText(TEXT[:100], fn, fs)
sampleText2 = createText(TEXT[100:], fn, fs)
text = Text(sampleText1 + [
Space(
fontName=fn,
fontSize=fs),
Word(
text="TrennbarTrennbar",
pairs=[("Trenn-", "barTrennbar")],
fontName=fn,
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
Word(
text="Normal",
color=Color(1, 0, 0),
fontName=fn,
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
Word(
text="gGrößer",
fontName=fn,
fontSize=fs * 1.5),
Space(
fontName=fn,
fontSize=fs),
Word(
text="Bold",
fontName="Times-Bold",
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
Word(
text="jItalic",
fontName="Times-Italic",
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
# <span style="border: 1px solid red;">ipsum <span style="border: 1px solid green; padding: 4px; padding-left: 20px; background: yellow; margin-bottom: 8px; margin-left: 10px;">
# Lo<font size="12pt">re</font>m</span> <span style="background:blue; height: 30px;">ipsum</span> Lorem</span>
BoxBegin(
fontName=fn,
fontSize=fs,
**makeBorder(0.5, "solid", Color(0, 1, 0))),
Word(
text="Lorem",
fontName="Times-Bold",
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName="Times-Bold",
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
BoxBegin(
fontName=fn,
fontSize=fs,
backgroundColor=Color(1, 1, 0),
**makeBorder(1, "solid", Color(1, 0, 0))),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
BoxEnd(),
Space(
fontName=fn,
fontSize=fs),
Word(
text="Lorem",
fontName=fn,
fontSize=fs),
Space(
fontName=fn,
fontSize=fs),
BoxEnd(),
LineBreak(
fontName=fn,
fontSize=fs),
LineBreak(
fontName=fn,
fontSize=fs),
] + sampleText2)
story.append(Paragraph(
copy.copy(text),
style,
debug=0))
for i in range(10):
style = copy.deepcopy(style)
style["textAlign"] = ALIGNMENTS[i % 4]
text = createText(("(%d) " % i) + TEXT, fn, fs)
story.append(Paragraph(
copy.copy(text),
style,
debug=0))
doc.build(story)
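This test mixes both copy flavors deliberately: copy.copy(text) hands each Paragraph its own top-level word list, while copy.deepcopy(style) is used before mutating the style each iteration, presumably because a Style can hold nested mutable state. The difference in one short sketch:

import copy

style = {'fontName': 'Helvetica', 'margins': [0, 0, 0, 0]}

shallow = copy.copy(style)
shallow['margins'].append(8)            # nested list is shared...
assert style['margins'] == [0, 0, 0, 0, 8]

deep = copy.deepcopy(style)
deep['margins'].append(16)              # ...a deep copy's is not
assert style['margins'] == [0, 0, 0, 0, 8]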
Example 79
Project: pymtl Source File: visitors.py
def visit_Assign( self, node ):
# Catch untranslatable constructs
if len(node.targets) != 1:
raise VerilogTranslationError(
'Chained assignments are not supported!\n'
'Please modify "x = y = ..." to be two separate lines.',
node.lineno
)
if isinstance(node.targets[0], ast.Tuple):
raise VerilogTranslationError(
'Multiple items on the left of an assignment are not supported!\n'
'Please modify "x,y = ..." to be two separate lines.',
node.lineno
)
# First visit the RHS to update Name nodes that have been inferred
self.visit( node.value )
# Need this to visit potential temporaries used in slice indices!
self.visit( node.targets[0] )
# The LHS doesn't have a type, we need to infer it
if node.targets[0]._object == None:
# The LHS should be a Name node!
if not isinstance(node.targets[0], _ast.Name):
raise VerilogTranslationError(
'An internal error occurred when performing type inference!\n'
'Please contact the PyMTL developers!',
node.lineno
)
# Assign unique name to this temporary in case the same temporary
# name is used in another concurrent block.
node.targets[0].id = self._uniq_name( node.targets[0].id )
# Copy the object returned by the RHS, set the name appropriately
if isinstance( node.value, ast.Name ):
if isinstance( node.value._object, int ):
self._insert( node, (node.targets[0].id, node.value._object ) )
else:
obj = copy.copy( node.value._object )
obj.name = node.targets[0].id
obj.parent = None
self._insert( node, obj )
elif isinstance( node.value, ast.Attribute ):
if isinstance( node.value._object, int ):
self._insert( node, (node.targets[0].id, node.value._object ) )
else:
obj = copy.copy( node.value._object )
obj.name = node.targets[0].id
obj.parent = None
self._insert( node, obj )
elif isinstance( node.value, ast.Num ):
self._insert( node, (node.targets[0].id, int( node.value.n )) )
elif isinstance( node.value, ast.BoolOp ):
obj = Wire( 1 )
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Compare ):
obj = Wire( 1 )
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Subscript ):
# TODO: assumes ast.Index does NOT contain a slice object
if not isinstance( node.value.slice, ast.Index ):
raise VerilogTranslationError(
'Type inference from slices > 1-bit is not currently supported!'
'\nCannot infer type of temporary variable "{}".'
.format( node.targets[0].id ),
node.lineno
)
if isinstance( node.value._object, Signal ):
obj = Wire( 1 )
elif isinstance( node.value._object, list ) and \
isinstance( node.value._object[0], Signal ):
obj = Wire( node.value._object[0].nbits )
else:
raise VerilogTranslationError(
'Type inference from unsupported list construct!'
'\nCannot infer type of temporary variable "{}".'
.format( node.targets[0].id ),
node.lineno
)
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Call ):
func_name = node.value.func.id
if func_name in ['sext', 'zext']:
nbits_arg = node.value.args[1]
if isinstance( nbits_arg, ast.Num ): nbits = nbits_arg.n
else: nbits = nbits_arg._object
if not isinstance( nbits, int ):
raise VerilogTranslationError(
'The second argument to function "{}" must be an int!'
.format( func_name ),
node.lineno
)
obj = Wire( nbits )
elif func_name == 'concat':
nbits = sum( [x._object.nbits for x in node.value.args ] )
obj = Wire( nbits )
elif func_name in ['reduce_and', 'reduce_or', 'reduce_xor']:
obj = Wire( 1 )
elif func_name == 'Bits':
nbits_arg = node.value.args[0]
if isinstance( nbits_arg, ast.Num ): nbits = nbits_arg.n
else: nbits = nbits_arg._object
if not isinstance( nbits, int ):
raise VerilogTranslationError(
'The first argument to the Bits constructor must be an int!',
node.lineno
)
obj = Wire( nbits )
else:
print_simple_ast( node )
raise VerilogTranslationError(
'Type inference from the function "{}" is not currently supported!'
'\nCannot infer type of temporary variable "{}".'
.format( func_name, node.targets[0].id ),
node.lineno
)
obj.name = node.targets[0].id
self._insert( node, obj )
else:
print_simple_ast( node )
raise VerilogTranslationError(
'Type inference of "{}" AST nodes is not currently supported!'
'\nCannot infer type of temporary variable "{}".'
.format( type(node.value).__name__, node.targets[0].id ),
node.lineno
)
return node
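The obj = copy.copy(node.value._object) branches above clone a signal so the temporary gets its own name and parent without renaming the signal on the right-hand side. A shallow copy suffices because only top-level attributes are reassigned afterwards. With a hypothetical Wire-like class:

import copy

class Wire:                      # hypothetical stand-in for a PyMTL signal
    def __init__(self, name, nbits):
        self.name, self.nbits, self.parent = name, nbits, None

rhs = Wire('data_out', 8)
tmp = copy.copy(rhs)             # clone, then rename only the clone
tmp.name, tmp.parent = 'tmp_0', None

assert rhs.name == 'data_out' and tmp.nbits == 8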
Example 80
def run(self):
while True:
# cmd is in json format
# cmd = {
# network_type: "followers", # or friends
# user_id: id,
# data_type: 'ids' # users
#}
cmd = self.get_cmd()
command = cmd['cmd']
logger.debug("new cmd: %s"%(cmd))
redis_cmd_handler = None
#maybe changing this to a map would be less expressive but easier to read... but well, not too many cases here yet...
if (command == 'TERMINATE'):
# make sure we need to flush all existing data in the handlers..
for handler in self.handlers:
handler.flush_all()
break
elif (command == 'CRAWLER_FLUSH'):
for handler in self.handlers:
handler.flush_all()
else:
# figure out args first...
args = {}
if (command == 'CRAWL_TWEET'):
args = {
"tweet_id": cmd['tweet_id'],
"write_to_handlers": self.handlers,
"cmd_handlers" : []
}
elif (command == 'SEARCH'):
args = {
"write_to_handlers": self.handlers,
"cmd_handlers" : []
}
else:
args = {
"user_id": cmd['user_id'],
"write_to_handlers": self.handlers,
"cmd_handlers" : []
}
bucket = cmd["bucket"] if "bucket" in cmd else None
if (bucket):
args["bucket"] = bucket
func = None
if (command in ['CRAWL_USER_TIMELINE', 'CRAWL_TWEET']):
func = getattr(self.twitter_api, self.tasks[command])
elif (command in ['SEARCH']):
if "lang" in cmd:
args['lang'] = cmd['lang']
if "geocode" in cmd:
args['geocode'] = cmd['geocode']
if "key" in cmd:
args['key'] = cmd['key']
#logger.info("new cmd: %s"%(cmd))
# q is required, otherwise let it fail...
if "query" in cmd:
args['query'] = cmd['query']
func = getattr(self.twitter_api, self.tasks[command])
elif (command in ['CRAWL_FRIENDS', 'CRAWL_FOLLOWERS']):
data_type = cmd['data_type']
try:
depth = cmd["depth"] if "depth" in cmd else None
depth = int(depth)
# for handler in self.handlers:
# if isinstance(handler, InMemoryHandler):
# inmemory_handler = handler
if (depth > 1):
template = copy.copy(cmd)
# template = {
# network_type: "followers", # or friends
# user_id: id,
# data_type: 'ids' # object
# depth: depth
#}
# will throw out exception if redis_config doesn't exist...
args["cmd_handlers"].append(CrawlUserRelationshipCommandHandler(template=template, redis_config=self.redis_config))
logger.info("depth: %d, # of cmd_handlers: %d"%(depth, len(args['cmd_handlers'])))
except Exception as exc:
logger.warn(exc)
func = getattr(self.twitter_api, self.tasks[command][data_type])
if func:
try:
#logger.info(args)
func(**args)
del args['cmd_handlers']
for handler in self.handlers:
handler.flush_all()
except Exception as exc:
logger.error("%s"%exc)
try:
self.init_twitter_api()
except StopIteration as init_twitter_api_exc:
# import exceptions
# if (isinstance(init_user_api_exc, exceptions.StopIteration)): # no more proxy to try... so kill myself...
for handler in self.handlers:
handler.flush_all()
logger.warn('not enough proxy servers, kill me... %s'%(self.crawler_id))
# flush first
self.node_queue.put({
'cmd':'CRAWLER_FAILED',
'crawler_id': self.crawler_id
})
del self.node_queue
return False
#raise
else:
#put current task back to queue...
logger.info('pushing current task back to the queue: %s'%(json.dumps(cmd)))
self.enqueue(cmd)
#logger.error(full_stack())
else:
logger.warn("whatever are you trying to do?")
logger.info("looks like i'm done...")
return True
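Here template = copy.copy(cmd) snapshots the incoming command dict so the relationship-crawl handler can reissue it at a shallower depth; because the copy is shallow, this is only safe while the command's values are scalars like strings and ints. Reduced:

import copy

cmd = {'cmd': 'CRAWL_FOLLOWERS', 'user_id': 123, 'depth': 2}

template = copy.copy(cmd)        # independent top level
template['depth'] -= 1           # queue a shallower follow-up crawl

assert cmd['depth'] == 2 and template['depth'] == 1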
Example 81
Project: gffutils Source File: parser.py
def _split_keyvals(keyval_str, dialect=None):
"""
Given the string attributes field of a GFF-like line, split it into an
attributes dictionary and a "dialect" dictionary which contains information
needed to reconstruct the original string.
Lots of logic here to handle all the corner cases.
If `dialect` is None, then do all the logic to infer a dialect from this
attribute string.
Otherwise, use the provided dialect (and return it at the end).
"""
infer_dialect = False
if dialect is None:
# Make a copy of default dialect so it can be modified as needed
dialect = copy.copy(constants.dialect)
infer_dialect = True
from gffutils import feature
quals = feature.dict_class()
if not keyval_str:
return quals, dialect
# If a dialect was provided, then use that directly.
if not infer_dialect:
if dialect['trailing semicolon']:
keyval_str = keyval_str.rstrip(';')
parts = keyval_str.split(dialect['field separator'])
kvsep = dialect['keyval separator']
if dialect['leading semicolon']:
pieces = []
for p in parts:
if p and p[0] == ';':
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
if dialect['fmt'] == 'gff3':
key_vals = [p.split(kvsep) for p in parts]
else:
leadingsemicolon = dialect['leading semicolon']
pieces = []
for i, p in enumerate(parts):
if i == 0 and leadingsemicolon:
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
quoted = dialect['quoted GFF2 values']
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
try:
quals[key]
except KeyError:
quals[key] = []
if quoted:
if (len(val) > 0 and val[0] == '"' and val[-1] == '"'):
val = val[1:-1]
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
quals[key].extend(vals)
return quals, dialect
# If we got here, then we need to infer the dialect....
#
# Reset the order to an empty list so that it will only be populated with
# keys that are found in the file.
dialect['order'] = []
# ensembl GTF has trailing semicolon
if keyval_str[-1] == ';':
keyval_str = keyval_str[:-1]
dialect['trailing semicolon'] = True
# GFF2/GTF has a semicolon with at least one space after it.
# Spaces can be on both sides (e.g. wormbase)
# GFF3 works with no spaces.
# So split on the first one we can recognize...
for sep in (' ; ', '; ', ';'):
parts = keyval_str.split(sep)
if len(parts) > 1:
dialect['field separator'] = sep
break
# Is it GFF3? They have key-vals separated by "="
if gff3_kw_pat.match(parts[0]):
key_vals = [p.split('=') for p in parts]
dialect['fmt'] = 'gff3'
dialect['keyval separator'] = '='
# Otherwise, key-vals separated by space. Key is first item.
else:
dialect['keyval separator'] = " "
pieces = []
for p in parts:
# Fix misplaced semicolons in keys in some GFF2 files
if p and p[0] == ';':
p = p[1:]
dialect['leading semicolon'] = True
pieces.append(p.strip().split(' '))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
# Pathological cases where values of a key have within them the key-val
# separator, e.g.,
# Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
# Is the key already in there?
if key in quals:
dialect['repeated keys'] = True
else:
quals[key] = []
# Remove quotes in GFF2
if len(val) > 0 and val[0] == '"' and val[-1] == '"':
val = val[1:-1]
dialect['quoted GFF2 values'] = True
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
if (len(vals) > 1) and dialect['repeated keys']:
raise AttributeStringError(
"Internally inconsistent attributes formatting: "
"some have repeated keys, some do not.")
quals[key].extend(vals)
# keep track of the order of keys
dialect['order'].append(key)
#for key, vals in quals.items():
#
# TODO: urllib.unquote breaks round trip invariance for "hybrid1.gff3"
# test file. This is because the "Note" field has %xx escape chars,
# but "Dbxref" has ":" which, if everything were consistent, should
# have also been escaped.
#
# (By the way, GFF3 spec says only literal use of \t, \n, \r, %, and
# control characters should be encoded)
#
# Solution 1: don't unquote
# Solution 2: store, along with each attribute, whether or not it
# should be quoted later upon reconstruction
# Solution 3: don't care about invariance
# unquoted = [urllib.unquote(v) for v in vals]
#quals[key] = vals
if (
(dialect['keyval separator'] == ' ') and
(dialect['quoted GFF2 values'])
):
dialect['fmt'] = 'gtf'
return quals, dialect
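dialect = copy.copy(constants.dialect) gives the inference pass a scratch dialect to mutate, and the code immediately rebinds dialect['order'] to a fresh list, which sidesteps the usual shallow-copy hazard of sharing nested mutables with the default. The pattern in miniature (DEFAULT_DIALECT is an assumed stand-in for constants.dialect):

import copy

DEFAULT_DIALECT = {'fmt': 'gff3', 'order': []}

dialect = copy.copy(DEFAULT_DIALECT)
dialect['order'] = []            # rebind: don't share the default's list
dialect['order'].append('ID')
dialect['fmt'] = 'gtf'

assert DEFAULT_DIALECT == {'fmt': 'gff3', 'order': []}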
Example 82
def updateData(self, labels, foo, **args):
#self.removeCurves()
self.removeDrawingCurves() # my function, that doesn't delete selection curves
self.removeMarkers()
# initial var values
self.showKNNModel = 0
self.showCorrect = 1
self.__dict__.update(args)
length = len(labels)
self.dataMap = {} # dictionary with keys of form "x_i-y_i" with values (x_i, y_i, color, data)
self.XAnchor = self.createXAnchors(length)
self.YAnchor = self.createYAnchors(length)
self.shownAttributes = labels
polyvizLineCoordsX = []; polyvizLineCoordsY = [] # if class is discrete we will optimize drawing by storing computed values and adding fewer data curves to the plot
# we must have at least 3 attributes to be able to show anything
if not self.haveData or len(labels) < 3:
self.updateLayout()
return
dataSize = len(self.rawData)
if self.dataHasClass: useDifferentColors = self.useDifferentColors # don't use colors if we don't have a class
else: useDifferentColors = 0
self.setAxisScale(QwtPlot.xBottom, -1.20, 1.20 + 0.05 * self.showLegend, 1)
# store indices to shown attributes
indices = [self.attributeNameIndex[label] for label in labels]
# will we show different symbols?
useDifferentSymbols = self.useDifferentSymbols and self.dataHasDiscreteClass and len(self.dataDomain.classVar.values) <= len(self.curveSymbols)
# ##########
# draw text at lines
for i in range(length):
# print attribute name
self.addMarker(labels[i], 0.6*(self.XAnchor[i]+ self.XAnchor[(i+1)%length]), 0.6*(self.YAnchor[i]+ self.YAnchor[(i+1)%length]), Qt.AlignHCenter | Qt.AlignVCenter, bold = 1)
if self.dataDomain[labels[i]].varType == orange.VarTypes.Discrete:
# print all possible attribute values
values = getVariableValuesSorted(self.dataDomain[labels[i]])
count = len(values)
k = 1.08
for j in range(count):
pos = (1.0 + 2.0*float(j)) / float(2*count)
self.addMarker(values[j], k*(1-pos)*self.XAnchor[i]+k*pos*self.XAnchor[(i+1)%length], k*(1-pos)*self.YAnchor[i]+k*pos*self.YAnchor[(i+1)%length], Qt.AlignHCenter | Qt.AlignVCenter)
else:
# min and max value
if self.tooltipValue == TOOLTIPS_SHOW_SPRINGS:
names = ["%.1f" % (0.0), "%.1f" % (1.0)]
elif self.tooltipValue == TOOLTIPS_SHOW_DATA:
names = ["%%.%df" % (self.dataDomain[labels[i]].numberOfDecimals) % (self.attrValues[labels[i]][0]), "%%.%df" % (self.dataDomain[labels[i]].numberOfDecimals) % (self.attrValues[labels[i]][1])]
self.addMarker(names[0],0.95*self.XAnchor[i]+0.15*self.XAnchor[(i+1)%length], 0.95*self.YAnchor[i]+0.15*self.YAnchor[(i+1)%length], Qt.AlignHCenter | Qt.AlignVCenter)
self.addMarker(names[1], 0.15*self.XAnchor[i]+0.95*self.XAnchor[(i+1)%length], 0.15*self.YAnchor[i]+0.95*self.YAnchor[(i+1)%length], Qt.AlignHCenter | Qt.AlignVCenter)
XAnchorPositions = numpy.zeros([length, dataSize], numpy.float)
YAnchorPositions = numpy.zeros([length, dataSize], numpy.float)
XAnchor = self.createXAnchors(length)
YAnchor = self.createYAnchors(length)
for i in range(length):
Xdata = XAnchor[i] * (1-self.noJitteringScaledData[indices[i]]) + XAnchor[(i+1)%length] * self.noJitteringScaledData[indices[i]]
Ydata = YAnchor[i] * (1-self.noJitteringScaledData[indices[i]]) + YAnchor[(i+1)%length] * self.noJitteringScaledData[indices[i]]
XAnchorPositions[i] = Xdata
YAnchorPositions[i] = Ydata
XAnchorPositions = numpy.swapaxes(XAnchorPositions, 0,1)
YAnchorPositions = numpy.swapaxes(YAnchorPositions, 0,1)
selectedData = numpy.take(self.scaledData, indices, axis = 0)
sum_i = numpy.add.reduce(selectedData)
# test if there are zeros in sum_i
if len(numpy.nonzero(sum_i)) < len(sum_i):
add = numpy.where(sum_i == 0, 1.0, 0.0)
sum_i += add
x_positions = numpy.sum(numpy.swapaxes(XAnchorPositions * numpy.swapaxes(selectedData, 0,1), 0,1), axis=0) * self.scaleFactor / sum_i
y_positions = numpy.sum(numpy.swapaxes(YAnchorPositions * numpy.swapaxes(selectedData, 0,1), 0,1), axis=0) * self.scaleFactor / sum_i
validData = self.getValidList(indices)
xPointsToAdd = {}
yPointsToAdd = {}
self.xLinesToAdd = {} # this is filled in addAnchorLine function
self.yLinesToAdd = {}
if self.showKNNModel == 1 and self.dataHasClass:
# variables and domain for the table
domain = orange.Domain([orange.FloatVariable("xVar"), orange.FloatVariable("yVar"), self.dataDomain.classVar])
table = orange.ExampleTable(domain)
# build an example table
for i in range(dataSize):
if validData[i]:
table.append(orange.Example(domain, [x_positions[i], y_positions[i], self.rawData[i].getclass()]))
kNNValues, probabilities = self.kNNOptimization.kNNClassifyData(table)
accuracy = copy(kNNValues)
measure = self.kNNOptimization.getQualityMeasure()
if self.dataDomain.classVar.varType == orange.VarTypes.Discrete:
if ((measure == CLASS_ACCURACY or measure == AVERAGE_CORRECT) and self.showCorrect) or (measure == BRIER_SCORE and not self.showCorrect):
kNNValues = [1.0 - val for val in kNNValues]
else:
if self.showCorrect:
kNNValues = [1.0 - val for val in kNNValues]
# fill and edge color palettes
bwColors = ColorPaletteBW(-1, 55, 255)
if self.dataHasContinuousClass:
preText = 'Mean square error : '
classColors = self.contPalette
else:
classColors = self.discPalette
if measure == CLASS_ACCURACY: preText = "Classification accuracy : "
elif measure == AVERAGE_CORRECT: preText = "Average correct classification : "
else: preText = "Brier score : "
for i in range(len(table)):
fillColor = bwColors.getRGB(kNNValues[i])
edgeColor = classColors.getRGB(self.originalData[self.dataClassIndex][i])
if not xPointsToAdd.has_key((fillColor, edgeColor, QwtSymbol.Ellipse, 1)):
xPointsToAdd[(fillColor, edgeColor, QwtSymbol.Ellipse, 1)] = []
yPointsToAdd[(fillColor, edgeColor, QwtSymbol.Ellipse, 1)] = []
xPointsToAdd[(fillColor, edgeColor, QwtSymbol.Ellipse, 1)].append(table[i][0].value)
yPointsToAdd[(fillColor, edgeColor, QwtSymbol.Ellipse, 1)].append(table[i][1].value)
self.addAnchorLine(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], fillColor, i, length)
# CONTINUOUS class
elif self.dataHasContinuousClass:
for i in range(dataSize):
if not validData[i]: continue
if useDifferentColors: newColor = self.contPalette[self.noJitteringScaledData[self.dataClassIndex][i]]
else: newColor = QColor(0,0,0)
self.addCurve(str(i), newColor, newColor, self.pointWidth, xData = [x_positions[i]], yData = [y_positions[i]])
self.addTooltipKey(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], newColor, i)
self.addAnchorLine(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], (newColor.red(), newColor.green(), newColor.blue()), i, length)
# DISCRETE class or no class at all
else:
color = (0,0,0)
symbol = self.curveSymbols[0]
for i in range(dataSize):
if not validData[i]: continue
if self.dataHasClass:
if useDifferentSymbols:
symbol = self.curveSymbols[int(self.originalData[self.dataClassIndex][i])]
if useDifferentColors:
color = self.discPalette.getRGB(self.originalData[self.dataClassIndex][i])
if not xPointsToAdd.has_key((color, color, symbol, 1)):
xPointsToAdd[(color, color, symbol, 1)] = []
yPointsToAdd[(color, color, symbol, 1)] = []
xPointsToAdd[(color, color, symbol, 1)].append(x_positions[i])
yPointsToAdd[(color, color, symbol, 1)].append(y_positions[i])
self.addAnchorLine(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], color, i, length)
self.addTooltipKey(x_positions[i], y_positions[i], XAnchorPositions[i], YAnchorPositions[i], QColor(*color), i)
# draw the points
for i, (fillColor, edgeColor, symbol, showFilled) in enumerate(xPointsToAdd.keys()):
xData = xPointsToAdd[(fillColor, edgeColor, symbol, showFilled)]
yData = yPointsToAdd[(fillColor, edgeColor, symbol, showFilled)]
self.addCurve(str(i), QColor(*fillColor), QColor(*edgeColor), self.pointWidth, symbol = symbol, xData = xData, yData = yData, showFilledSymbols = showFilled)
self.showAnchorLines()
self.xLinesToAdd = {}
self.yLinesToAdd = {}
# draw polygon
self.addCurve("polygon", QColor(0,0,0), QColor(0,0,0), 0, QwtPlotCurve.Lines, symbol = QwtSymbol.NoSymbol, xData = list(self.XAnchor) + [self.XAnchor[0]], yData = list(self.YAnchor) + [self.YAnchor[0]], lineWidth = 2)
#################
# draw the legend
if self.showLegend and self.dataHasClass:
# show legend for discrete class
if self.dataHasDiscreteClass:
self.addMarker(self.dataDomain.classVar.name, 0.87, 1.06, Qt.AlignLeft)
classVariableValues = getVariableValuesSorted(self.dataDomain.classVar)
for index in range(len(classVariableValues)):
if useDifferentColors: color = self.discPalette[index]
else: color = QColor(0,0,0)
y = 1.0 - index * 0.05
if not useDifferentSymbols:
curveSymbol = self.curveSymbols[0]
else:
curveSymbol = self.curveSymbols[index]
self.addCurve(str(index), color, color, self.pointWidth, symbol = curveSymbol, xData = [0.95, 0.95], yData = [y, y])
self.addMarker(classVariableValues[index], 0.90, y, Qt.AlignLeft | Qt.AlignVCenter)
# show legend for continuous class
elif self.dataHasContinuousClass:
xs = [1.15, 1.20, 1.20, 1.15]
count = 200
height = 2 / float(count)
for i in range(count):
y = -1.0 + i*2.0/float(count)
col = self.contPalette[i/float(count)]
c = PolygonCurve(QPen(col), QBrush(col), xs, [y,y, y+height, y+height])
c.attach(self)
# add markers for min and max value of color attribute
[minVal, maxVal] = self.attrValues[self.dataDomain.classVar.name]
self.addMarker("%s = %%.%df" % (self.dataDomain.classVar.name, self.dataDomain.classVar.numberOfDecimals) % (minVal), xs[0] - 0.02, -1.0 + 0.04, Qt.AlignLeft)
self.addMarker("%s = %%.%df" % (self.dataDomain.classVar.name, self.dataDomain.classVar.numberOfDecimals) % (maxVal), xs[0] - 0.02, +1.0 - 0.04, Qt.AlignLeft)
self.replot()
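The accuracy = copy(kNNValues) line keeps a snapshot of the classifier outputs before kNNValues is remapped for display. For a flat list of floats, copy.copy and a full slice are interchangeable:

import copy

knn_values = [0.91, 0.40, 0.75]

accuracy = copy.copy(knn_values)              # equivalent to knn_values[:]
knn_values = [1.0 - v for v in knn_values]    # remap for display

assert accuracy == [0.91, 0.40, 0.75]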
Example 83
Project: cstar_perf Source File: stress_compare.py
def stress_compare(revisions,
title,
log,
operations = [],
subtitle = '',
capture_fincore=False,
initial_destroy=True,
leave_data=False,
keep_page_cache=False,
git_fetch_before_test=True,
bootstrap_before_test=True,
teardown_after_test=True
):
"""
Run Stress on multiple C* branches and compare them.
revisions - List of dictionaries that contain cluster configurations
to trial. This is combined with the default config.
title - The title of the comparison
subtitle - A subtitle for more information (displayed smaller underneath)
log - The json file path to record stats to
operations - List of dictionaries indicating the operations. Example:
[# cassandra-stress command, node defaults to cluster defined 'stress_node'
{'type': 'stress',
'command': 'write n=19M -rate threads=50',
'node': 'node1',
'wait_for_compaction': True},
# nodetool command to run in parallel on nodes:
{'type': 'nodetool',
'command': 'decommission',
'nodes': ['node1','node2']},
# cqlsh script, node defaults to cluster defined 'stress_node'
{'type': 'cqlsh',
'script': "use my_ks; INSERT INTO blah (col1, col2) VALUES (val1, val2);",
'node': 'node1'}
]
capture_fincore - Enables capturing of linux-fincore logs of C* data files.
initial_destroy - Destroy all data before the first revision is run.
leave_data - Whether to leave the Cassandra data/commitlog/etc directories intact between revisions.
keep_page_cache - Whether to leave the linux page cache intact between revisions.
git_fetch_before_test (bool): If True, will update the cassandra.git with fab_common.git_repos
bootstrap_before_test (bool): If True, will bootstrap DSE / C* before running the operations
teardown_after_test (bool): If True, will shutdown DSE / C* after all of the operations
"""
validate_revisions_list(revisions)
validate_operations_list(operations)
pristine_config = copy.copy(fab_config)
# initial_destroy and git_fetch_before_test can be set in the job configuration,
# or manually in the call to this function.
# Either is fine, but they shouldn't conflict. If they do, a ValueError is raised.
initial_destroy = get_bool_if_method_and_config_values_do_not_conflict('initial_destroy',
initial_destroy,
pristine_config,
method_name='stress_compare')
if initial_destroy:
logger.info("Cleaning up from prior runs of stress_compare ...")
teardown(destroy=True, leave_data=False)
# https://datastax.jira.com/browse/CSTAR-633
git_fetch_before_test = get_bool_if_method_and_config_values_do_not_conflict('git_fetch_before_test',
git_fetch_before_test,
pristine_config,
method_name='stress_compare')
stress_shas = maybe_update_cassandra_git_and_setup_stress(operations, git_fetch=git_fetch_before_test)
# Flamegraph Setup
if flamegraph.is_enabled():
execute(flamegraph.setup)
with GracefulTerminationHandler() as handler:
for rev_num, revision_config in enumerate(revisions):
config = copy.copy(pristine_config)
config.update(revision_config)
revision = revision_config['revision']
config['log'] = log
config['title'] = title
config['subtitle'] = subtitle
product = dse if config.get('product') == 'dse' else cstar
# leave_data, bootstrap_before_test, and teardown_after_test can be set in the job configuration,
# or manually in the call to this function.
# Either is fine, but they shouldn't conflict. If they do, a ValueError is raised.
leave_data = get_bool_if_method_and_config_values_do_not_conflict('leave_data',
leave_data,
revision_config,
method_name='stress_compare')
# https://datastax.jira.com/browse/CSTAR-638
bootstrap_before_test = get_bool_if_method_and_config_values_do_not_conflict('bootstrap_before_test',
bootstrap_before_test,
revision_config,
method_name='stress_compare')
# https://datastax.jira.com/browse/CSTAR-639
teardown_after_test = get_bool_if_method_and_config_values_do_not_conflict('teardown_after_test',
teardown_after_test,
revision_config,
method_name='stress_compare')
logger.info("Bringing up {revision} cluster...".format(revision=revision))
# Drop the page cache between each revision, especially
# important when leave_data=True :
if not keep_page_cache:
drop_page_cache()
# Only fetch from git on the first run and if git_fetch_before_test is True
git_fetch_before_bootstrap = True if rev_num == 0 and git_fetch_before_test else False
if bootstrap_before_test:
revision_config['git_id'] = git_id = bootstrap(config,
destroy=initial_destroy,
leave_data=leave_data,
git_fetch=git_fetch_before_bootstrap)
else:
revision_config['git_id'] = git_id = config['revision']
if flamegraph.is_enabled(revision_config):
execute(flamegraph.ensure_stopped_perf_agent)
execute(flamegraph.start_perf_agent, rev_num)
if capture_fincore:
start_fincore_capture(interval=10)
last_stress_operation_id = 'None'
for operation_i, operation in enumerate(operations, 1):
try:
start = datetime.datetime.now()
stats = {
"id": str(uuid.uuid1()),
"type": operation['type'],
"revision": revision,
"git_id": git_id,
"start_date": start.isoformat(),
"label": revision_config.get('label', revision_config['revision']),
"test": '{operation_i}_{operation}'.format(
operation_i=operation_i,
operation=operation['type'])
}
if operation['type'] == 'stress':
last_stress_operation_id = stats['id']
# Default to all the nodes of the cluster if no
# nodes were specified in the command:
if operation.has_key('nodes'):
cmd = "{command} -node {hosts}".format(
command=operation['command'],
hosts=",".join(operation['nodes']))
elif '-node' in operation['command']:
cmd = operation['command']
else:
cmd = "{command} -node {hosts}".format(
command=operation['command'],
hosts=",".join([n for n in fab_config['hosts']]))
stats['command'] = cmd
stats['intervals'] = []
stats['test'] = '{operation_i}_{operation}'.format(
operation_i=operation_i, operation=cmd.strip().split(' ')[0]).replace(" ", "_")
logger.info('Running stress operation : {cmd} ...'.format(cmd=cmd))
# Run stress:
# (stress takes the stats as a parameter, and adds
# more as it runs):
stress_sha = stress_shas[operation.get('stress_revision', 'default')]
stats = stress(cmd, revision, stress_sha, stats=stats)
# Wait for all compactions to finish (unless disabled):
if operation.get('wait_for_compaction', True):
compaction_throughput = revision_config.get("compaction_throughput_mb_per_sec", 16)
wait_for_compaction(compaction_throughput=compaction_throughput)
elif operation['type'] == 'nodetool':
if 'nodes' not in operation:
operation['nodes'] = 'all'
if operation['nodes'] in ['all','ALL']:
nodes = [n for n in fab_config['hosts']]
else:
nodes = operation['nodes']
set_nodetool_path(os.path.join(product.get_bin_path(), 'nodetool'))
logger.info("Running nodetool on {nodes} with command: {command}".format(nodes=operation['nodes'], command=operation['command']))
stats['command'] = operation['command']
output = nodetool_multi(nodes, operation['command'])
stats['output'] = output
logger.info("Nodetool command finished on all nodes")
elif operation['type'] == 'cqlsh':
logger.info("Running cqlsh commands on {node}".format(node=operation['node']))
set_cqlsh_path(os.path.join(product.get_bin_path(), 'cqlsh'))
output = cqlsh(operation['script'], operation['node'])
stats['output'] = output.split("\n")
stats['command'] = operation['script']
logger.info("Cqlsh commands finished")
elif operation['type'] == 'bash':
nodes = operation.get('nodes', [n for n in fab_config['hosts']])
logger.info("Running bash commands on: {nodes}".format(nodes=nodes))
stats['output'] = bash(operation['script'], nodes)
stats['command'] = operation['script']
logger.info("Bash commands finished")
elif operation['type'] == 'spark_cassandra_stress':
nodes = operation.get('nodes', [n for n in fab_config['hosts']])
stress_node = config.get('stress_node', None)
# Note: once we have https://datastax.jira.com/browse/CSTAR-617, we should fix this to use
# client-tool when DSE_VERSION >= 4.8.0
# https://datastax.jira.com/browse/DSP-6025: dse client-tool
master_regex = re.compile(r"(.|\n)*(?P<master>spark:\/\/\d+.\d+.\d+.\d+:\d+)(.|\n)*")
master_out = dsetool_cmd(nodes[0], options='sparkmaster')[nodes[0]]
master_match = master_regex.match(master_out)
if not master_match:
raise ValueError('Could not find master address from "dsetool sparkmaster" cmd\n'
'Found output: {f}'.format(f=master_out))
master_string = master_match.group('master')
build_spark_cassandra_stress = bool(distutils.util.strtobool(
str(operation.get('build_spark_cassandra_stress', 'True'))))
remove_existing_spark_data = bool(distutils.util.strtobool(
str(operation.get('remove_existing_spark_data', 'True'))))
logger.info("Running spark_cassandra_stress on {stress_node} "
"using spark.cassandra.connection.host={node} and "
"spark-master {master}".format(stress_node=stress_node,
node=nodes[0],
master=master_string))
output = spark_cassandra_stress(operation['script'], nodes, stress_node=stress_node,
master=master_string,
build_spark_cassandra_stress=build_spark_cassandra_stress,
remove_existing_spark_data=remove_existing_spark_data)
stats['output'] = output.get('output', 'No output captured')
stats['spark_cass_stress_time_in_seconds'] = output.get('stats', {}).get('TimeInSeconds', 'No time captured')
stats['spark_cass_stress_ops_per_second'] = output.get('stats', {}).get('OpsPerSecond', 'No ops/s captured')
logger.info("spark_cassandra_stress finished")
elif operation['type'] == 'ctool':
logger.info("Running ctool with parameters: {command}".format(command=operation['command']))
ctool = Ctool(operation['command'], common.config)
output = execute(ctool.run)
stats['output'] = output
logger.info("ctool finished")
elif operation['type'] == 'dsetool':
if 'nodes' not in operation:
operation['nodes'] = 'all'
if operation['nodes'] in ['all','ALL']:
nodes = [n for n in fab_config['hosts']]
else:
nodes = operation['nodes']
dsetool_options = operation['script']
logger.info("Running dsetool {command} on {nodes}".format(nodes=operation['nodes'], command=dsetool_options))
stats['command'] = dsetool_options
output = dsetool_cmd(nodes=nodes, options=dsetool_options)
stats['output'] = output
logger.info("dsetool command finished on all nodes")
elif operation['type'] == 'dse':
logger.info("Running dse command on {node}".format(node=operation['node']))
output = dse_cmd(node=operation['node'], options=operation['script'])
stats['output'] = output.split("\n")
stats['command'] = operation['script']
logger.info("dse commands finished")
end = datetime.datetime.now()
stats['end_date'] = end.isoformat()
stats['op_duration'] = str(end - start)
log_stats(stats, file=log)
finally:
# Copy node logs:
retrieve_logs_and_create_tarball(job_id=stats['id'])
revision_config['last_log'] = stats['id']
if capture_fincore:
stop_fincore_capture()
log_dir = os.path.join(CSTAR_PERF_LOGS_DIR, stats['id'])
retrieve_fincore_logs(log_dir)
# Restart fincore capture if this is not the last
# operation:
if operation_i < len(operations):
start_fincore_capture(interval=10)
if flamegraph.is_enabled(revision_config):
# Generate and Copy node flamegraphs
execute(flamegraph.stop_perf_agent)
execute(flamegraph.generate_flamegraph, rev_num)
flamegraph_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf', 'flamegraph')
flamegraph_test_dir = os.path.join(flamegraph_dir, last_stress_operation_id)
retrieve_flamegraph(flamegraph_test_dir, rev_num+1)
sh.tar('cfvz', "{}.tar.gz".format(stats['id']), last_stress_operation_id, _cwd=flamegraph_dir)
shutil.rmtree(flamegraph_test_dir)
log_add_data(log, {'title':title,
'subtitle': subtitle,
'revisions': revisions})
if teardown_after_test:
if revisions[-1].get('leave_data', leave_data):
teardown(destroy=False, leave_data=True)
else:
kill_delay = 300 if profiler.yourkit_is_enabled(revision_config) else 0
teardown(destroy=True, leave_data=False, kill_delay=kill_delay)
if profiler.yourkit_is_enabled(revision_config):
yourkit_config = profiler.yourkit_get_config()
yourkit_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf', 'yourkit')
yourkit_test_dir = os.path.join(yourkit_dir, last_stress_operation_id)
retrieve_yourkit(yourkit_test_dir, rev_num+1)
sh.tar('cfvz', "{}.tar.gz".format(stats['id']),
last_stress_operation_id, _cwd=yourkit_dir)
shutil.rmtree(yourkit_test_dir)
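The copy-then-update idiom appears twice above: pristine_config = copy.copy(fab_config) preserves untouched defaults, and each revision runs against copy.copy(pristine_config) overlaid with its own settings. One caveat is that a shallow copy still shares any nested dicts or lists across revisions. Reduced:

import copy

pristine = {'hosts': ['node1', 'node2'], 'log': None}   # assumed defaults

for revision in ({'revision': 'cassandra-2.1'}, {'revision': 'trunk'}):
    config = copy.copy(pristine)    # fresh top level per revision
    config.update(revision)         # overlay the revision's settings
    # ... run the trial with `config` ...

assert 'revision' not in pristine   # defaults stay pristine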
Example 84
Project: termite-visualizations Source File: appadmin.py
def ccache():
cache.ram.initialize()
cache.disk.initialize()
form = FORM(
P(TAG.BUTTON(
T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
P(TAG.BUTTON(
T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
P(TAG.BUTTON(
T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
)
if form.accepts(request.vars, session):
clear_ram = False
clear_disk = False
session.flash = ""
if request.vars.yes:
clear_ram = clear_disk = True
if request.vars.ram:
clear_ram = True
if request.vars.disk:
clear_disk = True
if clear_ram:
cache.ram.clear()
session.flash += T("Ram Cleared")
if clear_disk:
cache.disk.clear()
session.flash += T("Disk Cleared")
redirect(URL(r=request))
try:
from guppy import hpy
hp = hpy()
except ImportError:
hp = False
import shelve
import os
import copy
import time
import math
from gluon import portalocker
ram = {
'entries': 0,
'bytes': 0,
'objects': 0,
'hits': 0,
'misses': 0,
'ratio': 0,
'oldest': time.time(),
'keys': []
}
disk = copy.copy(ram)
total = copy.copy(ram)
disk['keys'] = []
total['keys'] = []
def GetInHMS(seconds):
hours = math.floor(seconds / 3600)
seconds -= hours * 3600
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
seconds = math.floor(seconds)
return (hours, minutes, seconds)
for key, value in cache.ram.storage.iteritems():
if isinstance(value, dict):
ram['hits'] = value['hit_total'] - value['misses']
ram['misses'] = value['misses']
try:
ram['ratio'] = ram['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
ram['ratio'] = 0
else:
if hp:
ram['bytes'] += hp.iso(value[1]).size
ram['objects'] += hp.iso(value[1]).count
ram['entries'] += 1
if value[0] < ram['oldest']:
ram['oldest'] = value[0]
ram['keys'].append((key, GetInHMS(time.time() - value[0])))
folder = os.path.join(request.folder,'cache')
if not os.path.exists(folder):
os.mkdir(folder)
locker = open(os.path.join(folder, 'cache.lock'), 'a')
portalocker.lock(locker, portalocker.LOCK_EX)
disk_storage = shelve.open(
os.path.join(folder, 'cache.shelve'))
try:
for key, value in disk_storage.items():
if isinstance(value, dict):
disk['hits'] = value['hit_total'] - value['misses']
disk['misses'] = value['misses']
try:
disk['ratio'] = disk['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
disk['ratio'] = 0
else:
if hp:
disk['bytes'] += hp.iso(value[1]).size
disk['objects'] += hp.iso(value[1]).count
disk['entries'] += 1
if value[0] < disk['oldest']:
disk['oldest'] = value[0]
disk['keys'].append((key, GetInHMS(time.time() - value[0])))
finally:
portalocker.unlock(locker)
locker.close()
disk_storage.close()
total['entries'] = ram['entries'] + disk['entries']
total['bytes'] = ram['bytes'] + disk['bytes']
total['objects'] = ram['objects'] + disk['objects']
total['hits'] = ram['hits'] + disk['hits']
total['misses'] = ram['misses'] + disk['misses']
total['keys'] = ram['keys'] + disk['keys']
try:
total['ratio'] = total['hits'] * 100 / (total['hits'] +
total['misses'])
except (KeyError, ZeroDivisionError):
total['ratio'] = 0
if disk['oldest'] < ram['oldest']:
total['oldest'] = disk['oldest']
else:
total['oldest'] = ram['oldest']
ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
total['oldest'] = GetInHMS(time.time() - total['oldest'])
def key_table(keys):
return TABLE(
TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
*[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
**dict(_class='cache-keys',
_style="border-collapse: separate; border-spacing: .5em;"))
ram['keys'] = key_table(ram['keys'])
disk['keys'] = key_table(disk['keys'])
total['keys'] = key_table(total['keys'])
return dict(form=form, total=total,
ram=ram, disk=disk, object_stats=hp != False)
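Note how this function compensates for shallow-copy aliasing: after disk = copy.copy(ram) and total = copy.copy(ram), all three dicts still share the same 'keys' list, which is exactly why disk['keys'] = [] and total['keys'] = [] follow immediately. Without those rebinds every append would land in one shared list:

import copy

ram = {'entries': 0, 'keys': []}
disk = copy.copy(ram)

disk['keys'].append('x')         # aliased: ram sees the append too
assert ram['keys'] == ['x']

disk['keys'] = []                # the fix used above: rebind to a new list
disk['keys'].append('y')
assert ram['keys'] == ['x']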
Example 85
Project: eofs Source File: iris.py
def __init__(self, cubes, weights=None, center=True, ddof=1):
"""Create a MultivariateEof instance.
The EOF solution is computed at initialization time. Method
calls are used to retrieve computed quantities.
**Arguments:**
*cubes*
A list/tuple containing one or more `~iris.cube.Cube`
instances, each with two or more dimensions, containing the
data to be analysed. Time must be the first dimension of
each `~iris.cube.Cube`. Missing values are allowed provided
that they are constant with time in each field (e.g., values
of an oceanographic field over land).
**Optional arguments:**
*weights*
Sets the weighting method. One method can be chosen to apply
to all cubes in *cubes* or a sequence of options can be
given to specify a different weighting method for each cube
in *cubes*. The following pre-defined weighting methods
are available:
* *'area'* : Square-root of grid cell area normalized by
total grid area. Requires a latitude-longitude grid to be
present in the corresponding `~iris.cube.Cube`. This is a
fairly standard weighting strategy. If you are unsure
which method to use and you have gridded data then this
should be your first choice.
* *'coslat'* : Square-root of cosine of latitude. Requires a
latitude dimension to be present in the corresponding
`~iris.cube.Cube`.
* *None* : Equal weights for all grid points (*'none'* is
also accepted).
Alternatively a sequence of arrays of weights whose shapes
are compatible with the corresponding `~iris.cube.Cube`
instances in *cubes* may be supplied instead of
specifying a weighting method.
*center*
If *True*, the mean along the first axis of each cube in
*cubes* (the time-mean) will be removed prior to
analysis. If *False*, the mean along the first axis will not
be removed. Defaults to *True* (mean is removed).
The covariance interpretation relies on the input data being
anomalies with a time-mean of 0. Therefore this option
should usually be set to *True*. Setting this option to
*True* has the useful side effect of propagating missing
values along the time dimension, ensuring that a solution
can be found even if missing values occur in different
locations at different times.
*ddof*
'Delta degrees of freedom'. The divisor used to normalize
the covariance matrix is *N - ddof* where *N* is the
number of samples. Defaults to *1*.
**Returns:**
*solver*
An `MultivariateEof` instance.
**Examples:**
EOF analysis of two cubes with area-weighting::
from eofs.multivariate.iris import MultivariateEof
solver = MultivariateEof(cube1, cube2, weights='area')
"""
# Record the number of input cubes.
self._ncubes = len(cubes)
# Check that the weights argument is valid and refactor it if there
# is only one option provided.
if weights in (None, 'area', 'coslat'):
weights = [weights] * self._ncubes
elif len(weights) != self._ncubes:
raise ValueError('number of weights and cubes must match')
# Process each input cube recording its time dimension coordinate,
# other dimension coordinates, and defining its weight array.
self._time = []
self._coords = []
self._time_aux_coords = []
self._space_aux_coords = []
self._time_space_aux_coords = []
passweights = []
for cube, weight in zip(cubes, weights):
if not isinstance(cube, Cube):
raise TypeError('input is not an iris cube')
# Record the time dimension and its position. If its position is
# not 0 then raise an error.
time, time_dim = get_time_coord(cube)
if time_dim != 0:
raise ValueError('time must be the first dimension, '
'consider using the transpose() method')
self._time.append(copy(time))
# Make a list of the cube's other dimension coordinates.
coords = [copy(coord) for coord in cube.dim_coords]
coords.remove(time)
if not coords:
raise ValueError('one or more non-time '
'dimensions are required')
self._coords.append(coords)
# Make a list of the AuxCoords on the current cube and store
# them for reapplication later.
_t, _s, _ts = classified_aux_coords(cube)
self._time_aux_coords.append(_t)
self._space_aux_coords.append(_s)
self._time_space_aux_coords.append(_ts)
# Determine the weighting option for the cube.
if weight is None:
wtarray = None
else:
try:
scheme = weight.lower()
wtarray = weights_array(cube, scheme=scheme)
except AttributeError:
wtarray = weight
try:
wtarray = wtarray.astype(cube.data.dtype)
except AttributeError:
pass
passweights.append(wtarray)
# Get a list of all the auxiliary coordinates that span just time
# and are present on every input cube.
self._common_time_aux_coords = common_items(self._time_aux_coords)
# Create a solver.
self._solver = standard.MultivariateEof(
[cube.data for cube in cubes],
weights=passweights,
center=center,
ddof=ddof)
#: Number of EOFs in the solution.
self.neofs = self._solver.neofs
# Names of the cubes.
self._cube_names = [c.name(default='dataset').replace(' ', '_')
for c in cubes]
self._cube_var_names = [cube.var_name for cube in cubes]
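The recurring pattern above is copy(coord) on Iris coordinate objects: shallow copies are stored so that later rebinding of attributes on the caller's cubes cannot rewrite the recorded metadata. A minimal sketch of that idea with a stand-in class (Coord here is hypothetical, not the Iris type):

    from copy import copy

    class Coord(object):  # stand-in for an iris coordinate
        def __init__(self, name, points):
            self.name = name
            self.points = points

    def record_coords(coords):
        # Store shallow copies so rebinding attributes on the caller's
        # objects later does not change what we recorded.
        return [copy(c) for c in coords]

    lat = Coord('latitude', [0, 10, 20])
    stored = record_coords([lat])
    lat.name = 'renamed'
    print(stored[0].name)                   # 'latitude' -- the copy kept its attributes
    print(stored[0].points is lat.points)   # True -- a shallow copy still shares the data

Note the caveat in the last line: the copy protects attribute bindings, not the underlying point arrays.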
0
Example 86
Project: pcbmode Source File: component.py
def __init__(self, refdef, component):
"""
"""
self._refdef = refdef
self._layer = component.get('layer') or 'top'
self._rotate = component.get('rotate') or 0
if self._layer=='bottom':
self._rotate *= -1
self._rotate_point = utils.toPoint(component.get('rotate-point') or [0, 0])
self._scale = component.get('scale') or 1
self._location = component.get('location') or [0, 0]
# Get footprint definition and shapes
try:
self._footprint_name = component['footprint']
except:
msg.error("Cannot find a 'footprint' name for refdef %s." % refdef)
filename = self._footprint_name + '.json'
paths = [os.path.join(config.cfg['base-dir'],
config.cfg['locations']['shapes'],
filename),
os.path.join(config.cfg['base-dir'],
config.cfg['locations']['components'],
filename)]
footprint_dict = None
for path in paths:
if os.path.isfile(path):
footprint_dict = utils.dictFromJsonFile(path)
break
if footprint_dict == None:
fname_list = ""
for path in paths:
fname_list += " %s" % path
msg.error("Couldn't find shape file. Looked for it here:\n%s" % (fname_list))
footprint = Footprint(footprint_dict)
footprint_shapes = footprint.getShapes()
#------------------------------------------------
# Apply component-specific modifiers to footprint
#------------------------------------------------
for sheet in ['conductor', 'soldermask', 'solderpaste', 'pours', 'silkscreen', 'assembly', 'drills']:
for layer in config.stk['layer-names']:
for shape in footprint_shapes[sheet].get(layer) or []:
# In order to apply the rotation we need to adjust the location
shape.rotateLocation(self._rotate, self._rotate_point)
shape.transformPath(scale=self._scale,
rotate=self._rotate,
rotate_point=self._rotate_point,
mirror=shape.getMirrorPlacement(),
add=True)
#--------------------------------------------------------------
# Remove silkscreen and assembly shapes if instructed
#--------------------------------------------------------------
# If the 'show' flag is 'false' then remove these items from the
# shapes dictionary
#--------------------------------------------------------------
for sheet in ['silkscreen','assembly']:
try:
shapes_dict = component[sheet].get('shapes') or {}
except:
shapes_dict = {}
# If the setting is to not show silkscreen shapes for the
# component, delete the shapes from the shapes' dictionary
if shapes_dict.get('show') == False:
for pcb_layer in utils.getSurfaceLayers():
footprint_shapes[sheet][pcb_layer] = []
#----------------------------------------------------------
# Add silkscreen and assembly reference designator (refdef)
#----------------------------------------------------------
for sheet in ['silkscreen','assembly']:
try:
refdef_dict = component[sheet].get('refdef') or {}
except:
refdef_dict = {}
if refdef_dict.get('show') != False:
layer = refdef_dict.get('layer') or 'top'
# Rotate the refdef; if unspecified the rotation is the same as
# the rotation of the component
refdef_dict['rotate'] = refdef_dict.get('rotate') or 0
# Sometimes you'd want to keep all refdefs at the same angle
# and not rotated with the component
if refdef_dict.get('rotate-with-component') != False:
refdef_dict['rotate'] += self._rotate
refdef_dict['rotate-point'] = utils.toPoint(refdef_dict.get('rotate-point')) or self._rotate_point
refdef_dict['location'] = refdef_dict.get('location') or [0, 0]
refdef_dict['type'] = 'text'
refdef_dict['value'] = refdef_dict.get('value') or refdef
refdef_dict['font-family'] = (refdef_dict.get('font-family') or
config.stl['layout'][sheet]['refdef'].get('font-family') or
config.stl['defaults']['font-family'])
refdef_dict['font-size'] = (refdef_dict.get('font-size') or
config.stl['layout'][sheet]['refdef'].get('font-size') or
"2mm")
refdef_shape = Shape(refdef_dict)
refdef_shape.is_refdef = True
refdef_shape.rotateLocation(self._rotate, self._rotate_point)
style = Style(refdef_dict, sheet, 'refdef')
refdef_shape.setStyle(style)
# Add the refdef to the silkscreen/assembly list. It's
# important that this is added at the very end since the
# placement process assumes the refdef is last
try:
footprint_shapes[sheet][layer]
except:
footprint_shapes[sheet][layer] = []
footprint_shapes[sheet][layer].append(refdef_shape)
#------------------------------------------------------
# Invert layers
#------------------------------------------------------
# If the placement is on the bottom of the board then we need
# to invert the placement of all components. This affects the
# surface layers but also internal layers
if self._layer == 'bottom':
layers = config.stk['layer-names']
for sheet in ['conductor', 'pours', 'soldermask', 'solderpaste', 'silkscreen', 'assembly']:
sheet_dict = footprint_shapes[sheet]
sheet_dict_new = {}
for i, pcb_layer in enumerate(layers):
try:
sheet_dict_new[layers[len(layers)-i-1]] = copy.copy(sheet_dict[pcb_layer])
except:
continue
footprint_shapes[sheet] = copy.copy(sheet_dict_new)
self._footprint_shapes = footprint_shapes
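The layer-inversion step relies on copy.copy of each per-layer shape list so the flipped dictionary does not alias the original lists. A reduced sketch of that step (layer names and shapes are made up):

    import copy

    layers = ['top', 'internal', 'bottom']
    sheet = {'top': ['pad1'], 'bottom': ['pad2']}

    flipped = {}
    for i, layer in enumerate(layers):
        try:
            # Shallow-copy the layer's shape list; the list object is new,
            # but the shape objects inside it are still shared.
            flipped[layers[len(layers) - i - 1]] = copy.copy(sheet[layer])
        except KeyError:
            continue
    print(flipped)  # {'bottom': ['pad1'], 'top': ['pad2']}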
0
Example 87
Project: dolo Source File: portfolio_perturbation.py
def solve_portfolio_model(model, pf_names, order=2, lambda_name='lam', guess=None):
from dolo.compiler.compiler_python import GModel
if isinstance(model, GModel):
model = model.model
pf_model = model
from dolo import Variable, Parameter, Equation
import re
n_states = len(pf_model.symbols_s['states'])
states = pf_model.symbols_s['states']
steady_states = [Parameter(v.name+'_bar') for v in pf_model.symbols_s['states']]
n_pfs = len(pf_names)
pf_vars = [Variable(v) for v in pf_names]
res_vars = [Variable('res_'+str(i)) for i in range(n_pfs)]
pf_parms = [Parameter('K_'+str(i)) for i in range(n_pfs)]
pf_dparms = [[Parameter('K_'+str(i)+'_'+str(j)) for j in range(n_states)] for i in range(n_pfs)]
from sympy import Matrix
# creation of the new model
import copy
new_model = copy.copy(pf_model)
new_model.symbols_s['controls'] += res_vars
for v in res_vars + pf_vars:
new_model.calibration_s[v] = 0
new_model.symbols_s['parameters'].extend(steady_states)
for p in pf_parms + Matrix(pf_dparms)[:]:
new_model.symbols_s['parameters'].append(p)
new_model.calibration_s[p] = 0
compregex = re.compile('(.*)<=(.*)<=(.*)')
to_be_added_1 = []
to_be_added_2 = []
expressions = Matrix(pf_parms) + Matrix(pf_dparms)*( Matrix(states) - Matrix(steady_states))
for n,eq in enumerate(new_model.equations_groups['arbitrage']):
if 'complementarity' in eq.tags:
tg = eq.tags['complementarity']
[lhs,mhs,rhs] = compregex.match(tg).groups()
mhs = new_model.eval_string(mhs)
else:
mhs = None
if mhs in pf_vars:
i = pf_vars.index(mhs)
neq = Equation(mhs, expressions[i])
neq.tag(**eq.tags)
eq_res = Equation(eq.gap, res_vars[i])
eq_res.tag(eq_type='arbitrage')
to_be_added_2.append(eq_res)
new_model.equations_groups['arbitrage'][n] = neq
to_be_added_1.append(neq)
# new_model.equations_groups['arbitrage'].extend(to_be_added_1)
new_model.equations_groups['arbitrage'].extend(to_be_added_2)
new_model.update()
print("number of equations {}".format(len(new_model.equations)))
print("number of arbitrage equations {}".format( len(new_model.equations_groups['arbitrage'])) )
print('parameters_ordering')
print("number of parameters {}".format(len(new_model.symbols['parameters'])))
print("parameter values: {}".format(new_model.parameters))
# now, we need to solve for the optimal portfolio coefficients
from trash.dolo.numeric.perturbations_to_states import approximate_controls
dr = approximate_controls(new_model)
print('ok')
import numpy
n_controls = len(model.symbols_s['controls'])
def constant_residuals(x, return_dr=False):
d = {}
for i in range(n_pfs):
p = pf_parms[i]
v = pf_vars[i]
d[p] = x[i]
d[v] = x[i]
new_model.set_calibration(d)
# new_model.parameters_values[p] = x[i]
# new_model.init_values[v] = x[i]
if return_dr:
dr = approximate_controls(new_model, order=1, return_dr=True, lambda_name='lam')
return dr
X_bar, X_s, X_ss = approximate_controls(new_model, order=2, return_dr=False, lambda_name="lam")
return X_bar[n_controls-n_pfs:n_controls]
if guess is not None:
x0 = numpy.array(guess)
else:
x0 = numpy.zeros(n_pfs)
print('Zero order portfolios')
print('Initial guess: {}'.format(x0))
print('Initial error: {}'.format( constant_residuals(x0) ))
portfolios_0 = solver(constant_residuals, x0)
print('Solution: {}'.format(portfolios_0))
print('Final error: {}'.format( constant_residuals(portfolios_0) ))
if order == 1:
dr = constant_residuals(portfolios_0, return_dr=True)
return dr
def dynamic_residuals(X, return_dr=False):
x = X[:,0]
dx = X[:,1:]
d = {}
for i in range(n_pfs):
p = pf_parms[i]
v = pf_vars[i]
d[p] = x[i]
d[v] = x[i]
for j in range(n_states):
d[pf_dparms[i][j]] = dx[i,j]
new_model.set_calibration(d)
if return_dr:
dr = approximate_controls(new_model, order=2, lambda_name='lam')
return dr
else:
[X_bar, X_s, X_ss, X_sss] = approximate_controls(new_model, order=3, return_dr=False, lambda_name='lam')
crit = numpy.column_stack([
X_bar[n_controls-n_pfs:n_controls],
X_s[n_controls-n_pfs:n_controls,:],
])
return crit
y0 = numpy.column_stack([x0, numpy.zeros((n_pfs, n_states))])
print('Initial error:')
err = (dynamic_residuals(y0))
print( abs(err).max() )
portfolios_1 = solver(dynamic_residuals, y0)
print('First order portfolios : ')
print(portfolios_1)
print('Final error:')
print(dynamic_residuals(portfolios_1))
dr = dynamic_residuals(portfolios_1, return_dr=True)
# TODO: remove coefficients of criteria
return dr
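copy.copy(pf_model) above produces a shallow copy, so containers such as symbols_s are shared with the original model and the in-place mutations that follow touch both objects. A small sketch of the pitfall and one way to isolate the copy (Model is a stand-in class, not a dolo type):

    import copy

    class Model(object):
        def __init__(self):
            self.symbols = {'controls': ['c']}

    base = Model()
    variant = copy.copy(base)
    # Shallow copy: variant.symbols IS base.symbols. Appending through
    # the copy would leak into the original, so rebuild the container
    # before extending it.
    variant.symbols = dict(base.symbols)
    variant.symbols['controls'] = base.symbols['controls'] + ['res_0']
    print(base.symbols['controls'])     # ['c']
    print(variant.symbols['controls'])  # ['c', 'res_0']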
0
Example 88
Project: nodewatcher Source File: base.py
def generate_form_for_class(context, prefix, data, index, instance=None,
force_selector_widget=False, static=False):
"""
A helper function for generating a form for a specific registry item class.
"""
selected_item = instance.__class__ if instance is not None else None
previous_item = None
existing_mid = (instance.pk if instance is not None else 0) or 0
# Parse a form that holds the item selector
meta_form = RegistryMetaForm(
context, selected_item, data=data,
prefix=context.get_prefix(prefix),
force_selector_widget=force_selector_widget,
static=static, instance_mid=existing_mid,
)
if not (context.flags & FORM_INITIAL) and not static:
if not meta_form.is_valid():
context.validation_errors = True
else:
selected_item = context.items.get(meta_form.cleaned_data['item'])
previous_item = context.items.get(meta_form.cleaned_data['prev_item'])
existing_mid = meta_form.cleaned_data['mid']
# Fallback to default item in case of severe problems (this should not happen in normal
# operation, but might happen when someone tampers with the form)
if selected_item is None:
selected_item = context.items.values()[0]
# Items have changed between submissions, so we should copy some field values from the
# previous form to the new one
if previous_item is not None and selected_item != previous_item and not static:
pform = previous_item._registry.get_form_class()(
data,
prefix=context.get_prefix(prefix, previous_item),
)
# Perform a partial clean and copy all valid fields to the new form
pform.cleaned_data = {}
pform._errors = {}
pform._clean_fields()
for field in pform.cleaned_data.keys():
prev_item_field = context.get_prefix(prefix, previous_item, field)
if prev_item_field in data:
data[context.get_prefix(prefix, selected_item, field)] = data[prev_item_field]
# When there is no instance, we should create one so we will be able to save somewhere
if not (context.flags & FORM_INITIAL) and context.flags & FORM_OUTPUT and instance is None:
# Check if we can reuse an existing instance
existing_instance = context.existing_models.get(existing_mid, None)
if isinstance(existing_instance, selected_item):
instance = existing_instance
instance._skip_delete = True
else:
instance = selected_item(root=context.root)
# Populate data with default values from the registry item instance
if selected_item != previous_item and instance is not None:
model_data = django_forms.model_to_dict(instance)
for field_name, values in model_data.iteritems():
field_name_prefix = context.get_prefix(prefix, selected_item, field_name)
if data is not None and field_name_prefix not in data:
context.data_from_field(prefix, selected_item, field_name, values, data)
# Ensure that the instance root is properly set.
if instance is not None:
instance.root = context.root
# Now generate a form for the selected item
form_prefix = context.get_prefix(prefix, selected_item)
form = selected_item._registry.get_form_class()(
data,
instance=instance,
prefix=form_prefix,
)
# Fetch the current item in form state representation.
form_modified = False
form_attributes = {}
if context.flags & FORM_OUTPUT:
state_item = context.form_state.lookup_item(selected_item, index, context.hierarchy_parent_current)
if state_item is not None:
form_attributes['index'] = state_item._id
def modify_to_context(obj):
if not hasattr(obj, 'modify_to_context'):
return False
obj.modify_to_context(state_item, context.form_state, context.request)
return True
# Enable forms to modify themselves according to the current context
form_modified = modify_to_context(form)
# Enable form fields to modify themselves according to the current context
for name, field in form.fields.iteritems():
if modify_to_context(field):
form_modified = True
else:
state_item = None
config = None
if not (context.flags & FORM_INITIAL):
if context.flags & FORM_OUTPUT:
# Perform a full validation and save the form
if form.is_valid():
form_id = (instance._registry.registry_id, id(context.hierarchy_parent_obj), index)
assert form_id not in context.pending_save_forms
# Setup dependencies among forms
dependencies = set()
for name, field in form.fields.iteritems():
if hasattr(field, 'get_dependencies'):
value = field.widget.value_from_datadict(form.data, form.files, form.add_prefix(name))
dependencies.update(field.get_dependencies(value))
# If we have a parent, we depend on it
if context.hierarchy_parent_obj is not None:
parent_id = (
context.hierarchy_parent_obj._registry.registry_id,
id(context.hierarchy_grandparent_obj),
context.hierarchy_parent_index,
)
dependencies.add(parent_id)
context.pending_save_foreign_keys.setdefault(parent_id, []).append(
(form_id, selected_item._registry.item_parent_field.name)
)
# Add form to list of forms pending save together with dependencies
context.pending_save_forms[form_id] = {
'registry_id': instance._registry.registry_id,
'index': index,
'form': form,
'form_id': form_id,
'dependencies': dependencies,
}
else:
context.validation_errors = True
# Update the current config item as it may have changed due to modify_to_context calls.
if form_modified and state_item is not None:
pform = copy.copy(form)
pform.cleaned_data = {}
pform._errors = {}
pform._clean_fields()
for field in state_item._meta.fields:
if not field.editable or field.rel is not None:
continue
try:
setattr(state_item, field.name, pform.cleaned_data.get(field.name, None))
except AttributeError:
pass
else:
# We are only interested in all the current values even if they might be incomplete
# and/or invalid, so we can't do full form validation.
form.cleaned_data = {}
form._errors = {}
form._clean_fields()
config = context.form_state.create_item(selected_item, form.cleaned_data, context.hierarchy_parent_partial, index)
# Generate a new meta form, since the previous item has now changed
meta_form = RegistryMetaForm(
context, selected_item,
prefix=context.get_prefix(prefix),
force_selector_widget=force_selector_widget,
static=static, instance_mid=existing_mid,
)
# Pack forms into a proper abstract representation.
if selected_item._registry.has_children():
sub_context = RegistryFormContext(
regpoint=context.regpoint,
request=context.request,
root=context.root,
data=context.data,
save=context.save,
hierarchy_prefix=form_prefix,
hierarchy_parent_cls=selected_item,
hierarchy_parent_obj=instance,
hierarchy_parent_partial=config,
hierarchy_parent_current=state_item,
hierarchy_parent_index=index,
hierarchy_grandparent_obj=context.hierarchy_parent_obj,
validation_errors=False,
pending_save_forms=context.pending_save_forms,
pending_save_foreign_keys=context.pending_save_foreign_keys,
form_state=context.form_state,
flags=context.flags,
)
forms = NestedRegistryRenderItem(form, meta_form, prepare_forms(sub_context), **form_attributes)
# Validation errors flag must propagate upwards
if sub_context.validation_errors:
context.validation_errors = True
else:
forms = BasicRegistryRenderItem(form, meta_form, **form_attributes)
return forms
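Here pform = copy.copy(form) clones a bound form so its cleaned_data and _errors can be reset for a partial clean without disturbing the real form. A minimal sketch of why that works (Form is a toy class, not the Django one):

    import copy

    class Form(object):
        def __init__(self, data):
            self.data = data
            self.errors = {'field': 'invalid'}

    form = Form({'field': 'x'})
    probe = copy.copy(form)
    # Rebinding attributes on the shallow copy leaves the original
    # untouched; only in-place mutation of shared objects would leak.
    probe.errors = {}
    print(form.errors)   # {'field': 'invalid'}
    print(probe.errors)  # {}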
0
Example 89
Project: pyelftools Source File: lineprogram.py
def _decode_line_program(self):
entries = []
state = LineState(self.header['default_is_stmt'])
def add_entry_new_state(cmd, args, is_extended=False):
# Add an entry that sets a new state.
# After adding, clear some state registers.
entries.append(LineProgramEntry(
cmd, is_extended, args, copy.copy(state)))
state.basic_block = False
state.prologue_end = False
state.epilogue_begin = False
def add_entry_old_state(cmd, args, is_extended=False):
# Add an entry that doesn't visibly set a new state
entries.append(LineProgramEntry(cmd, is_extended, args, None))
offset = self.program_start_offset
while offset < self.program_end_offset:
opcode = struct_parse(
self.structs.Dwarf_uint8(''),
self.stream,
offset)
# As an exercise in avoiding premature optimization, if...elif
# chains are used here for standard and extended opcodes instead
# of dispatch tables. This keeps the code much cleaner. Besides,
# the majority of instructions in a typical program are special
# opcodes anyway.
if opcode >= self.header['opcode_base']:
# Special opcode (follow the recipe in 6.2.5.1)
maximum_operations_per_instruction = self['maximum_operations_per_instruction']
adjusted_opcode = opcode - self['opcode_base']
operation_advance = adjusted_opcode // self['line_range']
address_addend = (
self['minimum_instruction_length'] *
((state.op_index + operation_advance) //
maximum_operations_per_instruction))
state.address += address_addend
state.op_index = (state.op_index + operation_advance) % maximum_operations_per_instruction
line_addend = self['line_base'] + (adjusted_opcode % self['line_range'])
state.line += line_addend
add_entry_new_state(
opcode, [line_addend, address_addend, state.op_index])
elif opcode == 0:
# Extended opcode: start with a zero byte, followed by
# instruction size and the instruction itself.
inst_len = struct_parse(self.structs.Dwarf_uleb128(''),
self.stream)
ex_opcode = struct_parse(self.structs.Dwarf_uint8(''),
self.stream)
if ex_opcode == DW_LNE_end_sequence:
state.end_sequence = True
add_entry_new_state(ex_opcode, [], is_extended=True)
# reset state
state = LineState(self.header['default_is_stmt'])
elif ex_opcode == DW_LNE_set_address:
operand = struct_parse(self.structs.Dwarf_target_addr(''),
self.stream)
state.address = operand
add_entry_old_state(ex_opcode, [operand], is_extended=True)
elif ex_opcode == DW_LNE_define_file:
operand = struct_parse(
self.structs.Dwarf_lineprog_file_entry, self.stream)
self['file_entry'].append(operand)
add_entry_old_state(ex_opcode, [operand], is_extended=True)
else:
# Unknown, but need to roll forward the stream because the
# length is specified. Seek forward inst_len - 1 because
# we've already read the extended opcode, which takes part
# in the length.
self.stream.seek(inst_len - 1, os.SEEK_CUR)
else: # 0 < opcode < opcode_base
# Standard opcode
if opcode == DW_LNS_copy:
add_entry_new_state(opcode, [])
elif opcode == DW_LNS_advance_pc:
operand = struct_parse(self.structs.Dwarf_uleb128(''),
self.stream)
address_addend = (
operand * self.header['minimum_instruction_length'])
state.address += address_addend
add_entry_old_state(opcode, [address_addend])
elif opcode == DW_LNS_advance_line:
operand = struct_parse(self.structs.Dwarf_sleb128(''),
self.stream)
state.line += operand
elif opcode == DW_LNS_set_file:
operand = struct_parse(self.structs.Dwarf_uleb128(''),
self.stream)
state.file = operand
add_entry_old_state(opcode, [operand])
elif opcode == DW_LNS_set_column:
operand = struct_parse(self.structs.Dwarf_uleb128(''),
self.stream)
state.column = operand
add_entry_old_state(opcode, [operand])
elif opcode == DW_LNS_negate_stmt:
state.is_stmt = not state.is_stmt
add_entry_old_state(opcode, [])
elif opcode == DW_LNS_set_basic_block:
state.basic_block = True
add_entry_old_state(opcode, [])
elif opcode == DW_LNS_const_add_pc:
adjusted_opcode = 255 - self['opcode_base']
address_addend = ((adjusted_opcode // self['line_range']) *
self['minimum_instruction_length'])
state.address += address_addend
add_entry_old_state(opcode, [address_addend])
elif opcode == DW_LNS_fixed_advance_pc:
operand = struct_parse(self.structs.Dwarf_uint16(''),
self.stream)
state.address += operand
add_entry_old_state(opcode, [operand])
elif opcode == DW_LNS_set_prologue_end:
state.prologue_end = True
add_entry_old_state(opcode, [])
elif opcode == DW_LNS_set_epilogue_begin:
state.epilogue_begin = True
add_entry_old_state(opcode, [])
elif opcode == DW_LNS_set_isa:
operand = struct_parse(self.structs.Dwarf_uleb128(''),
self.stream)
state.isa = operand
add_entry_old_state(opcode, [operand])
else:
dwarf_assert(False, 'Invalid standard line program opcode: %s' % (
opcode,))
offset = self.stream.tell()
return entries
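copy.copy(state) is what makes the decoded table correct: each LineProgramEntry must freeze the register values at that point, while state itself keeps mutating. A stripped-down sketch of the snapshot pattern (LineState here is simplified):

    import copy

    class LineState(object):
        def __init__(self):
            self.address = 0
            self.line = 1

    entries = []
    state = LineState()
    for addend in (4, 4, 8):
        state.address += addend
        state.line += 1
        # Snapshot the registers; appending `state` itself would leave
        # every entry aliasing the same, final state.
        entries.append(copy.copy(state))
    print([(e.address, e.line) for e in entries])  # [(4, 2), (8, 3), (16, 4)]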
0
Example 90
Project: Attentive_reader Source File: attentive_reader.py
def train(dim_word_desc=400,# word vector dimensionality
dim_word_q=400,
dim_word_ans=600,
dim_proj=300,
dim=400,# the number of LSTM units
encoder_desc='lstm',
encoder_desc_word='lstm',
encoder_desc_sent='lstm',
use_dq_sims=False,
eyem=None,
learn_h0=False,
use_desc_skip_c_g=False,
debug=False,
encoder_q='lstm',
patience=10,
max_epochs=5000,
dispFreq=100,
decay_c=0.,
alpha_c=0.,
clip_c=-1.,
lrate=0.01,
n_words_q=49145,
n_words_desc=115425,
n_words_ans=409,
pkl_train_files=None,
pkl_valid_files=None,
maxlen=2000, # maximum length of the description
optimizer='rmsprop',
batch_size=2,
vocab=None,
valid_batch_size=16,
use_elu_g=False,
saveto='model.npz',
model_dir=None,
ms_nlayers=3,
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
datasets=[None],
truncate=400,
momentum=0.9,
use_bidir=False,
cost_mask=None,
valid_datasets=['/u/yyu/stor/caglar/rc-data/cnn/cnn_test_data.h5',
'/u/yyu/stor/caglar/rc-data/cnn/cnn_valid_data.h5'],
dropout_rate=0.5,
use_dropout=True,
reload_=True,
**opt_ds):
ensure_dir_exists(model_dir)
mpath = os.path.join(model_dir, saveto)
mpath_best = os.path.join(model_dir, prfx("best", saveto))
mpath_last = os.path.join(model_dir, prfx("last", saveto))
mpath_stats = os.path.join(model_dir, prfx("stats", saveto))
# Model options
model_options = locals().copy()
model_options['use_sent_reps'] = opt_ds['use_sent_reps']
stats = defaultdict(list)
del model_options['eyem']
del model_options['cost_mask']
if cost_mask is not None:
cost_mask = sharedX(cost_mask)
# reload options and parameters
if reload_:
print "Reloading the model."
if os.path.exists(mpath_best):
print "Reloading the best model from %s." % mpath_best
with open(os.path.join(mpath_best, '%s.pkl' % mpath_best), 'rb') as f:
model_options = pkl.load(f)
params = init_params(model_options)
params = load_params(mpath_best, params)
elif os.path.exists(mpath):
print "Reloading the model from %s." % mpath
with open(os.path.join(mpath, '%s.pkl' % mpath), 'rb') as f:
model_options = pkl.load(f)
params = init_params(model_options)
params = load_params(mpath, params)
else:
raise IOError("Couldn't open the file.")
else:
print "Couldn't reload the models initializing from scratch."
params = init_params(model_options)
if datasets[0]:
print "Short dataset", datasets[0]
print 'Loading data'
print 'Building model'
if pkl_train_files is None or pkl_valid_files is None:
train, valid, test = load_data(path=datasets[0],
valid_path=valid_datasets[0],
test_path=valid_datasets[1],
batch_size=batch_size,
**opt_ds)
else:
train, valid, test = load_pkl_data(train_file_paths=pkl_train_files,
valid_file_paths=pkl_valid_files,
batch_size=batch_size,
vocab=vocab,
eyem=eyem,
**opt_ds)
tparams = init_tparams(params)
trng, use_noise, inps_d, \
opt_ret, \
cost, errors, ent_errors, ent_derrors, probs = \
build_model(tparams,
model_options,
prepare_data if not opt_ds['use_sent_reps'] \
else prepare_data_sents,
valid,
cost_mask=cost_mask)
alphas = opt_ret['dec_alphas']
if opt_ds['use_sent_reps']:
inps = [inps_d["desc"], \
inps_d["word_mask"], \
inps_d["q"], \
inps_d['q_mask'], \
inps_d['ans'], \
inps_d['wlen'],
inps_d['slen'], inps_d['qlen'],\
inps_d['ent_mask']
]
else:
inps = [inps_d["desc"], \
inps_d["word_mask"], \
inps_d["q"], \
inps_d['q_mask'], \
inps_d['ans'], \
inps_d['wlen'], \
inps_d['qlen'], \
inps_d['ent_mask']]
outs = [cost, errors, probs, alphas]
if ent_errors:
outs += [ent_errors]
if ent_derrors:
outs += [ent_derrors]
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, outs, profile=profile)
print 'Done'
# Apply weight decay on the feed-forward connections
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
if "logit" in kk or "ff" in kk:
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
# after any regularizer
print 'Computing gradient...',
grads = safe_grad(cost, itemlist(tparams))
print 'Done'
# Gradient clipping:
if clip_c > 0.:
g2 = get_norms(grads)
for p, g in grads.iteritems():
grads[p] = tensor.switch(g2 > (clip_c**2),
(g / tensor.sqrt(g2 + 1e-8)) * clip_c,
g)
inps.pop()
if optimizer.lower() == "adasecant":
learning_rule = Adasecant(delta_clip=25.0,
use_adagrad=True,
grad_clip=0.25,
gamma_clip=0.)
elif optimizer.lower() == "rmsprop":
learning_rule = RMSPropMomentum(init_momentum=momentum)
elif optimizer.lower() == "adam":
learning_rule = Adam()
elif optimizer.lower() == "adadelta":
learning_rule = AdaDelta()
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
learning_rule = None
if learning_rule:
f_grad_shared, f_update = learning_rule.get_funcs(learning_rate=lr,
grads=grads,
inp=inps,
cost=cost,
errors=errors)
else:
f_grad_shared, f_update = eval(optimizer)(lr,
tparams,
grads,
inps,
cost,
errors)
print 'Done'
print 'Optimization'
history_errs = []
# reload history
if reload_ and os.path.exists(mpath):
history_errs = list(numpy.load(mpath)['history_errs'])
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
best_found = False
uidx = 0
estop = False
train_cost_ave, train_err_ave, \
train_gnorm_ave = reset_train_vals()
for eidx in xrange(max_epochs):
n_samples = 0
if train.done:
train.reset()
for d_, q_, a, em in train:
n_samples += len(a)
uidx += 1
use_noise.set_value(1.)
if opt_ds['use_sent_reps']:
# To mask the description and the question.
d, d_mask, q, q_mask, dlen, slen, qlen = prepare_data_sents(d_,
q_)
if d is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
cost, errors, gnorm, pnorm = f_grad_shared(d,
d_mask,
q,
q_mask,
a,
dlen,
slen,
qlen)
else:
d, d_mask, q, q_mask, dlen, qlen = prepare_data(d_, q_)
if d is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
cost, errors, gnorm, pnorm = f_grad_shared(d, d_mask,
q, q_mask,
a,
dlen,
qlen)
upnorm = f_update(lrate)
ud = time.time() - ud_start
# Collect the running ave train stats.
train_cost_ave = running_ave(train_cost_ave,
cost)
train_err_ave = running_ave(train_err_ave,
errors)
train_gnorm_ave = running_ave(train_gnorm_ave,
gnorm)
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
import ipdb; ipdb.set_trace()
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, ' Update ', uidx, \
' Cost ', cost, ' UD ', ud, \
' UpNorm ', upnorm[0].tolist(), \
' GNorm ', gnorm, \
' Pnorm ', pnorm, 'Terrors ', errors
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
if best_p is not None and best_found:
numpy.savez(mpath_best, history_errs=history_errs, **best_p)
pkl.dump(model_options, open('%s.pkl' % mpath_best, 'wb'))
else:
params = unzip(tparams)
numpy.savez(mpath, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl' % mpath, 'wb'))
pkl.dump(stats, open("%s.pkl" % mpath_stats, 'wb'))
print 'Done'
print_param_norms(tparams)
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
if valid.done:
valid.reset()
valid_costs, valid_errs, valid_probs, \
valid_alphas, error_ent, error_dent = eval_model(f_log_probs,
prepare_data if not opt_ds['use_sent_reps'] \
else prepare_data_sents,
model_options,
valid,
use_sent_rep=opt_ds['use_sent_reps'])
valid_alphas_ = numpy.concatenate([va.argmax(0) for va in valid_alphas.tolist()], axis=0)
valid_err = valid_errs.mean()
valid_cost = valid_costs.mean()
valid_alpha_ent = -negentropy(valid_alphas)
mean_valid_alphas = valid_alphas_.mean()
std_valid_alphas = valid_alphas_.std()
mean_valid_probs = valid_probs.argmax(1).mean()
std_valid_probs = valid_probs.argmax(1).std()
history_errs.append([valid_cost, valid_err])
stats['train_err_ave'].append(train_err_ave)
stats['train_cost_ave'].append(train_cost_ave)
stats['train_gnorm_ave'].append(train_gnorm_ave)
stats['valid_errs'].append(valid_err)
stats['valid_costs'].append(valid_cost)
stats['valid_err_ent'].append(error_ent)
stats['valid_err_desc_ent'].append(error_dent)
stats['valid_alphas_mean'].append(mean_valid_alphas)
stats['valid_alphas_std'].append(std_valid_alphas)
stats['valid_alphas_ent'].append(valid_alpha_ent)
stats['valid_probs_mean'].append(mean_valid_probs)
stats['valid_probs_std'].append(std_valid_probs)
if uidx == 0 or valid_err <= numpy.array(history_errs)[:, 1].min():
best_p = unzip(tparams)
bad_count = 0
best_found = True
else:
best_found = False
if numpy.isnan(valid_err):
import ipdb; ipdb.set_trace()
print "============================"
print '\t>>>Valid error: ', valid_err, \
' Valid cost: ', valid_cost
print '\t>>>Valid pred mean: ', mean_valid_probs, \
' Valid pred std: ', std_valid_probs
print '\t>>>Valid alphas mean: ', mean_valid_alphas, \
' Valid alphas std: ', std_valid_alphas, \
' Valid alpha negent: ', valid_alpha_ent, \
' Valid error ent: ', error_ent, \
' Valid error desc ent: ', error_dent
print "============================"
print "Running average train stats "
print '\t>>>Train error: ', train_err_ave, \
' Train cost: ', train_cost_ave, \
' Train grad norm: ', train_gnorm_ave
print "============================"
train_cost_ave, train_err_ave, \
train_gnorm_ave = reset_train_vals()
print 'Seen %d samples' % n_samples
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
valid.reset()
valid_cost, valid_error, valid_probs, \
valid_alphas, error_ent = eval_model(f_log_probs,
prepare_data if not opt_ds['use_sent_reps'] \
else prepare_data_sents,
model_options, valid,
use_sent_rep=opt_ds['use_sent_reps'])
print " Final eval resuts: "
print 'Valid error: ', valid_error.mean()
print 'Valid cost: ', valid_cost.mean()
print '\t>>>Valid pred mean: ', valid_probs.mean(), \
' Valid pred std: ', valid_probs.std(), \
' Valid error ent: ', error_ent
params = copy.copy(best_p)
numpy.savez(mpath_last,
zipped_params=best_p,
history_errs=history_errs,
**params)
return valid_err, valid_cost
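params = copy.copy(best_p) near the end duplicates only the dict, not the parameter arrays inside it, which is enough here because numpy.savez merely reads the arrays. A minimal sketch of that checkpointing idiom (names are illustrative):

    import copy
    import numpy

    best_p = {'W': numpy.zeros((2, 2)), 'b': numpy.ones(2)}
    # Shallow copy: a new dict whose values are the very same arrays.
    params = copy.copy(best_p)
    numpy.savez('checkpoint.npz', **params)
    print(params['W'] is best_p['W'])  # True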
0
Example 91
Project: evennia Source File: cmdhandler.py
@inlineCallbacks
def cmdhandler(called_by, raw_string, _testing=False, callertype="session", session=None, **kwargs):
"""
This is the main mechanism that handles any string sent to the engine.
Args:
called_by (Session, Player or Object): Object from which this
command was called. What this is depends on the game state.
raw_string (str): The command string as given on the command line.
_testing (bool, optional): Used for debug purposes and decides if we
should actually execute the command or not. If True, the
command instance will be returned.
callertype (str, optional): One of "session", "player" or
"object". These are treated in decending order, so when the
Session is the caller, it will merge its own cmdset into
cmdsets from both Player and eventual puppeted Object (and
cmdsets in its room etc). A Player will only include its own
cmdset and the Objects and so on. Merge order is the same
order, so that Object cmdsets are merged in last, giving them
precedence for same-name and same-prio commands.
session (Session, optional): Relevant if callertype is "player" - the session will help
retrieve the correct cmdsets from puppeted objects.
Kwargs:
kwargs (any): other keyword arguments will be assigned as named variables on the
retrieved command object *before* it is executed. This is unused
in default Evennia but may be used by code to set custom flags or
special operating conditions for a command as it executes.
Returns:
deferred (Deferred): This deferred is fired with the return
value of the command's `func` method. This is not used in
default Evennia.
"""
@inlineCallbacks
def _run_command(cmd, cmdname, args):
"""
Helper function: This initializes and runs the Command
instance once the parser has identified it as either a normal
command or one of the system commands.
Args:
cmd (Command): command object
cmdname (str): name of command
args (str): extra text entered after the identified command
Returns:
deferred (Deferred): this will fire with the return of the
command's `func` method.
Raises:
RuntimeError: If command recursion limit was reached.
"""
global _COMMAND_NESTING
try:
# Assign useful variables to the instance
cmd.caller = caller
cmd.cmdstring = cmdname
cmd.args = args
cmd.cmdset = cmdset
cmd.session = session
cmd.player = player
cmd.raw_string = unformatted_raw_string
#cmd.obj # set via on-object cmdset handler for each command,
# since this may be different for every command when
# merging multiple cmdsets
if hasattr(cmd, 'obj') and hasattr(cmd.obj, 'scripts'):
# cmd.obj is automatically made available by the cmdhandler.
# we make sure to validate its scripts.
yield cmd.obj.scripts.validate()
if _testing:
# only return the command instance
returnValue(cmd)
# assign custom kwargs to found cmd object
for key, val in kwargs.items():
setattr(cmd, key, val)
_COMMAND_NESTING[called_by] += 1
if _COMMAND_NESTING[called_by] > _COMMAND_RECURSION_LIMIT:
err = _ERROR_RECURSION_LIMIT.format(recursion_limit=_COMMAND_RECURSION_LIMIT,
raw_string=unformatted_raw_string,
cmdclass=cmd.__class__)
raise RuntimeError(err)
# pre-command hook
abort = yield cmd.at_pre_cmd()
if abort:
# abort sequence
returnValue(abort)
# Parse and execute
yield cmd.parse()
# main command code
# (return value is normally None)
ret = yield cmd.func()
# post-command hook
yield cmd.at_post_cmd()
if cmd.save_for_next:
# store a reference to this command, possibly
# accessible by the next command.
caller.ndb.last_cmd = yield copy(cmd)
else:
caller.ndb.last_cmd = None
# return result to the deferred
returnValue(ret)
except Exception:
_msg_err(caller, _ERROR_UNTRAPPED)
raise ErrorReported
finally:
_COMMAND_NESTING[called_by] -= 1
raw_string = to_unicode(raw_string, force_string=True)
session, player, obj = session, None, None
if callertype == "session":
session = called_by
player = session.player
obj = session.puppet
elif callertype == "player":
player = called_by
if session:
obj = yield session.puppet
elif callertype == "object":
obj = called_by
else:
raise RuntimeError("cmdhandler: callertype %s is not valid." % callertype)
# the caller will be the one to receive messages and exert its permissions.
# we assign the caller with preference 'bottom up'
caller = obj or player or session
# The error_to is the default recipient for errors. Tries to make sure a player
# does not get spammed for errors while preserving character mirroring.
error_to = obj or session or player
try: # catch bugs in cmdhandler itself
try: # catch special-type commands
cmdset = yield get_and_merge_cmdsets(caller, session, player, obj,
callertype)
if not cmdset:
# this is bad and shouldn't happen.
raise NoCmdSets
unformatted_raw_string = raw_string
raw_string = raw_string.strip()
if not raw_string:
# Empty input. Test for system command instead.
syscmd = yield cmdset.get(CMD_NOINPUT)
sysarg = ""
raise ExecSystemCommand(syscmd, sysarg)
# Parse the input string and match to available cmdset.
# This also checks for permissions, so all commands in match
# are commands the caller is allowed to call.
matches = yield _COMMAND_PARSER(raw_string, cmdset, caller)
# Deal with matches
if len(matches) > 1:
# We have a multiple-match
syscmd = yield cmdset.get(CMD_MULTIMATCH)
sysarg = _("There were multiple matches.")
if syscmd:
# use custom CMD_MULTIMATCH
syscmd.matches = matches
else:
# fall back to default error handling
sysarg = yield _SEARCH_AT_RESULT([match[2] for match in matches], caller, query=match[0])
raise ExecSystemCommand(syscmd, sysarg)
if len(matches) == 1:
# We have a unique command match. But it may still be invalid.
match = matches[0]
cmdname, args, cmd = match[0], match[1], match[2]
if not matches:
# No commands match our entered command
syscmd = yield cmdset.get(CMD_NOMATCH)
if syscmd:
# use custom CMD_NOMATCH command
sysarg = raw_string
else:
# fallback to default error text
sysarg = _("Command '%s' is not available.") % raw_string
suggestions = string_suggestions(raw_string,
cmdset.get_all_cmd_keys_and_aliases(caller),
cutoff=0.7, maxnum=3)
if suggestions:
sysarg += _(" Maybe you meant %s?") % utils.list_to_string(suggestions, _('or'), addquote=True)
else:
sysarg += _(" Type \"help\" for help.")
raise ExecSystemCommand(syscmd, sysarg)
# Check if this is a Channel-cmd match.
if hasattr(cmd, 'is_channel') and cmd.is_channel:
# even if a user-defined syscmd is not defined, the
# found cmd is already a system command in its own right.
syscmd = yield cmdset.get(CMD_CHANNEL)
if syscmd:
# replace system command with custom version
cmd = syscmd
cmd.session = session
sysarg = "%s:%s" % (cmdname, args)
raise ExecSystemCommand(cmd, sysarg)
# A normal command.
ret = yield _run_command(cmd, cmdname, args)
returnValue(ret)
except ErrorReported:
# this error was already reported, so we
# catch it here and don't pass it on.
pass
except ExecSystemCommand as exc:
# Not a normal command: run a system command, if available,
# or fall back to a return string.
syscmd = exc.syscmd
sysarg = exc.sysarg
if syscmd:
ret = yield _run_command(syscmd, syscmd.key, sysarg)
returnValue(ret)
elif sysarg:
# return system arg
error_to.msg(exc.sysarg)
except NoCmdSets:
# Critical error.
logger.log_err("No cmdsets found: %s" % caller)
error_to.msg(_ERROR_NOCMDSETS)
except Exception:
# We should not end up here. If we do, it's a programming bug.
_msg_err(error_to, _ERROR_UNTRAPPED)
except Exception:
# This catches exceptions in cmdhandler exceptions themselves
_msg_err(error_to, _ERROR_CMDHANDLER)
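The line caller.ndb.last_cmd = yield copy(cmd) stashes a snapshot of the just-executed command so the next command can inspect it even after the handler reuses or reparses the live instance. A toy sketch of that snapshot (Command is a stand-in, not the Evennia class):

    from copy import copy

    class Command(object):
        def __init__(self, key):
            self.key = key
            self.args = ''

    cmd = Command('look')
    cmd.args = 'north'
    # Keep a snapshot for the next command; the live instance may be
    # re-parsed with different args before anyone reads it.
    last_cmd = copy(cmd)
    cmd.args = 'south'
    print(last_cmd.args)  # 'north'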
0
Example 92
Project: Ultros Source File: manager.py
@inlineCallbacks
def load_plugins(self, plugins, output=True):
"""
Attempt to load up all plugins specified in a list
This is intended to do the primary plugin load operation on startup,
using the plugin names specified in the configuration.
Plugin names are not case-sensitive.
:param plugins: List of plugin names to look for
:param output: Whether to output errors and other messages to the log
:type plugins: list
:type output: bool
:returns: A Deferred, resulting in no value
:rtype: Deferred
"""
self.loaders["python"].setup()
# Plugins still needing to be loaded
to_load = []
# Final, ordered, list of plugins to load
load_order = []
# Get plugin info objects, etc.
for name in plugins:
name = name.lower()
if name not in self.info_objects:
if output:
self.log.warning("Unknown plugin: %s" % name)
continue
info = self.info_objects[name]
# Store the list of deps separately so we can keep track of the
# unmet ones, for logging later on
to_load.append((info, [i.lower() for i in info.dependencies]))
# Determine order
has_loaded = True
while len(to_load) > 0 and has_loaded:
has_loaded = False
# Iterate backwards so we can remove items
for x in xrange(len(to_load) - 1, -1, -1):
info = to_load[x][0]
deps = to_load[x][1]
self.log.trace(
"Checking dependencies for plugins: %s" % info.name
)
for i, dep in enumerate(copy(deps)):
if " " in dep:
dep_name, dep_operator, dep_version = dep.split(" ")
else:
dep_name = dep
dep_operator = None
dep_version = None
operator_func = OPERATORS.get(
dep_operator, MISSING_OPERATOR
)
if dep_version:
parsed_dep_version = StrictVersion(dep_version)
else:
parsed_dep_version = dep_version
for loaded in load_order:
# I know this isn't super efficient, but it doesn't
# matter, it's a tiny list. This comment exists
# purely for myself.
if loaded.name.lower() == dep_name:
self.log.trace("Found a dependency")
loaded_version = StrictVersion(loaded.version)
if not operator_func(
loaded_version, parsed_dep_version
):
break
deps[i] = None
if deps.count(None) == len(deps):
self.log.trace("No more deps")
break
self.log.trace(deps)
while None in deps:
deps.remove(None)
if len(deps) == 0:
# No outstanding dependencies - safe to load
self.log.trace(
"All dependencies met, adding to load queue."
)
load_order.append(info)
del to_load[x]
has_loaded = True
# Deal with unloadable plugins
if len(to_load) > 0:
for plugin in to_load:
self.log.warning(
'Unable to load plugin "%s" due to failed dependencies: '
'%s' %
(
plugin[0].name,
", ".join(plugin[1])
)
)
did_load = []
# Deal with loadable plugins
for info in load_order:
self.log.debug("Loading plugin: %s" % info.name)
result = yield self.load_plugin(info.name)
if result is PluginState.LoadError:
self.log.debug("LoadError")
pass # Already output by load_plugin
elif result is PluginState.UnknownType:
self.log.debug("UnknownType")
pass # Already output by load_plugin
elif result is PluginState.NotExists:
self.log.warning(
"Plugin state: NotExists (This should never happen)"
)
elif result is PluginState.Loaded:
if output:
self.log.info(
"Loaded plugin: %s v%s by %s" % (
info.name,
info.version,
info.author
)
)
did_load.append(info.name)
elif result is PluginState.AlreadyLoaded:
if output:
self.log.warning("Plugin already loaded: %s" % info.name)
elif result is PluginState.Unloaded: # Can actually happen now
self.log.warn("Plugin unloaded: %s" % info.name)
self.log.warn("This means the plugin disabled itself - did "
"it output anything on its own?")
elif result is PluginState.DependencyMissing:
self.log.debug("DependencyMissing")
self.log.info("Loaded {} plugins: {}".format(
len(did_load), ", ".join(sorted(did_load))
))
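enumerate(copy(deps)) in the dependency check iterates over a snapshot of the list while the loop body writes None back into the original, a defensive way to edit a list in place during traversal. A reduced sketch of the mark-then-sweep idiom (dependency strings are made up):

    from copy import copy

    deps = ['core', 'web >= 1.0', 'auth']
    resolved = {'core', 'auth'}
    # Iterate a snapshot while marking satisfied deps in the original.
    for i, dep in enumerate(copy(deps)):
        name = dep.split(' ')[0]
        if name in resolved:
            deps[i] = None
    while None in deps:
        deps.remove(None)
    print(deps)  # ['web >= 1.0']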
0
Example 93
Project: fos-legacy Source File: plots.py
def init(self):
tracks.angle_table_index=0
tracks.anglex = 0.
tracks.angley = 0.
tracks.anglez = 0.
empty = tracks.Empty()
empty.init()
ghost = tracks.Ghost()
ghost.init()
global csurf
#devel06
#csurfr ='/home/ian/Data/dipy/rh.pial.vtk'
#csurfl ='/home/ian/Data/dipy/lh.pial.vtk'
#devel07
csurfr ='/home/eg01/Data_Backup/Data/Adam/multiple_transp_volumes/freesurfer_trich/rh.pial.vtk'
csurfl ='/home/eg01/Data_Backup/Data/Adam/multiple_transp_volumes/freesurfer_trich/lh.pial.vtk'
csurfr = cortex.CorticalSurface(csurfr, angle_table=tracks.angle_table)
csurfl = cortex.CorticalSurface(csurfl, angle_table=tracks.angle_table)
csurfr.fadeout = True
csurfl.fadeout = True
csurfr.fadeout_speed = 0.001
csurfl.fadeout_speed = 0.001
csurfr.orbit_demo = True
csurfr.orbit_anglez_rate = 1.
csurfl.orbit_demo = True
csurfl.orbit_anglez_rate = 1.
csurfr.orbit_anglex_rate = -.1
csurfl.orbit_anglex_rate = -.1
csurfr.position[2]+=10
csurfl.position[2]+=10
csurfr.init()
csurfl.init()
#'''
import copy
global csurfR
global csurfL
csurfR = copy.copy(csurfr)
csurfL = copy.copy(csurfl)
csurfR.fadeout = False
csurfL.fadeout = False
csurfR.init()
csurfL.init()
global tb1
#devel06
#tb1_fname='/home/ian/Data/dipy/brain2_scan1_fiber_track_mni.trk'
#devel07
tb1_fname='/home/eg01/Data_Backup/Data/PBC/pbc2009icdm/brain2/brain2_scan1_fiber_track_mni.trk'
#tb1=tracks.ChromoTracks(tb1_fname,shrink=0.99)
tb1=tracks.Tracks(tb1_fname,ang_table=True,shrink=0.99,subset=[0,20000])
tb1.angular_speed = 0.
tb1.fade_demo = True
tb1.opacity = 0.1
tb1.opacity_rate = -0.01
#tb1.fadeout = True
#tb1.fadeout_speed = 0.001
tb1.position = -tb1.mean
tb1.position[0] += 5.
tb1.position[2] += 10.#20.
tb1.manycolors = False #True
#tb1.material_color = True
tb1.orbit_demo = True #False
tb1.orbit_anglez_rate = 1.
tb1.orbit_anglex_rate = -.1
tb1.init()
global t1; t1 = self.hidden_tracks(tb1_fname,0.1*0.1,angle_table=True, data_ext=tb1.data)
global ct1; ct1 = self.hidden_tracks(tb1_fname,0.1*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
global ct6; ct6 = self.hidden_tracks(tb1_fname,1*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
#global ct7; ct7 = self.hidden_tracks(tb1_fname,0.7,angle_table=True,many_colors=True, data_ext=tb1.data)
'''
global t1; t1 = self.hidden_tracks(tb1_fname,1*0.1,angle_table=True, data_ext=tb1.data)
global t2; t2 = self.hidden_tracks(tb1_fname,.1*0.1,angle_table=True, data_ext=tb1.data)
global t3; t3 = self.hidden_tracks(tb1_fname,.05*0.1,angle_table=True, data_ext=tb1.data)
global t4; t4 = self.hidden_tracks(tb1_fname,.01*0.1,angle_table=True, data_ext=tb1.data)
#global t5; t5 = self.hidden_tracks(tb1_fname,.005*0.1,angle_table=True, data_ext=tb1.data)
#global ct1; ct1 = self.hidden_tracks(tb1_fname,.005*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
global ct2; ct2 = self.hidden_tracks(tb1_fname,.01*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
global ct3; ct3 = self.hidden_tracks(tb1_fname,.05*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
global ct4; ct4 = self.hidden_tracks(tb1_fname,.1*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
global ct5; ct5 = self.hidden_tracks(tb1_fname,.5*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
global ct6; ct6 = self.hidden_tracks(tb1_fname,1*0.1,angle_table=True,many_colors=True, data_ext=tb1.data)
'''
#'''
initial = 10*MS
delayt = 5*MS
delay = initial +delayt
self.slots={00:{'actor':empty, 'slot':(0, delayt)},
05:{'actor':ghost,'slot':( 0*MS+delayt, 800*MS+delayt )},
80:{'actor':csurfL,'slot':( 0*MS+delayt, 800*MS )},
90:{'actor':csurfR,'slot':( 0*MS+delayt, 800*MS )}}
'''
#05:{'actor':ghost,'slot':(39*MS+delay, 800*MS+delay )},
10:{'actor':tb1,'slot':( 0*MS+delay, 40*MS+delay )},
11:{'actor':csurfl,'slot':( 0*MS+delay, 40*MS+delay )},
12:{'actor':csurfr,'slot':( 0*MS+delay, 40*MS+delay )},
21:{'actor':t1,'slot':( 40*MS+delay, 41*MS+delay )},
22:{'actor':t1,'slot':( 40*MS+delay, 42*MS+delay )},
23:{'actor':t1,'slot':( 40*MS+delay, 43*MS+delay )},
31:{'actor':ct1,'slot':( 42*MS+delay, 47*MS+delay )},
32:{'actor':ct1,'slot':( 42*MS+delay, 46*MS+delay )},
33:{'actor':ct1,'slot':( 42*MS+delay, 45*MS+delay )},
34:{'actor':ct6,'slot':( 47*MS+delay, 800*MS+delay )}
#35:{'actor':ct7,'slot':( 48*MS, 800*MS )}
'''
'''
self.slots={00:{'actor':empty, 'slot':(0, delay)},
05:{'actor':ghost,'slot':( 0*MS+delay, 800*MS+delay )},
#05:{'actor':ghost,'slot':(39*MS+delay, 800*MS+delay )},
10:{'actor':tb1,'slot':( 0*MS+delay, 40*MS+delay )},
11:{'actor':csurfl,'slot':( 0*MS+delay, 40*MS+delay )},
12:{'actor':csurfr,'slot':( 0*MS+delay, 40*MS+delay )},
21:{'actor':t1,'slot':( 40*MS+delay, 41*MS+delay )},
22:{'actor':t1,'slot':( 40*MS+delay, 42*MS+delay )},
23:{'actor':t1,'slot':( 40*MS+delay, 43*MS+delay )},
31:{'actor':ct1,'slot':( 42*MS+delay, 47*MS+delay )},
32:{'actor':ct1,'slot':( 42*MS+delay, 46*MS+delay )},
33:{'actor':ct1,'slot':( 42*MS+delay, 45*MS+delay )},
34:{'actor':ct6,'slot':( 47*MS+delay, 800*MS+delay )}
#35:{'actor':ct7,'slot':( 48*MS, 800*MS )}
}
'''
'''
self.slots={10:{'actor':tb1,'slot':( 0, 3*40*MS )},
11:{'actor':csurfl,'slot':( 0,3*40*MS )},
12:{'actor':csurfr,'slot':( 0,3*40*MS )},
21:{'actor':t1,'slot':( 3*40*MS, 3*41*MS )},
22:{'actor':t1,'slot':( 3*40*MS, 3*42*MS )},
23:{'actor':t1,'slot':( 3*40*MS, 3*43*MS )},
31:{'actor':ct1,'slot':( 3*42*MS, 3*47*MS )},
32:{'actor':ct1,'slot':( 3*42*MS, 3*46*MS )},
33:{'actor':ct1,'slot':( 3*42*MS,3*45*MS )},
34:{'actor':ct6,'slot':( 3*47*MS, 3*48*MS )},
35:{'actor':ct7,'slot':( 3*48*MS, 800*MS )}
}
'''
global last_time
last_time = glut.glutGet(glut.GLUT_ELAPSED_TIME)
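csurfR = copy.copy(csurfr) duplicates an already-initialized actor so the copy can carry its own flags (no fadeout) while reusing the loaded surface data. A toy sketch of per-copy flags on a shallow copy (Actor is a stand-in class):

    import copy

    class Actor(object):
        def __init__(self):
            self.fadeout = True
            self.position = [0.0, 0.0, 0.0]

    left = Actor()
    right = copy.copy(left)
    right.fadeout = False             # rebinding only affects the copy
    right.position = [5.0, 0.0, 0.0]  # replace rather than mutate: the
                                      # shallow copy shared the old list
    print(left.fadeout, left.position)    # True [0.0, 0.0, 0.0]
    print(right.fadeout, right.position)  # False [5.0, 0.0, 0.0]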
0
Example 94
Project: django-calaccess-campaign-browser Source File: loadcalaccesscampaignexpenditures.py
def transform_quarterly_expenditures_csv(self):
self.log(" Marking duplicates")
self.log(" Dumping CSV sorted by unique identifier")
sql = """
SELECT
`agent_namf`,
`agent_naml`,
`agent_nams`,
`agent_namt`,
`amend_id`,
`amount`,
`bakref_tid`,
`bal_juris`,
`bal_name`,
`bal_num`,
`cand_namf`,
`cand_naml`,
`cand_nams`,
`cand_namt`,
`cmte_id`,
`cuem_oth`,
`cuem_ytd`,
`dist_no`,
`entity_cd`,
`expn_chkno`,
`expn_code`,
`expn_date`,
`expn_dscr`,
`filing_id`,
`form_type`,
`g_from_e_f`,
`juris_cd`,
`juris_dscr`,
`line_item`,
`memo_code`,
`memo_refno`,
`off_s_h_cd`,
`offic_dscr`,
`office_cd`,
`payee_city`,
`payee_namf`,
`payee_naml`,
`payee_nams`,
`payee_namt`,
`payee_st`,
`payee_zip4`,
`rec_type`,
`sup_opp_cd`,
`tran_id`,
`tres_city`,
`tres_namf`,
`tres_naml`,
`tres_nams`,
`tres_namt`,
`tres_st`,
`tres_zip4`,
`xref_match`,
`xref_schnm`
FROM %(raw_model)s
ORDER BY filing_id, tran_id, amend_id DESC
INTO OUTFILE '%(tmp_csv)s'
FIELDS TERMINATED BY ','
ENCLOSED BY '"'
LINES TERMINATED BY '\n'
""" % dict(
raw_model=ExpnCd._meta.db_table,
tmp_csv=self.quarterly_tmp_csv,
)
self.cursor.execute(sql)
INHEADERS = [
"agent_namf",
"agent_naml",
"agent_nams",
"agent_namt",
"amend_id",
"amount",
"bakref_tid",
"bal_juris",
"bal_name",
"bal_num",
"cand_namf",
"cand_naml",
"cand_nams",
"cand_namt",
"cmte_id",
"cuem_oth",
"cuem_ytd",
"dist_no",
"entity_cd",
"expn_chkno",
"expn_code",
"expn_date",
"expn_dscr",
"filing_id",
"form_type",
"g_from_e_f",
"juris_cd",
"juris_dscr",
"line_item",
"memo_code",
"memo_refno",
"off_s_h_cd",
"offic_dscr",
"office_cd",
"payee_city",
"payee_namf",
"payee_naml",
"payee_nams",
"payee_namt",
"payee_st",
"payee_zip4",
"rec_type",
"sup_opp_cd",
"tran_id",
"tres_city",
"tres_namf",
"tres_naml",
"tres_nams",
"tres_namt",
"tres_st",
"tres_zip4",
"xref_match",
"xref_schnm"
]
OUTHEADERS = copy.copy(INHEADERS)
OUTHEADERS.append("is_duplicate")
self.log(" Marking duplicates in a new CSV")
# `rU` is read Universal
# see: https://docs.python.org/2/library/functions.html#open
with open(self.quarterly_tmp_csv, 'rU') as fin:
fout = csv.DictWriter(
open(self.quarterly_target_csv, 'wb'),
fieldnames=OUTHEADERS
)
fout.writeheader()
last_uid = ''
reader = csv.DictReader(fin, fieldnames=INHEADERS)
for row in reader:
row.pop(None, None)
uid = '{}-{}'.format(
row['filing_id'],
row['tran_id']
)
if uid != last_uid:
row['is_duplicate'] = 0
last_uid = uid
else:
row['is_duplicate'] = 1
try:
fout.writerow(row)
except ValueError:
continue
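OUTHEADERS = copy.copy(INHEADERS) followed by append is the plain list idiom: copy first so growing the output header list never mutates the input one. A minimal sketch:

    import copy

    INHEADERS = ['filing_id', 'tran_id', 'amount']
    # Copy before appending so the input header list stays unchanged;
    # for lists, copy.copy(x) is equivalent to x[:] or list(x).
    OUTHEADERS = copy.copy(INHEADERS)
    OUTHEADERS.append('is_duplicate')
    print(INHEADERS)   # ['filing_id', 'tran_id', 'amount']
    print(OUTHEADERS)  # ['filing_id', 'tran_id', 'amount', 'is_duplicate']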
0
Example 95
Project: gfw-api Source File: discovery.py
def createResource(http, baseUrl, model, requestBuilder,
developerKey, resourceDesc, futureDesc, schema):
class Resource(object):
"""A class for interacting with a resource."""
def __init__(self):
self._http = http
self._baseUrl = baseUrl
self._model = model
self._developerKey = developerKey
self._requestBuilder = requestBuilder
def createMethod(theclass, methodName, methodDesc, futureDesc):
methodName = _fix_method_name(methodName)
pathUrl = methodDesc['path']
httpMethod = methodDesc['httpMethod']
methodId = methodDesc['id']
mediaPathUrl = None
accept = []
maxSize = 0
if 'mediaUpload' in methodDesc:
mediaUpload = methodDesc['mediaUpload']
mediaPathUrl = mediaUpload['protocols']['simple']['path']
mediaResumablePathUrl = mediaUpload['protocols']['resumable']['path']
accept = mediaUpload['accept']
maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))
if 'parameters' not in methodDesc:
methodDesc['parameters'] = {}
for name in STACK_QUERY_PARAMETERS:
methodDesc['parameters'][name] = {
'type': 'string',
'location': 'query'
}
if httpMethod in ['PUT', 'POST', 'PATCH']:
methodDesc['parameters']['body'] = {
'description': 'The request body.',
'type': 'object',
'required': True,
}
if 'mediaUpload' in methodDesc:
methodDesc['parameters']['media_body'] = {
'description': 'The filename of the media request body.',
'type': 'string',
'required': False,
}
methodDesc['parameters']['body']['required'] = False
argmap = {} # Map from method parameter name to query parameter name
required_params = [] # Required parameters
repeated_params = [] # Repeated parameters
pattern_params = {} # Parameters that must match a regex
query_params = [] # Parameters that will be used in the query string
path_params = {} # Parameters that will be used in the base URL
param_type = {} # The type of the parameter
enum_params = {} # Allowable enumeration values for each parameter
if 'parameters' in methodDesc:
for arg, desc in methodDesc['parameters'].iteritems():
param = key2param(arg)
argmap[param] = arg
if desc.get('pattern', ''):
pattern_params[param] = desc['pattern']
if desc.get('enum', ''):
enum_params[param] = desc['enum']
if desc.get('required', False):
required_params.append(param)
if desc.get('repeated', False):
repeated_params.append(param)
if desc.get('location') == 'query':
query_params.append(param)
if desc.get('location') == 'path':
path_params[param] = param
param_type[param] = desc.get('type', 'string')
for match in URITEMPLATE.finditer(pathUrl):
for namematch in VARNAME.finditer(match.group(0)):
name = key2param(namematch.group(0))
path_params[name] = name
if name in query_params:
query_params.remove(name)
def method(self, **kwargs):
for name in kwargs.iterkeys():
if name not in argmap:
raise TypeError('Got an unexpected keyword argument "%s"' % name)
for name in required_params:
if name not in kwargs:
raise TypeError('Missing required parameter "%s"' % name)
for name, regex in pattern_params.iteritems():
if name in kwargs:
if isinstance(kwargs[name], basestring):
pvalues = [kwargs[name]]
else:
pvalues = kwargs[name]
for pvalue in pvalues:
if re.match(regex, pvalue) is None:
raise TypeError(
'Parameter "%s" value "%s" does not match the pattern "%s"' %
(name, pvalue, regex))
for name, enums in enum_params.iteritems():
if name in kwargs:
if kwargs[name] not in enums:
raise TypeError(
'Parameter "%s" value "%s" is not an allowed value in "%s"' %
(name, kwargs[name], str(enums)))
actual_query_params = {}
actual_path_params = {}
for key, value in kwargs.iteritems():
to_type = param_type.get(key, 'string')
# For repeated parameters we cast each member of the list.
if key in repeated_params and type(value) == type([]):
cast_value = [_cast(x, to_type) for x in value]
else:
cast_value = _cast(value, to_type)
if key in query_params:
actual_query_params[argmap[key]] = cast_value
if key in path_params:
actual_path_params[argmap[key]] = cast_value
body_value = kwargs.get('body', None)
media_filename = kwargs.get('media_body', None)
if self._developerKey:
actual_query_params['key'] = self._developerKey
headers = {}
headers, params, query, body = self._model.request(headers,
actual_path_params, actual_query_params, body_value)
expanded_url = uritemplate.expand(pathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
resumable = None
multipart_boundary = ''
if media_filename:
# Convert a simple filename into a MediaUpload object.
if isinstance(media_filename, basestring):
(media_mime_type, encoding) = mimetypes.guess_type(media_filename)
if media_mime_type is None:
raise UnknownFileType(media_filename)
if not mimeparse.best_match([media_mime_type], ','.join(accept)):
raise UnacceptableMimeTypeError(media_mime_type)
media_upload = MediaFileUpload(media_filename, media_mime_type)
elif isinstance(media_filename, MediaUpload):
media_upload = media_filename
else:
raise TypeError(
'media_filename must be str or MediaUpload. Got %s' % type(media_filename))
if media_upload.resumable():
resumable = media_upload
# Check the maxSize
if maxSize > 0 and media_upload.size() > maxSize:
raise MediaUploadSizeError("Media larger than: %s" % maxSize)
# Use the media path uri for media uploads
if media_upload.resumable():
expanded_url = uritemplate.expand(mediaResumablePathUrl, params)
else:
expanded_url = uritemplate.expand(mediaPathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
if body is None:
# This is a simple media upload
headers['content-type'] = media_upload.mimetype()
expanded_url = uritemplate.expand(mediaResumablePathUrl, params)
if not media_upload.resumable():
body = media_upload.getbytes(0, media_upload.size())
else:
# This is a multipart/related upload.
msgRoot = MIMEMultipart('related')
# msgRoot should not write out its own headers
setattr(msgRoot, '_write_headers', lambda self: None)
# attach the body as one part
msg = MIMENonMultipart(*headers['content-type'].split('/'))
msg.set_payload(body)
msgRoot.attach(msg)
# attach the media as the second part
msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
if media_upload.resumable():
# This is a multipart resumable upload, where a multipart payload
# looks like this:
#
# --===============1678050750164843052==
# Content-Type: application/json
# MIME-Version: 1.0
#
# {'foo': 'bar'}
# --===============1678050750164843052==
# Content-Type: image/png
# MIME-Version: 1.0
# Content-Transfer-Encoding: binary
#
# <BINARY STUFF>
# --===============1678050750164843052==--
#
# In the case of resumable multipart media uploads, the <BINARY
# STUFF> is large and will be spread across multiple PUTs. What we
# do here is compose the multipart message with a random payload in
# place of <BINARY STUFF> and then split the resulting content into
# two pieces, text before <BINARY STUFF> and text after <BINARY
# STUFF>. The text after <BINARY STUFF> is the multipart boundary.
# In apiclient.http the HttpRequest will send the text before
# <BINARY STUFF>, then send the actual binary media in chunks, and
# then will send the multipart delimiter.
payload = hex(random.getrandbits(300))
msg.set_payload(payload)
msgRoot.attach(msg)
body = msgRoot.as_string()
body, _ = body.split(payload)
resumable = media_upload
else:
payload = media_upload.getbytes(0, media_upload.size())
msg.set_payload(payload)
msgRoot.attach(msg)
body = msgRoot.as_string()
multipart_boundary = msgRoot.get_boundary()
headers['content-type'] = ('multipart/related; '
'boundary="%s"') % multipart_boundary
logging.info('URL being requested: %s' % url)
return self._requestBuilder(self._http,
self._model.response,
url,
method=httpMethod,
body=body,
headers=headers,
methodId=methodId,
resumable=resumable)
docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
if len(argmap) > 0:
docs.append('Args:\n')
for arg in argmap.iterkeys():
if arg in STACK_QUERY_PARAMETERS:
continue
repeated = ''
if arg in repeated_params:
repeated = ' (repeated)'
required = ''
if arg in required_params:
required = ' (required)'
paramdesc = methodDesc['parameters'][argmap[arg]]
paramdoc = paramdesc.get('description', 'A parameter')
paramtype = paramdesc.get('type', 'string')
docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
repeated))
enum = paramdesc.get('enum', [])
enumDesc = paramdesc.get('enumDescriptions', [])
if enum and enumDesc:
docs.append(' Allowed values\n')
for (name, desc) in zip(enum, enumDesc):
docs.append(' %s - %s\n' % (name, desc))
setattr(method, '__doc__', ''.join(docs))
setattr(theclass, methodName, method)
def createNextMethodFromFuture(theclass, methodName, methodDesc, futureDesc):
""" This is a legacy method, as only Buzz and Moderator use the future.json
functionality for generating _next methods. It will be kept around as long
as those API versions are around, but no new APIs should depend upon it.
"""
methodName = _fix_method_name(methodName)
methodId = methodDesc['id'] + '.next'
def methodNext(self, previous):
"""Retrieve the next page of results.
Takes a single argument, 'previous', which is the results
from the last call, and returns the next set of items
in the collection.
Returns:
None if there are no more items in the collection.
"""
if futureDesc['type'] != 'uri':
raise UnknownLinkType(futureDesc['type'])
try:
p = previous
for key in futureDesc['location']:
p = p[key]
url = p
except (KeyError, TypeError):
return None
url = _add_query_parameter(url, 'key', self._developerKey)
headers = {}
headers, params, query, body = self._model.request(headers, {}, {}, None)
logging.info('URL being requested: %s' % url)
return self._requestBuilder(self._http,
self._model.response,
url,
method='GET',
headers=headers,
methodId=methodId)
setattr(theclass, methodName, methodNext)
def createNextMethod(theclass, methodName, methodDesc, futureDesc):
methodName = _fix_method_name(methodName)
methodId = methodDesc['id'] + '.next'
def methodNext(self, previous_request, previous_response):
"""Retrieves the next page of results.
Args:
previous_request: The request for the previous page.
previous_response: The response from the request for the previous page.
Returns:
A request object that you can call 'execute()' on to request the next
page. Returns None if there are no more items in the collection.
"""
# Retrieve nextPageToken from previous_response
# Use as pageToken in previous_request to create new request.
if 'nextPageToken' not in previous_response:
return None
request = copy.copy(previous_request)
pageToken = previous_response['nextPageToken']
parsed = list(urlparse.urlparse(request.uri))
q = parse_qsl(parsed[4])
# Find and remove old 'pageToken' value from URI
newq = [(key, value) for (key, value) in q if key != 'pageToken']
newq.append(('pageToken', pageToken))
parsed[4] = urllib.urlencode(newq)
uri = urlparse.urlunparse(parsed)
request.uri = uri
logging.info('URL being requested: %s' % uri)
return request
setattr(theclass, methodName, methodNext)
# Add basic methods to Resource
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
if futureDesc:
future = futureDesc['methods'].get(methodName, {})
else:
future = None
createMethod(Resource, methodName, methodDesc, future)
# Add in nested resources
if 'resources' in resourceDesc:
def createResourceMethod(theclass, methodName, methodDesc, futureDesc):
methodName = _fix_method_name(methodName)
def methodResource(self):
return createResource(self._http, self._baseUrl, self._model,
self._requestBuilder, self._developerKey,
methodDesc, futureDesc, schema)
setattr(methodResource, '__doc__', 'A collection resource.')
setattr(methodResource, '__is_resource__', True)
setattr(theclass, methodName, methodResource)
for methodName, methodDesc in resourceDesc['resources'].iteritems():
if futureDesc and 'resources' in futureDesc:
future = futureDesc['resources'].get(methodName, {})
else:
future = {}
createResourceMethod(Resource, methodName, methodDesc, future)
# Add <m>_next() methods to Resource
if futureDesc and 'methods' in futureDesc:
for methodName, methodDesc in futureDesc['methods'].iteritems():
if 'next' in methodDesc and methodName in resourceDesc['methods']:
createNextMethodFromFuture(Resource, methodName + '_next',
resourceDesc['methods'][methodName],
methodDesc['next'])
# Add _next() methods
# Look for response bodies in schema that contain nextPageToken, and methods
# that take a pageToken parameter.
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
if 'response' in methodDesc:
responseSchema = methodDesc['response']
if '$ref' in responseSchema:
responseSchema = schema[responseSchema['$ref']]
hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
{})
hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
if hasNextPageToken and hasPageToken:
createNextMethod(Resource, methodName + '_next',
resourceDesc['methods'][methodName],
methodName)
return Resource()
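The copy.copy call in createNextMethod above is the heart of the pagination helper: the previous request is shallow-copied so its uri can be rewritten with the new pageToken without mutating the request object the caller still holds. A minimal standalone sketch of the same pattern (Python 2, to match the snippet; the Request class here is a hypothetical stand-in for apiclient's HttpRequest):

import copy
import urllib
import urlparse

class Request(object):
    # Hypothetical stand-in: only the attribute the pagination
    # trick touches is modelled.
    def __init__(self, uri):
        self.uri = uri

def next_page(previous_request, page_token):
    # Shallow copy: a new Request whose attributes start out shared
    # with the original. Rebinding request.uri below changes only
    # the copy, which is all this pattern needs.
    request = copy.copy(previous_request)
    parsed = list(urlparse.urlparse(request.uri))
    query = [(k, v) for (k, v) in urlparse.parse_qsl(parsed[4])
             if k != 'pageToken']
    query.append(('pageToken', page_token))
    parsed[4] = urllib.urlencode(query)
    request.uri = urlparse.urlunparse(parsed)
    return request

req = Request('https://example.com/items?maxResults=10&pageToken=aaa')
print next_page(req, 'bbb').uri  # ...pageToken=bbb
print req.uri                    # original left untouched

copy.copy is sufficient here because only the uri attribute is rebound; anything else on the copy still aliases the original request.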
0
Example 96
Project: trytond Source File: model.py
@classmethod
def fields_get(cls, fields_names=None):
"""
Return the definition of each field on the model.
"""
res = {}
pool = Pool()
Translation = pool.get('ir.translation')
FieldAccess = pool.get('ir.model.field.access')
ModelAccess = pool.get('ir.model.access')
# Add translation to cache
language = Transaction().language
trans_args = []
for field in (x for x in cls._fields.keys()
if ((not fields_names) or x in fields_names)):
trans_args.append((cls.__name__ + ',' + field, 'field', language,
None))
trans_args.append((cls.__name__ + ',' + field, 'help', language,
None))
if hasattr(cls._fields[field], 'selection'):
if (isinstance(cls._fields[field].selection, (tuple, list))
and ((hasattr(cls._fields[field],
'translate_selection')
and cls._fields[field].translate_selection)
or not hasattr(cls._fields[field],
'translate_selection'))):
sel = cls._fields[field].selection
for (key, val) in sel:
trans_args.append((cls.__name__ + ',' + field,
'selection', language, val))
Translation.get_sources(trans_args)
encoder = PYSONEncoder()
accesses = FieldAccess.get_access([cls.__name__])[cls.__name__]
for field in (x for x in cls._fields.keys()
if ((not fields_names) or x in fields_names)):
res[field] = {
'type': cls._fields[field]._type,
'name': field,
}
for arg in (
'string',
'readonly',
'states',
'size',
'required',
'translate',
'help',
'select',
'on_change',
'add_remove',
'on_change_with',
'autocomplete',
'sort',
'datetime_field',
'loading',
'filename',
'selection_change_with',
'domain',
'converter',
):
if getattr(cls._fields[field], arg, None) is not None:
value = getattr(cls._fields[field], arg)
if isinstance(value, set):
value = list(value)
else:
value = copy.copy(value)
res[field][arg] = value
if not accesses.get(field, {}).get('write', True):
res[field]['readonly'] = True
if res[field].get('states') and \
'readonly' in res[field]['states']:
del res[field]['states']['readonly']
for arg in ('digits', 'invisible'):
if hasattr(cls._fields[field], arg) \
and getattr(cls._fields[field], arg):
res[field][arg] = copy.copy(getattr(cls._fields[field],
arg))
if (isinstance(cls._fields[field],
(fields.Function, fields.One2Many, fields.Many2Many))
and not getattr(cls, 'order_%s' % field, None)):
res[field]['sortable'] = False
if ((isinstance(cls._fields[field], fields.Function)
and not cls._fields[field].searcher)
or (cls._fields[field]._type in ('binary', 'sha'))
or (isinstance(cls._fields[field], fields.Property)
and isinstance(cls._fields[field]._field,
fields.Many2One))):
res[field]['searchable'] = False
else:
res[field]['searchable'] = True
if Transaction().context.get('language'):
# translate the field label
res_trans = Translation.get_source(
cls.__name__ + ',' + field, 'field',
Transaction().context['language'])
if res_trans:
res[field]['string'] = res_trans
help_trans = Translation.get_source(
cls.__name__ + ',' + field, 'help',
Transaction().context['language'])
if help_trans:
res[field]['help'] = help_trans
if hasattr(cls._fields[field], 'selection'):
if isinstance(cls._fields[field].selection, (tuple, list)):
sel = copy.copy(cls._fields[field].selection)
if (Transaction().context.get('language')
and ((hasattr(cls._fields[field],
'translate_selection')
and cls._fields[field].translate_selection)
or not hasattr(cls._fields[field],
'translate_selection'))):
# translate each selection option
sel2 = []
for (key, val) in sel:
val2 = Translation.get_source(
cls.__name__ + ',' + field, 'selection',
language, val)
sel2.append((key, val2 or val))
sel = sel2
res[field]['selection'] = sel
else:
# call the 'dynamic selection' function
res[field]['selection'] = copy.copy(
cls._fields[field].selection)
if res[field]['type'] in (
'one2many',
'many2many',
'many2one',
'one2one',
):
if hasattr(cls._fields[field], 'model_name'):
relation = copy.copy(cls._fields[field].model_name)
else:
relation = copy.copy(
cls._fields[field].get_target().__name__)
res[field]['relation'] = relation
res[field]['context'] = copy.copy(cls._fields[field].context)
res[field]['create'] = accesses.get(field, {}).get('create',
True)
res[field]['delete'] = accesses.get(field, {}).get('delete',
True)
if res[field]['type'] == 'one2many' \
and getattr(cls._fields[field], 'field', None):
res[field]['relation_field'] = copy.copy(
cls._fields[field].field)
if res[field]['type'] == 'many2one':
target = cls._fields[field].get_target()
relation_fields = []
for target_name, target_field in target._fields.iteritems():
if (target_field._type == 'one2many'
and target_field.model_name == cls.__name__
and target_field.field == field):
relation_fields.append(target_name)
# Set relation_field only if there is no ambiguity
if len(relation_fields) == 1:
res[field]['relation_field'], = relation_fields
if res[field]['type'] in ('datetime', 'time'):
res[field]['format'] = copy.copy(cls._fields[field].format)
if res[field]['type'] == 'selection':
res[field]['context'] = copy.copy(cls._fields[field].context)
if res[field]['type'] == 'dict':
res[field]['schema_model'] = cls._fields[field].schema_model
res[field]['domain'] = copy.copy(cls._fields[field].domain)
res[field]['context'] = copy.copy(cls._fields[field].context)
res[field]['create'] = accesses.get(field, {}).get('create',
True)
res[field]['delete'] = accesses.get(field, {}).get('delete',
True)
# convert attributes into pyson
for attr in ('states', 'domain', 'context', 'digits', 'size',
'add_remove', 'format'):
if attr in res[field]:
res[field][attr] = encoder.encode(res[field][attr])
for i in res.keys():
# filter out fields which aren't in the fields_names list
if fields_names:
if i not in fields_names:
del res[i]
elif not ModelAccess.check_relation(cls.__name__, i, mode='read'):
del res[i]
return res
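Nearly every copy.copy in fields_get guards a mutable attribute (domain, context, selection, digits) that lives on the shared class-level field definition: the caller gets an independent list or dict to encode and mutate, while the definition itself stays pristine. A minimal sketch of why the shallow copy matters (the Field class here is hypothetical):

import copy

class Field(object):
    # Hypothetical stand-in for a trytond field: domain is a mutable
    # list shared by every transaction that reads the model.
    def __init__(self, domain):
        self.domain = domain

shared = Field([('active', '=', True)])

# Without the copy, appending to the returned domain would silently
# rewrite the class-level definition for all later callers.
definition = {'domain': copy.copy(shared.domain)}
definition['domain'].append(('id', '>', 0))

print definition['domain']  # [('active', '=', True), ('id', '>', 0)]
print shared.domain         # [('active', '=', True)] -- unchanged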
0
Example 97
Project: termite-data-server Source File: appadmin.py
def ccache():
if is_gae:
form = FORM(
P(TAG.BUTTON(T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")))
else:
cache.ram.initialize()
cache.disk.initialize()
form = FORM(
P(TAG.BUTTON(
T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
P(TAG.BUTTON(
T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
P(TAG.BUTTON(
T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
)
if form.accepts(request.vars, session):
session.flash = ""
if is_gae:
if request.vars.yes:
cache.ram.clear()
session.flash += T("Cache Cleared")
else:
clear_ram = False
clear_disk = False
if request.vars.yes:
clear_ram = clear_disk = True
if request.vars.ram:
clear_ram = True
if request.vars.disk:
clear_disk = True
if clear_ram:
cache.ram.clear()
session.flash += T("Ram Cleared")
if clear_disk:
cache.disk.clear()
session.flash += T("Disk Cleared")
redirect(URL(r=request))
try:
from guppy import hpy
hp = hpy()
except ImportError:
hp = False
import shelve
import os
import copy
import time
import math
from gluon import portalocker
ram = {
'entries': 0,
'bytes': 0,
'objects': 0,
'hits': 0,
'misses': 0,
'ratio': 0,
'oldest': time.time(),
'keys': []
}
disk = copy.copy(ram)
total = copy.copy(ram)
disk['keys'] = []
total['keys'] = []
def GetInHMS(seconds):
hours = math.floor(seconds / 3600)
seconds -= hours * 3600
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
seconds = math.floor(seconds)
return (hours, minutes, seconds)
if is_gae:
gae_stats = cache.ram.client.get_stats()
try:
gae_stats['ratio'] = ((gae_stats['hits'] * 100) /
(gae_stats['hits'] + gae_stats['misses']))
except ZeroDivisionError:
gae_stats['ratio'] = T("?")
gae_stats['oldest'] = GetInHMS(time.time() - gae_stats['oldest_item_age'])
total.update(gae_stats)
else:
for key, value in cache.ram.storage.iteritems():
if isinstance(value, dict):
ram['hits'] = value['hit_total'] - value['misses']
ram['misses'] = value['misses']
try:
ram['ratio'] = ram['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
ram['ratio'] = 0
else:
if hp:
ram['bytes'] += hp.iso(value[1]).size
ram['objects'] += hp.iso(value[1]).count
ram['entries'] += 1
if value[0] < ram['oldest']:
ram['oldest'] = value[0]
ram['keys'].append((key, GetInHMS(time.time() - value[0])))
folder = os.path.join(request.folder,'cache')
if not os.path.exists(folder):
os.mkdir(folder)
locker = open(os.path.join(folder, 'cache.lock'), 'a')
portalocker.lock(locker, portalocker.LOCK_EX)
disk_storage = shelve.open(
os.path.join(folder, 'cache.shelve'))
try:
for key, value in disk_storage.items():
if isinstance(value, dict):
disk['hits'] = value['hit_total'] - value['misses']
disk['misses'] = value['misses']
try:
disk['ratio'] = disk['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
disk['ratio'] = 0
else:
if hp:
disk['bytes'] += hp.iso(value[1]).size
disk['objects'] += hp.iso(value[1]).count
disk['entries'] += 1
if value[0] < disk['oldest']:
disk['oldest'] = value[0]
disk['keys'].append((key, GetInHMS(time.time() - value[0])))
finally:
portalocker.unlock(locker)
locker.close()
disk_storage.close()
total['entries'] = ram['entries'] + disk['entries']
total['bytes'] = ram['bytes'] + disk['bytes']
total['objects'] = ram['objects'] + disk['objects']
total['hits'] = ram['hits'] + disk['hits']
total['misses'] = ram['misses'] + disk['misses']
total['keys'] = ram['keys'] + disk['keys']
try:
total['ratio'] = total['hits'] * 100 / (total['hits'] +
total['misses'])
except (KeyError, ZeroDivisionError):
total['ratio'] = 0
if disk['oldest'] < ram['oldest']:
total['oldest'] = disk['oldest']
else:
total['oldest'] = ram['oldest']
ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
total['oldest'] = GetInHMS(time.time() - total['oldest'])
def key_table(keys):
return TABLE(
TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
*[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
**dict(_class='cache-keys',
_style="border-collapse: separate; border-spacing: .5em;"))
if not is_gae:
ram['keys'] = key_table(ram['keys'])
disk['keys'] = key_table(disk['keys'])
total['keys'] = key_table(total['keys'])
return dict(form=form, total=total,
ram=ram, disk=disk, object_stats=hp != False)
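Here copy.copy(ram) seeds the disk and total statistics dicts from one template. Because the copy is shallow, the nested 'keys' list would be shared by all three dicts, which is why ccache() immediately rebinds disk['keys'] = [] and total['keys'] = [] instead of mutating them. A small sketch of the aliasing being avoided:

import copy

ram = {'entries': 0, 'keys': []}
disk = copy.copy(ram)   # shallow: disk['keys'] is ram['keys']

disk['keys'].append('x')
print ram['keys']       # ['x'] -- mutation shows through the alias

disk['keys'] = []       # what ccache() does: rebind, never mutate
disk['keys'].append('y')
print ram['keys']       # still ['x']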
0
Example 98
Project: pydal Source File: migrator.py
def migrate_table(self, table, sql_fields, sql_fields_old, sql_fields_aux,
logfile, fake_migrate=False):
# logfile is deprecated (moved to adapter.log method)
db = table._db
db._migrated.append(table._tablename)
tablename = table._tablename
# make sure all field names are lower case to avoid
# migrations because of case change
sql_fields = dict(map(self._fix, iteritems(sql_fields)))
sql_fields_old = dict(map(self._fix, iteritems(sql_fields_old)))
sql_fields_aux = dict(map(self._fix, iteritems(sql_fields_aux)))
if db._debug:
db.logger.debug('migrating %s to %s' % (
sql_fields_old, sql_fields))
keys = list(sql_fields.keys())
for key in sql_fields_old:
if key not in keys:
keys.append(key)
new_add = self.dialect.concat_add(tablename)
metadata_change = False
sql_fields_current = copy.copy(sql_fields_old)
for key in keys:
query = None
if key not in sql_fields_old:
sql_fields_current[key] = sql_fields[key]
if self.dbengine in ('postgres',) and \
sql_fields[key]['type'].startswith('geometry'):
# 'sql' == ftype in sql
query = [sql_fields[key]['sql']]
else:
query = ['ALTER TABLE %s ADD %s %s;' % (
table.sqlsafe, self.dialect.quote(key),
sql_fields_aux[key]['sql'].replace(', ', new_add))]
metadata_change = True
elif self.dbengine in ('sqlite', 'spatialite'):
if key in sql_fields:
sql_fields_current[key] = sql_fields[key]
metadata_change = True
elif key not in sql_fields:
del sql_fields_current[key]
ftype = sql_fields_old[key]['type']
if self.dbengine == 'postgres' and \
ftype.startswith('geometry'):
geotype, parms = ftype[:-1].split('(')
schema = parms.split(',')[0]
query = ["SELECT DropGeometryColumn ('%(schema)s', \
'%(table)s', '%(field)s');" % dict(
schema=schema, table=tablename, field=key)]
elif self.dbengine in ('firebird',):
query = ['ALTER TABLE %s DROP %s;' % (
self.dialect.quote(tablename),
self.dialect.quote(key))]
else:
query = ['ALTER TABLE %s DROP COLUMN %s;' % (
self.dialect.quote(tablename),
self.dialect.quote(key))]
metadata_change = True
elif (
sql_fields[key]['sql'] != sql_fields_old[key]['sql'] and not
(key in table.fields and
isinstance(table[key].type, SQLCustomType)) and not
sql_fields[key]['type'].startswith('reference') and not
sql_fields[key]['type'].startswith('double') and not
sql_fields[key]['type'].startswith('id')):
sql_fields_current[key] = sql_fields[key]
t = tablename
tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
if self.dbengine in ('firebird',):
drop_expr = 'ALTER TABLE %s DROP %s;'
else:
drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
key_tmp = key + '__tmp'
query = [
'ALTER TABLE %s ADD %s %s;' % (
self.dialect.quote(t), self.dialect.quote(key_tmp),
tt),
'UPDATE %s SET %s=%s;' % (
self.dialect.quote(t), self.dialect.quote(key_tmp),
self.dialect.quote(key)),
drop_expr % (
self.dialect.quote(t), self.dialect.quote(key)),
'ALTER TABLE %s ADD %s %s;' % (
self.dialect.quote(t),
self.dialect.quote(key), tt),
'UPDATE %s SET %s=%s;' % (
self.dialect.quote(t), self.dialect.quote(key),
self.dialect.quote(key_tmp)),
drop_expr % (
self.dialect.quote(t), self.dialect.quote(key_tmp))
]
metadata_change = True
elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
sql_fields_current[key] = sql_fields[key]
metadata_change = True
if query:
self.log(
'timestamp: %s\n' % datetime.datetime.today().isoformat(),
table)
for sub_query in query:
self.log(sub_query + '\n', table)
if fake_migrate:
if db._adapter.commit_on_alter_table:
self.save_dbt(table, sql_fields_current)
self.log('faked!\n', table)
else:
self.adapter.execute(sub_query)
# Caveat: mysql, oracle and firebird
# do not allow multiple alter table
# in one transaction so we must commit
# partial transactions and
# update table._dbt after alter table.
if db._adapter.commit_on_alter_table:
db.commit()
self.save_dbt(table, sql_fields_current)
self.log('success!\n', table)
elif metadata_change:
self.save_dbt(table, sql_fields_current)
if metadata_change and not (
query and db._adapter.commit_on_alter_table):
db.commit()
self.save_dbt(table, sql_fields_current)
self.log('success!\n', table)
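migrate_table uses copy.copy(sql_fields_old) to get a working snapshot of the column metadata that it can add keys to and delete keys from as each ALTER TABLE succeeds. A shallow copy suffices because the loop only rebinds or deletes top-level keys; the nested per-column dicts are never mutated in place. A sketch of the distinction (field names are made up):

import copy

sql_fields_old = {'name': {'type': 'string', 'sql': 'name VARCHAR(512)'}}
sql_fields_current = copy.copy(sql_fields_old)

# Rebinding and deleting top-level keys touches only the copy...
sql_fields_current['age'] = {'type': 'integer', 'sql': 'age INTEGER'}
del sql_fields_current['name']
print sorted(sql_fields_old.keys())  # ['name'] -- snapshot intact

# ...but the nested dicts are still shared, so mutating one in place
# would leak into the snapshot. migrate_table never does that, which
# is why copy.copy (and not copy.deepcopy) is enough here.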
0
Example 99
Project: KiPart Source File: xilinxultra_reader.py
def xilinxultra_reader(csv_file):
'''Extract the pin data from a Xilinx CSV file and return a dictionary of pin data.'''
# Create a dictionary that uses the unit numbers as keys. Each entry in this dictionary
# contains another dictionary that uses the side of the symbol as a key. Each entry in
# that dictionary uses the pin names in that unit and on that side as keys. Each entry
# in that dictionary is a list of Pin objects with each Pin object having the same name
# as the dictionary key. So the pins are separated into units at the top level, and then
# the sides of the symbol, and then the pins with the same name that are on that side
# of the unit.
pin_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
# Read title line of the CSV file and extract the part number.
title = csv_file.readline()
try:
_, part_num, date, time, _ = re.split(r'\s+', title)
except:
return
# Dump the blank line between the title and the part's pin data.
_ = csv_file.readline()
# Create a reader object for the rows of the CSV file and read it row-by-row.
csv_reader = csv.DictReader(csv_file, skipinitialspace=True)
for index, row in enumerate(csv_reader):
# A blank line signals the end of the pin data.
try:
if row['Pin'] == '':
break
except KeyError:
# Abort if a TXT file is being processed instead of a CSV file.
return
# Get the pin attributes from the cells of the row of data.
pin = copy.copy(DEFAULT_PIN)
pin.index = index
pin.name = fix_pin_data(row['Pin Name'], part_num)
pin.num = fix_pin_data(row['Pin'], part_num)
pin.unit = fix_pin_data(row['Bank'], part_num)
# The type of the pin isn't given in the CSV file, so we'll have to infer it
# from the name of the pin. Pin names starting with the following prefixes
# are assigned the given pin type.
DEFAULT_PIN_TYPE = 'input' # Assign this pin type if name inference can't be made.
PIN_TYPE_PREFIXES = [
(r'CCLK', 'bidirectional'),
(r'CFGBVS_', 'input'),
(r'DONE', 'bidirectional'),
(r'D0[0-3]_', 'bidirectional'),
(r'DXP', 'passive'),
(r'DXN', 'passive'),
(r'GNDADC', 'input'),
(r'GND', 'power_in'),
(r'INIT_B', 'bidirectional'),
(r'IO_', 'bidirectional'),
(r'M0[_]?', 'input'),
(r'M1[_]?', 'input'),
(r'M2[_]?', 'input'),
(r'MGTAVCC[_]?', 'power_in'),
(r'MGTAVTTRCAL_', 'input'),
(r'MGTAVTT[_]?', 'input'),
(r'MGTHRX[NP][0-9]+_', 'input'),
(r'MGTHTX[NP][0-9]+_', 'output'),
(r'MGTREFCLK[0-9][NP]+_', 'input'),
(r'MGTRREF_', 'input'),
(r'MGTVCCAUX[_]?', 'power_in'),
(r'MGTYRX[NP][0-9]+_', 'input'),
(r'MGTYTX[NP][0-9]+_', 'output'),
(r'NC', 'no_connect'),
(r'POR_OVERRIDE', 'input'),
(r'PUDC_B_[0-9]+', 'input'),
(r'PROGRAM_B_[0-9]+', 'input'),
(r'RDWR_FCS_B_[0-9]+', 'bidirectional'),
(r'TCK_[0-9]+', 'input'),
(r'TDI_[0-9]+', 'input'),
(r'TDO_[0-9]+', 'output'),
(r'TMS_[0-9]+', 'input'),
(r'VBATT', 'power_in'),
(r'VCCADC?', 'power_in'),
(r'VCCAUX[_]?', 'power_in'),
(r'VCCBRAM', 'power_in'),
(r'VCCINT', 'power_in'),
(r'VCCO_', 'power_in'),
(r'VN', 'input'),
(r'VP', 'input'),
(r'VREF[PN]', 'input'),
(r'VREF_', 'input'),
(r'PS_MIO[0-9]+', 'bidirectional'),
(r'PS_DDR_DQ[0-9]+', 'bidirectional'),
(r'PS_DDR_DQS_[PN][0-9]+', 'bidirectional'),
(r'PS_DDR_ALERT_N', 'input'),
(r'PS_DDR_ACT_N', 'output'),
(r'PS_DDR_A[0-9]+', 'output'),
(r'PS_DDR_BA[0-9]+', 'output'),
(r'PS_DDR_BG[0-9]+', 'output'),
(r'PS_DDR_CK_N[0-9]+', 'output'),
(r'PS_DDR_CK[0-9]+', 'output'),
(r'PS_DDR_CKE[0-9]+', 'output'),
(r'PS_DDR_CS_N[0-9]+', 'output'),
(r'PS_DDR_DM[0-9]+', 'output'),
(r'PS_DDR_ODT[0-9]+', 'output'),
(r'PS_DDR_PARITY[0-9]*', 'output'),
(r'PS_DDR_RAM_RST_N[0-9]*', 'output'),
(r'PS_DDR_ZQ[0-9]*', 'bidirectional'),
(r'VCC_PS', 'power_in'),
(r'PS_DONE', 'output'),
(r'PS_ERROR_OUT', 'output'),
(r'PS_ERROR_STATUS', 'output'),
(r'PS_MODE[0-9]+', 'input'),
(r'PS_PADI', 'input'),
(r'PS_PADO', 'output'),
(r'PS_POR_B', 'input'),
(r'PS_PROG_B', 'input'),
(r'PS_INIT_B', 'output'),
(r'PS_DONE', 'output'),
(r'PS_REF_CLK', 'input'),
(r'PS_SRST_B', 'input'),
(r'PS_MGTRRX[NP][0-9]+_', 'input'),
(r'PS_MGTRTX[NP][0-9]+_', 'output'),
(r'PS_MGTREFCLK[0-9]+[NP]_', 'input'),
(r'PS_MGTRAVCC', 'power_in'),
(r'PS_MGTRAVTT', 'power_in'),
(r'PS_MGTRREF', 'input'),
(r'PS_JTAG_TCK', 'input'),
(r'PS_JTAG_TDI', 'input'),
(r'PS_JTAG_TDO', 'output'),
(r'PS_JTAG_TMS', 'input'),
]
for prefix, typ in PIN_TYPE_PREFIXES:
if re.match(prefix, pin.name, re.IGNORECASE):
pin.type = typ
break
else:
if pin.name not in defaulted_names:
warnings.warn('No match for {} on {}, assigning as {}'.format(
pin.name, part_num[:4], DEFAULT_PIN_TYPE))
pin.type = DEFAULT_PIN_TYPE
defaulted_names.add(pin.name)
pin.type = fix_pin_data(pin.type, part_num)
# Add the pin from this row of the CSV file to the pin dictionary.
# Place all the like-named pins into a list under their common name.
# We'll unbundle them later, if necessary.
pin_data[pin.unit][pin.side][pin.name].append(pin)
yield part_num, pin_data # Yield the dictionary of pins extracted from the CSV file.
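copy.copy(DEFAULT_PIN) above is a prototype pattern: every CSV row starts from one pre-populated default Pin, and only the per-row attributes are overwritten, which is cheaper and clearer than re-running a constructor with a dozen keyword defaults. A minimal sketch (the Pin class here is a hypothetical stand-in for KiPart's, which carries more attributes such as side and style):

import copy

class Pin(object):
    # Hypothetical prototype: defaults live on the class.
    name = ''
    num = ''
    unit = 1
    type = 'input'

DEFAULT_PIN = Pin()

pin = copy.copy(DEFAULT_PIN)  # fresh instance with the prototype's defaults
pin.name = 'IO_L1P_T0'
pin.num = 'A1'
print pin.name, pin.type      # IO_L1P_T0 input
print DEFAULT_PIN.name == ''  # True -- the prototype is untouched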
0
Example 100
Project: wtop Source File: logrep.py
def calculate_aggregates(reqs, agg_fields, group_by, order_by=None, limit=0,
descending=True, tmpfile=None):
if iqm_available:
miqm = iqm.MovingIQM(1000)
diqm = iqm.DictIQM(round_digits=-1, tenth_precise=True)
MAXINT = 1 << 64
table = dict()
cnt = 0
using_disk = False
if tmpfile:
import shelve
table = shelve.open(tmpfile, flag="n", writeback=True)
using_disk = True
# each aggregate record will start as a list of values whose
# default depends on the agg function. Also take the opportunity
# here to build a formatting string for printing the final results.
fmt = ["%s"] * len(agg_fields)
blank = [0] * (len(agg_fields) + 1) # that +1 is for a count column
needed_post_fns = list()
for i, f in enumerate(agg_fields):
op, field = f
if op == "avg":
fmt[i] = "%.2f"
elif op in ("dev", "miqm", "var"):
blank[i + 1] = (0, 0) # sum, squared sum
needed_post_fns.append((op, i + 1))
fmt[i] = "%.2f"
elif op == "iqm":
needed_post_fns.append((op, i + 1))
fmt[i] = "%d"
elif op == "min":
blank[i + 1] = MAXINT
fmt = "\t".join(fmt)
def agg_avg(i, r, field, table, key):
numerator = (table[key][i] * (table[key][0]-1)) + r[field]
denominator = float(table[key][0])
if denominator == 0:
return 0
else:
return numerator / denominator
def agg_iqm(i, r, field, table, key):
key = "%s-%s" % (key, i)
diqm(key, r[field])
return (0, 0)
def agg_miqm(i, r, field, table, key):
key = "%s-%s" % (key, i)
miqm(key, r[field])
return (0, 0)
def agg_post_prep(i, r, field, table, key):
sums = table[key][i][0] + r[field]
sq_sums = table[key][i][1] + (r[field] ** 2)
return (sums, sq_sums)
agg_fns = {
# the None function is for pass-through fields eg "class" in
# "class,max(msec)"
None: (lambda i, r, field, table, key: r[field]),
"avg": agg_avg,
# count(*) is always just copied from col 0
"count": (lambda i, r, field, table, key: table[key][0]),
"dev": agg_post_prep,
"max": (lambda i, r, field, table, key: max(r[field], table[key][i])),
"min": (lambda i, r, field, table, key: min(r[field], table[key][i])),
"sum": (lambda i, r, field, table, key: table[key][i] + r[field]),
"var": agg_post_prep,
}
if iqm_available:
agg_fns["iqm"] = agg_iqm
agg_fns["miqm"] = agg_miqm
# post-processing for more complex aggregates
def post_dev(key, col_idx, sums, sq_sums, count):
count = float(count)
numerator = (count * sq_sums) - (sums * sums)
denominator = count * (count - 1)
if denominator == 0:
return 0
else:
return math.sqrt(numerator / denominator)
def post_iqm(key, col_idx, sums, sq_sums, count):
key = "%s-%s" % (key, col_idx)
return diqm.report(key)
def post_miqm(key, col_idx, sums, sq_sums, count):
key = "%s-%s" % (key, col_idx)
return miqm.report(key)
def post_var(key, col_idx, sums, sq_sums, count):
count = float(count)
return (sq_sums - ((sums ** 2) / count)) / count
post_fns = {
"dev": post_dev,
"var": post_var,
}
if iqm_available:
post_fns["iqm"] = post_iqm
post_fns["miqm"] = post_miqm
# various stuff needed if we are also running a limit/sort
if limit:
running_list = dict()
key_fn, key_fn2 = keyfns(order_by)
lastt = time.time()
for r in reqs:
cnt += 1
if cnt % PROGRESS_INTERVAL == 0:
t = time.time() - lastt
lastt = time.time()
warn("%0.2f processed %d records..." % (t, cnt))
key = id_from_dict_keys(r, group_by)
if key not in table:
table[key] = copy(blank)
# always keep record count regardless of what the user asked for
table[key][0] += 1
for idx, (op, field) in enumerate(agg_fields):
table[key][idx+1] = agg_fns[op](idx+1, r, field, table, key)
# sort & prune periodically
if limit:
running_list[key] = table[key]
if cnt % SORT_BUFFER_LENGTH == 0:
running_list = dict(sorted(running_list.iteritems(),
key=key_fn,
reverse=descending)[0:limit])
if using_disk and cnt % DISC_SYNC_CNT == 0:
warn("sync()ing records to disk...")
table.sync()
warn("done.")
if limit:
records = running_list
else:
records = table
# todo: the arg signature is not generic. what other agg functions do
# people want?
if needed_post_fns:
cnt = 0
for k in records.iterkeys():
for (fn, col_idx) in needed_post_fns:
records[k][col_idx] = post_fns[fn](k, col_idx,
records[k][col_idx][0],
records[k][col_idx][1],
records[k][0])
cnt += 1
if using_disk and cnt % DISC_SYNC_CNT == 0:
warn("sync()ing records to disk...")
table.sync()
# return the records & printing format
# for silly reasons we have to also return the tmpfile and the table
# object.
return records, fmt, tmpfile, table
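The copy(blank) call inside the record loop clones the template row once per new group key, so every group gets its own independent list of accumulator slots. A shallow copy is safe here because every slot in blank is immutable (ints, tuples, MAXINT), and the loop rebinds slots rather than mutating them. A sketch of the pattern with made-up slots:

from copy import copy

MAXINT = 1 << 64

# Template row: count column plus one slot per aggregate. Tuples hold
# (sum, squared_sum) for dev/var; every slot is immutable, so one
# shallow copy per key is enough.
blank = [0, 0, (0, 0), MAXINT]

table = {}
for key in ('GET /a', 'GET /a', 'GET /b'):
    if key not in table:
        table[key] = copy(blank)  # independent row for this group
    table[key][0] += 1            # rebinds the count slot on this row only

print table['GET /a'][0]  # 2
print table['GET /b'][0]  # 1
print blank[0]            # 0 -- the template itself is never touched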