sys.float_info.epsilon

Here are examples of the Python API sys.float_info.epsilon taken from open source projects. sys.float_info.epsilon is the difference between 1.0 and the least float greater than 1.0 that is representable; for IEEE 754 doubles it equals 2**-52, roughly 2.22e-16.

20 Examples
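
A quick orientation before the examples: because epsilon is the spacing of floats at 1.0, it is only a meaningful absolute tolerance for values of roughly unit magnitude. A minimal demonstration:

import sys

eps = sys.float_info.epsilon
print(eps)                # 2.220446049250313e-16 for IEEE 754 doubles
print(1.0 + eps > 1.0)    # True: eps is the float spacing at 1.0
print(1e6 + eps == 1e6)   # True: at 1e6 the spacing is ~1.2e-10, so eps vanishes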

Example 1

Project: pypot
Source File: primitive.py
View license
    @goal_speed.setter
    def goal_speed(self, value):
        if abs(value) < sys.float_info.epsilon:
            self.goal_position = self.present_position

        else:
            # 0.7 corresponds approx. to the min speed that will be converted into 0
            # and as 0 corresponds to setting the max speed, we have to check this case
            value = numpy.sign(value) * 0.7 if abs(value) < 0.7 else value

            self.goal_position = numpy.sign(value) * self.max_pos
            self.moving_speed = abs(value)

Example 2

Project: ursgal
Source File: unimod_mapper.py
View license
    def _appMass2whatever(self, mass, decimal_places=2, entry_key=None):
        return_list = []
        for entry in self.data_list:
            umass = entry['mono_mass']
            rounded_umass = round( float(umass), decimal_places )
            if abs(rounded_umass - mass) <= sys.float_info.epsilon:
                return_list.append( entry[ entry_key ] )
        return return_list
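
Note that once umass is rounded to decimal_places, a machine-epsilon tolerance effectively requires rounded_umass and mass to be bit-identical doubles. A sketch of an alternative that ties the tolerance to the rounding step instead; the half-unit bound is my assumption, not ursgal's:

import sys

def masses_match(rounded_umass, mass, decimal_places=2):
    # half a unit in the last rounded place, rather than machine epsilon
    return abs(rounded_umass - mass) <= 0.5 * 10 ** -decimal_places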

Example 3

Project: isitbullshit
Source File: core.py
View license
def raise_for_float_problem(suspicious, scheme):
    if not isinstance(suspicious, float):
        cause = ValueError("Should be a float")
        error = ItIsBullshitError(suspicious)
        pep3134.raise_from(error, cause)

    if abs(suspicious - scheme) >= sys.float_info.epsilon:
        cause = ValueError("Should be {0}".format(scheme))
        error = ItIsBullshitError(suspicious)
        pep3134.raise_from(error, cause)
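
The tolerance here is absolute, so two floats that agree to 15 significant digits can still fail the check when their magnitude is large. A hedged alternative is the standard library's relative comparison (Python 3.5+):

import math
import sys

a, b = 1e16, 1e16 + 2.0                      # adjacent doubles at this magnitude
print(abs(a - b) >= sys.float_info.epsilon)  # True: the check above would reject them
print(math.isclose(a, b))                    # True: within the default rel_tol=1e-09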

Example 4

Project: isitbullshit
Source File: test_isitbullshit.py
View license
    @pytest.mark.parametrize("input_, result_", (
        (1.0, True),
        (1.0 + sys.float_info.epsilon, False),
        (1.0 - sys.float_info.epsilon, False),
    ))
    def test_float(self, input_, result_):
        func = positive if result_ else negative
        func(1.0, input_)

Example 5

Project: proficiency-metric
Source File: runstat.py
View license
    def stdDev (self):
        if self.count == 0:
            return float("NaN")
        if (self.maxV - self.minV) < sys.float_info.epsilon * self.sum2:
            return 0 # guard against roundoff errors producing sqrt(-eps)
        return math.sqrt(self.sum2 / self.count -
                         (self.sumV * self.sumV) / (self.count * self.count))
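
The maxV - minV guard protects against sum2 / count - mean**2 going slightly negative through roundoff. An equivalent and arguably simpler defense, sketched here rather than taken from the project, is to clamp the variance at zero before the square root:

import math

def std_from_moments(sum2, sumV, count):
    # clamp roundoff-induced negative variance to zero before sqrt
    var = sum2 / count - (sumV / count) ** 2
    return math.sqrt(max(0.0, var))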

Example 6

Project: GParML
Source File: gd-example.py
View license
def transform_back(b, x):
    if b == (0, None):
        if x > lim_val:
            return x
        elif x <= sys.float_info.epsilon:
            return numpy.log(-1 + numpy.exp(sys.float_info.epsilon))
        else:
            return numpy.log(-1 + numpy.exp(x))
    elif b == (None, None):
        return x
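
The expression log(-1 + exp(x)) inverts the softplus transform log(1 + exp(x)), which these GParML helpers appear to use to keep parameters positive; lim_val is defined elsewhere in the project and caps x before exp overflows. A round-trip sketch under that reading:

import numpy

def softplus(x):
    return numpy.log(1.0 + numpy.exp(x))

y = softplus(2.5)                         # forward transform
x_back = numpy.log(-1.0 + numpy.exp(y))   # same expression as transform_back
assert abs(x_back - 2.5) < 1e-12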

Example 7

Project: GParML
Source File: scg-example.py
View license
def transform_back(b, x):
    if b == (0, None):
        if x > lim_val:
            return x
        elif x <= sys.float_info.epsilon:
            return numpy.log(-1 + numpy.exp(sys.float_info.epsilon))
        else:
            return numpy.log(-1 + numpy.exp(x))
    elif b == (None, None):
        return x

Example 8

Project: GParML
Source File: scg_adapted-example.py
View license
def transform_back(b, x):
    if b == (0, None):
        if x > lim_val:
            return x
        elif x <= sys.float_info.epsilon:
            return numpy.log(-1 + numpy.exp(sys.float_info.epsilon))
        else:
            return numpy.log(-1 + numpy.exp(x))
    elif b == (None, None):
        return x

Example 9

Project: GParML
Source File: supporting_functions.py
View license
def transform_back(b, x):
    if b == (0, None):
        assert sys.float_info.epsilon < x < lim_val
        return numpy.log(-1 + numpy.exp(x))
    elif b == (None, None):
        return x

Example 10

Project: zipline
Source File: execution.py
View license
def asymmetric_round_price_to_penny(price, prefer_round_down,
                                    diff=(0.0095 - .005)):
    """
    Asymmetric rounding function for adjusting prices to two places in a way
    that "improves" the price.  For limit prices, this means preferring to
    round down on buys and preferring to round up on sells.  For stop prices,
    it means the reverse.

    If prefer_round_down == True:
        When .05 below to .95 above a penny, use that penny.
    If prefer_round_down == False:
        When .95 below to .05 above a penny, use that penny.

    In math-speak:
    If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01.
    If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01.
    """
    # Subtracting an epsilon from diff to enforce the open-ness of the upper
    # bound on buys and the lower bound on sells.  Using the actual system
    # epsilon doesn't quite get there, so use a slightly less epsilon-ey value.
    epsilon = float_info.epsilon * 10
    diff = diff - epsilon

    # relies on rounding half away from zero, unlike numpy's bankers' rounding
    rounded = round(price - (diff if prefer_round_down else -diff), 2)
    if zp_math.tolerant_equals(rounded, 0.0):
        return 0.0
    return rounded
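
The docstring's reliance on rounding half away from zero matches Python 2's round(); Python 3's built-in round() uses round-half-to-even, so a direct port would need an explicit helper. A minimal sketch of one:

import math

def round_half_away(x, ndigits=2):
    # round half away from zero: 0.125 -> 0.13, where round(0.125, 2) gives 0.12
    factor = 10.0 ** ndigits
    return math.copysign(math.floor(abs(x) * factor + 0.5), x) / factor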

Example 11

View license
    def __parse_conffile(self):
        self.conf = ET.parse(self.conf_file)  

        
        self.cost = 'L2'
        if self.conf.find('cost') is not None:
            self.cost = self.conf.find('cost').text
        
        pnt_nos = self.conf.find('pnt_no').text.split(',')
        self.pnt_nos = [int(p) for p in pnt_nos]

        self.op_dir = self.conf.find('logging/output_dir').text + '/'
        self.shared_op_dir = self.conf.find('logging/shared_output_dir').text + '/'
        self.weights_dir = self.conf.find('init/weights_dir').text + '/'
    
        self.load_weights = False
            
        self.learning_rate = float(self.conf.find('learning_rate').text)
        self.n_iters = int(self.conf.find('n_iters').text)
        self.n_epochs = int(self.conf.find('n_epochs').text)
 
        self.batch_size = int(self.conf.find('batch_size').text)
        self.thresh = int(self.conf.find('test_thresh').text)
        self.test_dir = self.conf.find('test_dir').text
        self.reg_weight = float(self.conf.find('reg_weight').text)       
        self.momentum = float(self.conf.find('momentum').text)   
        
        if self.momentum < sys.float_info.epsilon:
            self.use_momentum = False 
        else:
            self.use_momentum = True
        
        self.rmsprop_filter_weight = float(self.conf.find('rmsprop_filter_weight').text) 
        self.rmsprop_maxgain = float(self.conf.find('rmsprop_maxgain').text) 
        self.rmsprop_mingain = float(self.conf.find('rmsprop_mingain').text) 
        
        if self.rmsprop_filter_weight < sys.float_info.epsilon:
            self.use_rmsprop = False 
        else:
            self.use_rmsprop = True
            
        self.hard_mine_freq = int(self.conf.find('hard_mine_freq').text)   
        self.epoch_no = int(self.conf.find('epoch_no').text)   

        self.log_filename = self.conf.find('log_filename').text
        
        self.mix_ratio = dict()
        for kind in ['train', 'test']:
            self.mix_ratio[kind] = self.conf.find('mix_ratio/' + kind).text.split(':')
            self.mix_ratio[kind] = float(self.mix_ratio[kind][0]) / float(self.mix_ratio[kind][1])
        
        self.perturb = False
        self.perturb = self.conf.find('perturb').text == 'True'
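
The momentum and rmsprop_filter_weight checks use epsilon as an "is this parsed value zero" test, which works because a literal zero in the config parses to exactly 0.0. A quick illustration:

import sys

eps = sys.float_info.epsilon
print(float('0.0') < eps)     # True  -> momentum disabled
print(float('1e-20') < eps)   # True  -> nonzero, but still treated as disabled
print(float('0.9') < eps)     # False -> momentum used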

Example 12

Project: apsis
Source File: parameter_definition.py
View license
    def __init__(self, lower_bound, upper_bound,
                 include_lower=True, include_upper=True, epsilon=None):
        """
        Initializes the lower/upper bound defined parameter space.

        Parameters
        ----------
        lower_bound : float
            The lowest possible value
        upper_bound : float
            The highest possible value
        include_lower : bool, optional
            If true (default), lower_bound is the smallest possible value that
            can be returned. If false, all returned values will be greater than
            lower_bound.
        include_upper : bool, optional
            If true (default), upper_bound is the greatest possible value that
            can be returned. If false, all returned values will be less than
            upper_bound.
        epsilon : float, optional
            The tolerance to use if excluding upper/lower. The lowest or
            highest value will be epsilon away from the given lower or upper
            bound. By default, this is ten times the system's float epsilon.

        """
        self._logger = logging_utils.get_logger(self)
        self._logger.debug("Initializing MinMaxParamDef. Parameters are "
                           "lower bound %s, upper_bound %s, include_lower %s,"
                           "include_upper %s and epsilon %s",
                           lower_bound, upper_bound, include_lower,
                           include_upper, epsilon)
        try:
            lower_bound = float(lower_bound)
            upper_bound = float(upper_bound)
        except:
            raise ValueError("Bounds are not floats.")
        if epsilon is None:
            epsilon = sys.float_info.epsilon * 10
        self.epsilon = epsilon
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.include_lower = include_lower
        self.include_upper = include_upper
        self._logger.debug("Initialized MinMaxParamDef.")

Example 13

Project: holoviews
Source File: xarray.py
View license
    @classmethod
    def select(cls, dataset, selection_mask=None, **selection):
        validated = {}
        for k, v in selection.items():
            if isinstance(v, slice):
                v = (v.start, v.stop)
            if isinstance(v, set):
                validated[k] = list(v)
            elif isinstance(v, tuple):
                upper = None if v[1] is None else v[1]-sys.float_info.epsilon*10
                validated[k] = slice(v[0], upper)
            elif isinstance(v, types.FunctionType):
                validated[k] = v(dataset[k])
            else:
                validated[k] = v
        data = dataset.data.sel(**validated)

        # Restore constant dimensions
        dropped = {d.name: np.atleast_1d(data[d.name])
                   for d in dataset.kdims
                   if not data[d.name].data.shape}
        if dropped:
            data = data.assign_coords(**dropped)

        indexed = cls.indexed(dataset, selection)
        if (indexed and len(data.data_vars) == 1 and
            len(data[dataset.vdims[0].name].shape) == 0):
            return data[dataset.vdims[0].name].item()
        return data
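
The same absolute-shift caveat applies to the slice bound above: for coordinates much larger than 1.0, v[1] - sys.float_info.epsilon * 10 rounds back to v[1] and the upper bound stays inclusive. A hedged magnitude-independent variant:

import sys
import numpy as np

stop = 1e6
print(stop - sys.float_info.epsilon * 10 == stop)   # True: the shift vanishes
exclusive_stop = np.nextafter(stop, -np.inf)        # largest float strictly below stop
print(exclusive_stop < stop)                        # True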

Example 14

Project: GoogleCodeJam-2015
Source File: kiddie-pool.py
View license
def kiddie_pool():
    C, R = 0, 1
    N, V, X = map(float, raw_input().strip().split())
    N = int(N)
    sources = [map(float, raw_input().strip().split()) for _ in xrange(N)]
    sources = [[(x[1]-X), x[0]] for x in sources]
    
    # Turning on all the sources and get current C.
    cur_C = sum(x[R] * x[C] for x in sources)
    # The rate of turning on all the sources.
    Rmax = sum(x[R] for x in sources)
    if abs(cur_C) > float_info.epsilon:
        sources = sorted(sources, reverse = cur_C > float_info.epsilon)
        
        # For better precision, recompute Rmax.
        # Rmax = sum(Rj), where j in xrange(i, len(sources))
        # Ri may be slowed down.
        def r_max(sources, i):
            Rmax, cur_C = 0, 0
            # Sum R from i + 1 to len(sources) - 1.
            for j in xrange(i + 1, len(sources)):
                Rmax += sources[j][R]
                cur_C += sources[j][R] * sources[j][C]
            # Check if Ri should be slowed down.
            if abs(sources[i][C]) > float_info.epsilon:
                # 0 = cur_C + Ri' * sources[i][C].
                # Ri' = -cur_C / sources[i][C].
                Rmax += -cur_C / sources[i][C]
            else:
                Rmax += sources[i][R]
            return Rmax
 
        # Turn off the sources from the start until cur_C is as expected.
        i = 0
        while i < len(sources):
            # Current C is as expected.
            if abs(sources[i][C]) <= float_info.epsilon:
                return V / r_max(sources, i)
            elif cur_C / sources[i][C] > float_info.epsilon:
                # To slow down Rmax as little as possible:
                # 1. Always cool down cur_C by slowing down or
                #    turning off the hottest source i until Ci == 0.
                # 2. Always warm up cur_C by slowing down or
                #    turning off the coldest source i until Ci == 0.
                if abs(cur_C) - abs(sources[i][R] * sources[i][C]) > \
                   float_info.epsilon:
                    # Turn off the source i.
                    Rmax -= sources[i][R]
                    # Update current C.
                    cur_C -= sources[i][R] * sources[i][C]
                elif i != len(sources) - 1:
                    # Slow down the source i, the rate is Ri'.
                    return V / r_max(sources, i)
            i += 1
        
        # Turning off all possible sources still does not achieve expected C. 
        return "IMPOSSIBLE"

    return V / Rmax

Example 15

Project: GoogleCodeJam-2015
Source File: kiddie-pool.py
View license
def kiddie_pool2():
    R, C = 0, 1
    N, V, X = map(float, raw_input().strip().split())
    N = int(N)
    sources = [map(float, raw_input().strip().split()) for _ in xrange(N)]
    sources = [[x[R], (x[C]-X)] for x in sources]
    
    # Rx is always > 0, so no special case is needed.
    if max(x[C] for x in sources) >= -float_info.epsilon and \
       min(x[C] for x in sources) <= float_info.epsilon:
        Tmax = V / sum(x[R] for x in sources) # This is the min of Tmax,
                                              # only happens if every x[R]*x[C] is zero
        for x in sources:
            if abs(x[C]) > float_info.epsilon:
                # For each Cx find Tx by the following:
                # (1) V = RxTx   + sum(RiTi),     (sum() for each i != x)
                # (2) 0 = RxTxCx + sum(RiTiCi),   (sum() for each i != x)
                # <=> 0 = RxTx + sum(RiTiCi/Cx)
                # (1) - (2): V = sum(RiTi(1 - Ci/Cx)) <= Tmax * sum(Ri(1 - Ci/Cx))
                # <=> V / Tmax <= sum(Ri(1 - Ci/Cx)) = Fx
                # <=> V / Fx <= Tmax
                # To minimize Tmax is to maximize every Fx, i.e. minimize every Tx = V / Fx.
                Fx = sum(max(0, i[R]*(1-i[C]/x[C])) for i in sources)
                Tx = V / Fx
                Tmax = max(Tmax, Tx)
        return Tmax

    # Every Ci > 0 or every Ci < 0.
    return "IMPOSSIBLE"

Example 16

Project: GParML
Source File: supporting_functions.py
View license
def transformVar_back(x):
    assert numpy.all(sys.float_info.epsilon < x) and numpy.all(x < lim_val)
    val = numpy.log(-1 + numpy.exp(x))
    return val

Example 17

Project: PyGeM
Source File: affine.py
View license
def affine_points_fit(points_start, points_end):
	"""
	Fit an affine transformation from starting points to ending points through a
	least square procedure.

	:param numpy.ndarray points_start: set of starting points.
	:param numpy.ndarray points_end: set of ending points.

	:return: transform_vector: function that transforms a vector according to the
			 affine map. It takes a source vector and return a vector transformed
			 by the reduced row echelon form of the map.
	:rtype: function

	:Example:

	>>> import pygem.affine as at

	>>> # Example of a rotation (affine transformation)
	>>> p_start = np.array([[1,0,0], [0,1,0], [0,0,1], [0,0,0]])
	>>> p_end = np.array([[0,1,0], [-1,0,0], [0,0,1], [0,0,0]])
	>>> v_test = np.array([1., 2., 3.])
	>>> transformation = at.affine_points_fit(p_start, p_end)
	>>> v_trans = transformation(v_test)
	"""
	if len(points_start) != len(points_end):
		raise RuntimeError("points_start and points_end must be of same size.")

	dim = len(points_start[0])
	if len(points_start) < dim:
		raise RuntimeError(
			"Too few starting points => under-determined system."
		)

	# Fill an empty (dim+1) x (dim) matrix
	c = [[0.0 for a in range(dim)] for i in range(dim + 1)]
	for j in range(dim):
		for k in range(dim + 1):
			for i, pnts_i in enumerate(points_start):
				qt = list(pnts_i) + [1]
				c[k][j] += qt[k] * points_end[i][j]

	# Fill an empty (dim+1) x (dim+1) matrix
	Q = [[0.0 for a in range(dim)] + [0] for i in range(dim + 1)]
	for qi in points_start:
		qt = list(qi) + [1]
		for i in range(dim + 1):
			for j in range(dim + 1):
				Q[i][j] += qt[i] * qt[j]

	# Augment Q with c and get the reduced row echelon form of the result
	affine_matrix = [Q[i] + c[i] for i in range(dim + 1)]

	if np.linalg.cond(affine_matrix) < 1 / sys.float_info.epsilon:
		rref_aff_matrix = to_reduced_row_echelon_form(affine_matrix)
		rref_aff_matrix = np.array(rref_aff_matrix)
	else:
		raise RuntimeError(
			"Error: singular matrix. Points are probably coplanar."
		)

	def transform_vector(source):
		"""
		Transform a vector according to the affine map.

		:param numpy.ndarray source: vector to be transformed.

		:return destination: numpy.ndarray representing the transformed vector.
		:rtype: numpy.ndarray
		"""
		destination = np.zeros(dim)
		for i in range(dim):
			for j in range(dim):
				destination[j] += source[i] * rref_aff_matrix[i][j + dim + 1]
			# Add the last line of the rref
			destination[i] += rref_aff_matrix[dim][i + dim + 1]
		return destination

	return transform_vector
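
The np.linalg.cond(m) < 1 / sys.float_info.epsilon test is a common heuristic: a condition number approaching 1/eps (about 4.5e15) means the matrix is numerically singular at double precision. A small demonstration, not taken from PyGeM:

import sys
import numpy as np

well_conditioned = np.array([[2.0, 0.0], [0.0, 1.0]])
singular = np.ones((2, 2))   # rank 1
print(np.linalg.cond(well_conditioned) < 1 / sys.float_info.epsilon)   # True
print(np.linalg.cond(singular) < 1 / sys.float_info.epsilon)           # False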

Example 18

Project: python-paillier
Source File: paillier_test.py
View license
    def testAutomaticPrecisionAgreesWithEpsilon(self):
        # Check that automatic precision is equivalent to precision=eps
        eps = sys.float_info.epsilon

        # There's a math.floor in _encode, we want to test that
        # bin_lsb_exponent is correct and not off by some fraction that
        # sometimes gets rounded down. The " * 2" in the next line is excessive.
        floor_happy = math.ceil(self.EncodedNumberCls.LOG2_BASE) * 2

        for i in range(-floor_happy, floor_happy + 1):
            enc1 = self.EncodedNumberCls.encode(self.public_key, 2.**i)
            enc2 = self.EncodedNumberCls.encode(self.public_key, 2.**i,
                                                          precision=eps * 2**i)
            self.assertEqual(enc1.exponent, enc2.exponent, i)

            # Check the max val for a given eps
            rel_eps = eps * 2 ** (i - 1)
            val = 2. ** i - rel_eps
            assert val != 2. ** i
            enc3 = self.EncodedNumberCls.encode(self.public_key, val)
            enc4 = self.EncodedNumberCls.encode(self.public_key, val,
                                                        precision=rel_eps)
            self.assertEqual(enc3.exponent, enc4.exponent, i)

Example 19

Project: python-paillier
Source File: paillier_test.py
View license
    def testAutomaticPrecision0(self):
        eps = sys.float_info.epsilon
        one_plus_eps = 1. + eps
        assert one_plus_eps > 1. # If this is false, we have trouble!

        ciphertext1 = self.public_key.encrypt(one_plus_eps)
        decryption1 = self.private_key.decrypt(ciphertext1)
        self.assertEqual(one_plus_eps, decryption1)

        ciphertext2 = ciphertext1 + eps
        self.assertGreater(ciphertext1.exponent, ciphertext2.exponent)
        decryption2 = self.private_key.decrypt(ciphertext2)
        self.assertEqual(one_plus_eps + eps, decryption2)

        # 1. + eps/5 == 1. for a python float...
        ciphertext3 = ciphertext1 + eps / 5
        decryption3 = self.private_key.decrypt(ciphertext3)
        self.assertEqual(one_plus_eps, decryption3)

        # ...but not for our 'arbitrary-precision' Paillier floats
        ciphertext4 = ciphertext3 + eps * 4. / 5
        decryption4 = self.private_key.decrypt(ciphertext4)
        self.assertNotEqual(one_plus_eps, decryption4)
        self.assertEqual(one_plus_eps + eps, decryption4)
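
These tests pin down behavior at the first ulp above 1.0; the plain-float baseline they build on, independent of Paillier, is below (math.ulp needs Python 3.9+):

import math
import sys

eps = sys.float_info.epsilon
print(1.0 + eps / 5 == 1.0)   # True: less than half an ulp rounds back to 1.0
print(math.ulp(1.0) == eps)   # True: eps is exactly the ulp at 1.0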

Example 20

Project: nistats
Source File: design_matrix.py
View license
def create_second_level_design(maps_table, confounds=None):
    """Sets up a second level design from a maps table.

    Parameters
    ----------
    maps_table: pandas DataFrame
        Contains at least columns 'map_name' and 'subject_id'
    confounds: pandas DataFrame, optional
        If given, contains at least two columns, 'subject_id' and one confound.
        confounds and maps_table do not need to agree on their shape,
        information between them is matched based on the 'subject_id' column
        that both must have.

    Returns
    -------
    design_matrix: pandas DataFrame
        The second level design matrix
    """
    maps_name = maps_table['map_name'].tolist()
    subjects_id = maps_table['subject_id'].tolist()
    confounds_name = []
    if confounds is not None:
        confounds_name = confounds.columns.tolist()
        confounds_name.remove('subject_id')
    design_columns = (np.unique(maps_name).tolist() +
                      np.unique(subjects_id).tolist() +
                      confounds_name)
    design_matrix = pd.DataFrame(columns=design_columns)
    for ridx, row in maps_table.iterrows():
        design_matrix.loc[ridx] = [0] * len(design_columns)
        design_matrix.loc[ridx, row['map_name']] = 1
        design_matrix.loc[ridx, row['subject_id']] = 1
        if confounds is not None:
            conrow = confounds['subject_id'] == row['subject_id']
            for conf_name in confounds_name:
                design_matrix.loc[ridx, conf_name] = confounds[conrow][conf_name].values

    # check column names are unique
    if len(np.unique(design_columns)) != len(design_columns):
        raise ValueError('Design matrix columns do not have unique names')

    # check design matrix is not singular
    if np.linalg.cond(design_matrix.as_matrix()) < (1. / sys.float_info.epsilon):
        warn('Attention: Design matrix is singular. Aberrant estimates '
             'are expected.')

    return design_matrix
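
One portability note: DataFrame.as_matrix() was deprecated and later removed from pandas, so on current pandas the same singularity guard would use to_numpy(). A hedged equivalent of the check:

import sys
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0], 'b': [2.0, 4.0]})   # collinear columns
cond = np.linalg.cond(df.to_numpy())                    # to_numpy() replaces as_matrix()
print(cond < (1. / sys.float_info.epsilon))             # False: flagged as singular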
