numpy.float16

Here are examples of the Python API numpy.float16, taken from open source projects.

155 Examples
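
Before the project snippets, a minimal standalone sketch (not taken from any of the projects below) of what numpy.float16 itself provides: a 2-byte IEEE 754 half-precision type with 1 sign bit, 5 exponent bits, and 10 mantissa bits, giving roughly three significant decimal digits and a largest finite value of 65504. That limited range and precision is why many of the examples below relax test tolerances or branch on the dtype being float16.

import numpy as np

# Half precision: 2 bytes per element, roughly 3 significant decimal digits.
x = np.array([0.1, 1.0, 1000.0], dtype=np.float16)
print(x.itemsize)                          # 2 bytes per value
print(float(np.finfo(np.float16).max))     # 65504.0, the largest finite value
print(float(np.finfo(np.float16).eps))     # 0.0009765625, i.e. 2**-10
print(np.float16(0.1) == np.float64(0.1))  # False: 0.1 is only stored approximately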

Example 1

Project: chainer
Source File: test_matmul.py
    def setUp(self):
        self.x1 = numpy.random.uniform(.5, 1, (m, k)).astype(numpy.float16)
        self.x2 = numpy.random.uniform(.5, 1, (k, n)).astype(numpy.float16)
        self.gy = numpy.random.uniform(-1, 1, (m, n)).astype(numpy.float16)
        self.op = lambda x, y: F.matmul(x, y)
        self.forward_answer = numpy.dot(self.x1, self.x2)

Example 2

Project: chainer
Source File: relu.py
    def forward_gpu(self, x):
        if (cuda.cudnn_enabled and self.use_cudnn and
                (_cudnn_version >= 3000 or x[0].dtype != numpy.float16)):
            y = cudnn.activation_forward(x[0], _mode)
            self.y = y
        else:
            y = cuda.cupy.maximum(x[0], 0)
        return y,

Example 3

Project: chainer
Source File: relu.py
    def backward_gpu(self, x, gy):
        if (cuda.cudnn_enabled and self.use_cudnn and
                (_cudnn_version >= 3000 or x[0].dtype != numpy.float16)):
            gx = cudnn.activation_backward(x[0], self.y, gy[0], _mode)
        else:
            gx = cuda.elementwise(
                'T x, T gy', 'T gx',
                'gx = x > 0 ? gy : (T)0',
                'relu_bwd')(x[0], gy[0])
        return gx,

Example 4

Project: chainer
Source File: sigmoid.py
    def forward_gpu(self, inputs):
        x = inputs[0]
        if (cuda.cudnn_enabled and self.use_cudnn and
                (_cudnn_version >= 3000 or x.dtype != numpy.float16)):
            self.y = cuda.cupy.cudnn.activation_forward(x, _mode)
        else:
            self.y = cuda.elementwise(
                'T x', 'T y', 'y = tanh(x * 0.5) * 0.5 + 0.5',
                'sigmoid_fwd')(x)
        return self.y,

Example 5

Project: chainer
Source File: sigmoid.py
    def backward_gpu(self, inputs, grads):
        x = inputs[0]
        gy = grads[0]
        if (cuda.cudnn_enabled and self.use_cudnn and
                (_cudnn_version >= 3000 or x.dtype != numpy.float16)):
            gx = cuda.cupy.cudnn.activation_backward(x, self.y, gy, _mode)
        else:
            gx = cuda.elementwise(
                'T y, T gy', 'T gx',
                'gx = gy * y * (1 - y)',
                'sigmoid_bwd')(self.y, gy)
        return gx,

Example 6

Project: chainer
Source File: tanh.py
    def forward_gpu(self, x):
        if (cuda.cudnn_enabled and self.use_cudnn and
                (_cudnn_version >= 3000 or x[0].dtype != numpy.float16)):
            self.y = cudnn.activation_forward(x[0], _mode)
        else:
            self.y = cuda.cupy.empty_like(x[0])
            cuda.cupy.tanh(x[0], out=self.y)
        return self.y,

Example 7

Project: chainer
Source File: tanh.py
    def backward_gpu(self, x, gy):
        if (cuda.cudnn_enabled and self.use_cudnn and
                (_cudnn_version >= 3000 or x[0].dtype != numpy.float16)):
            gx = cudnn.activation_backward(x[0], self.y, gy[0], _mode)
        else:
            gx = cuda.elementwise(
                'T y, T gy', 'T gx',
                'gx = gy * (1 - y * y)',
                'tanh_bwd')(self.y, gy[0])
        return gx,

Example 8

Project: chainer
Source File: alex.py
    def __init__(self):
        self.dtype = np.float16
        W = initializers.HeNormal(1 / np.sqrt(2), self.dtype)
        bias = initializers.Zero(self.dtype)
        chainer.Chain.__init__(
            self,
            conv1=L.Convolution2D(3,  96, 11, stride=4, initialW=W, bias=bias),
            conv2=L.Convolution2D(96, 256,  5, pad=2, initialW=W, bias=bias),
            conv3=L.Convolution2D(256, 384,  3, pad=1, initialW=W, bias=bias),
            conv4=L.Convolution2D(384, 384,  3, pad=1, initialW=W, bias=bias),
            conv5=L.Convolution2D(384, 256,  3, pad=1, initialW=W, bias=bias),
            fc6=L.Linear(9216, 4096, initialW=W, bias=bias),
            fc7=L.Linear(4096, 4096, initialW=W, bias=bias),
            fc8=L.Linear(4096, 1000, initialW=W, bias=bias),
        )
        self.train = True

Example 9

Project: chainer
Source File: test_log_softmax.py
    def setUp(self):
        if self.shape is None:
            # For checking numerical stability
            value = -5 if self.dtype == numpy.float16 else -1000
            self.x = numpy.array([[value, 1]], dtype=self.dtype)
        else:
            self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)

        self.check_forward_options = {}
        self.check_backward_options = {'dtype': numpy.float64}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
            self.check_backward_options = {
                'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}

Example 10

Project: chainer
Source File: test_relu.py
    def setUp(self):
        self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.expect = self.use_cudnn and (
            cuda.cudnn.cudnn.getVersion() >= 3000 or
            self.dtype != numpy.float16)

Example 11

Project: chainer
Source File: test_sigmoid.py
    def setUp(self):
        self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.expect = self.use_cudnn and (
            cuda.cudnn.cudnn.getVersion() >= 3000 or
            self.dtype != numpy.float16)

Example 12

Project: chainer
Source File: test_softmax.py
    def setUp(self):
        if self.shape is None:
            # For checking numerical stability
            value = -5 if self.dtype == numpy.float16 else -1000
            self.x = numpy.array([[value, 1]], dtype=self.dtype)
        else:
            self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)

        self.check_forward_options = {}
        self.check_backward_options = {'dtype': numpy.float64}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
            self.check_backward_options = {
                'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}

Example 13

Project: chainer
Source File: test_softmax.py
    def setUp(self):
        self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.expect = self.use_cudnn and (
            cuda.cudnn.cudnn.getVersion() >= 3000 or
            self.dtype != numpy.float16)

Example 14

Project: chainer
Source File: test_tanh.py
    def setUp(self):
        self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        self.expect = self.use_cudnn and (
            cuda.cudnn.cudnn.getVersion() >= 3000 or
            self.dtype != numpy.float16)

Example 15

Project: chainer
Source File: test_basic_math.py
    def check_backward(self, op, x1_data, x2_data, y_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-4, 'rtol': 5e-3}
        gradient_check.check_backward(op, (x1_data, x2_data), y_grad,
                                      dtype=numpy.float64, **options)

Example 16

Project: chainer
Source File: test_basic_math.py
    def check_forward(self, op, x_data):
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        if self.dtype == numpy.float16:
            atol = 5e-4
            rtol = 5e-4
        else:
            atol = 1e-7
            rtol = 1e-7
        testing.assert_allclose(
            op(self.x, self.value), y.data, atol=atol, rtol=rtol)

Example 17

Project: chainer
Source File: test_basic_math.py
    def check_backward(self, op, x_data, y_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-4, 'rtol': 5e-3}
        gradient_check.check_backward(lambda x: op(x, self.value),
                                      x_data, y_grad,
                                      dtype=numpy.float64, **options)

Example 18

Project: chainer
Source File: test_basic_math.py
    def check_backward(self, op, x_data, y_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-4, 'rtol': 5e-3}
        gradient_check.check_backward(
            op, x_data, y_grad, dtype=numpy.float64, **options)

Example 19

Project: chainer
Source File: test_basic_math.py
    def check_backward(self, x_data, y_grad):
        options = {}
        if self.dtype == numpy.float16:
            options = {'atol': 5e-4, 'rtol': 5e-3}
        gradient_check.check_backward(
            lambda x: x ** 2, x_data, y_grad, dtype=numpy.float64, **options)

Example 20

Project: chainer
Source File: test_basic_math.py
    def check_forward(self, x_data, y_data):
        if self.left_const:
            x = x_data
        else:
            x = chainer.Variable(x_data)
        if self.right_const:
            y = y_data
        else:
            y = chainer.Variable(y_data)
        z = operator.matmul(x, y)
        if self.dtype == numpy.float16:
            options = {'atol': 1e-3, 'rtol': 1e-3}
        else:
            options = {'atol': 1e-7, 'rtol': 1e-7}
        testing.assert_allclose(
            self.x.dot(self.y), z.data, **options)

Example 21

Project: chainer
Source File: test_basic_math.py
    def check_backward(self, x_data, y_data, z_grad):
        if self.right_const:
            def op(x):
                return operator.matmul(x, y_data)
            data = x_data,
        elif self.left_const:
            def op(y):
                return operator.matmul(x_data, y)
            data = y_data,
        else:
            op = operator.matmul
            data = x_data, y_data

        if self.dtype == numpy.float16:
            options = {'atol': 1e-3, 'rtol': 1e-3}
        else:
            options = {'atol': 1e-4, 'rtol': 1e-4}
        gradient_check.check_backward(
            op, data, z_grad, dtype=numpy.float64, **options)

Example 22

Project: chainer
Source File: test_sum.py
    def check_forward(self, x_data, axis=None):
        x = chainer.Variable(x_data)
        y = functions.sum(x, axis=axis)
        self.assertEqual(y.data.dtype, self.dtype)
        y_expect = self.x.sum(axis=axis)

        if self.dtype == numpy.float16:
            options = {'atol': 1e-3, 'rtol': 1e-3}
        else:
            options = {}

        testing.assert_allclose(y_expect, y.data, **options)

Example 23

Project: seagoatvision
Source File: linetest.py
    def save_data(self):
        precisions = np.zeros(len(self.precisions), dtype=np.float16)
        noises = np.zeros(len(self.noises), dtype=np.float16)
        for x in xrange(0, len(self.precisions)):
            precisions[x] = round(self.precisions.values()[x] * 100.0, 2)
            noises[x] = round(self.noises.values()[x] * 100.0, 2)

        tmp = tempfile.NamedTemporaryFile()
        data = np.append(precisions, noises)
        tmp.file.write(data.tostring())
        tmp.file.flush()
        return tmp

Example 24

Project: seagoatvision
Source File: parameval.py
    def __init__(self, test_folder, chain, fname, pname, minval, maxval):
        """
        Args:
            test_folder: test folder path
            chain: the filterchain object to test
            fname: the filter name
            pname: the parameter name to evaluate
            minval: the starting value
            maxval: the ending value"""
        self.test_folder = test_folder
        self.chain = chain
        self.fname = fname
        self.pname = pname
        self.filter = self.filter_to_use()
        self.minval = minval
        self.maxval = maxval
        self.precisions = np.zeros(maxval - minval, dtype=np.float16)
        self.noises = np.zeros(maxval - minval, dtype=np.float16)

Example 25

  def testHalf(self):
    t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
    self.assertProtoEquals("""
      dtype: DT_HALF
      tensor_shape {
        dim {
          size: 2
        }
      }
      half_val: 18688
      half_val: 19712
      """, t)

    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.float16, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
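
The two half_val integers in the proto above are simply the raw 16-bit patterns of the float16 values 10.0 and 20.0. A quick standalone check (mine, not part of the TensorFlow test) with plain numpy:

import numpy as np

# Reinterpret the float16 storage as unsigned 16-bit integers.
bits = np.array([10.0, 20.0], dtype=np.float16).view(np.uint16)
print(bits)                   # [18688 19712] -- matches the half_val fields above
print(bits.view(np.float16))  # [10. 20.] -- round-trips back to the original values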

Example 26

Project: deep_recommend_system
Source File: test_util.py
  def assertAllCloseAccordingToType(self, a, b, rtol=1e-6, atol=1e-6):
    """Like assertAllClose, but also suitable for comparing fp16 arrays.

    In particular, the tolerance is reduced to 1e-3 if at least
    one of the arguments is of type float16.

    Args:
      a: a numpy ndarray or anything can be converted to one.
      b: a numpy ndarray or anything can be converted to one.
      rtol: relative tolerance
      atol: absolute tolerance
    """
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    if a.dtype == np.float16 or b.dtype == np.float16:
      rtol = max(rtol, 1e-3)
      atol = max(atol, 1e-3)

    self.assertAllClose(a, b, rtol=rtol, atol=atol)

Example 27

  def testCompare(self):
    for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
            np.complex64, np.complex128):
      self._compareDiffType(2, t, False)
      self._compareDiffType(3, t, False)

      x = [1, 2, 3]
      y = [4, 5]

      a = [[1, 1], [1, 1]]

      self._compareDiff(x, y, False)
      self._compareDiff(x, a, False)

Example 28

  def _testAll(self, np_inputs, np_bias):
    self._testBias(np_inputs, np_bias, use_gpu=False)
    if np_inputs.dtype in [np.float16, np.float32, np.float64]:
      self._testBias(np_inputs, np_bias, use_gpu=True)
      if tf.test.is_gpu_available():
        self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)

Example 29

  def testToFloat16(self):
    with self.test_session():
      in_bytes = tf.placeholder(tf.string, shape=[None])
      decode = tf.decode_raw(in_bytes, out_type=tf.float16)
      self.assertEqual([None, None], decode.get_shape().as_list())

      expected_result = np.matrix([[1, -2, -3, 4]], dtype=np.float16)
      result = decode.eval(feed_dict={in_bytes: [expected_result.tostring()]})

      self.assertAllEqual(expected_result, result)

Example 30

  def testHalfBasic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.float16)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.float16)
    self._testCpuMatmul(x, y)
    if test_util.CudaSupportsHalfMatMulAndConv():
      self._testGpuMatmul(x, y)
    else:
      print("Built without fp16 matmul support, skipping GPU test.")

Example 31

  def testHalfVector(self):
    self._vectorTest(np.float16, gpu=False)
    if test_util.CudaSupportsHalfMatMulAndConv():
      self._vectorTest(np.float16, gpu=True)
    else:
      print("Built without fp16 matmul support, skipping GPU test.")

Example 32

  def testNumbers(self):
    for t in [np.float16, np.float32, np.float64]:
      self._testElu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=False)
      self._testElu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=True)

Example 33

  def testHalf(self):
    self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
    self._compare(
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
    self._compare(
        np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))

Example 34

  def _testSingleClass(self, use_gpu=False):
    for dtype in np.float16, np.float32:
      with self.test_session(use_gpu=use_gpu) as sess:
        loss, backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
            np.array([[1.], [-1.], [0.]]).astype(dtype),
            np.array([[-1.], [0.], [1.]]).astype(dtype))
        tf_loss, tf_backprop = sess.run([loss, backprop])
      self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
      self.assertAllClose([[2.0], [1.0], [0.0]], tf_backprop)

Example 35

  def testRankTooLarge(self):
    for dtype in np.float16, np.float32:
      np_features = np.array(
          [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]).astype(dtype)
      np_labels = np.array(
          [[[0., 0., 0., 1.]], [[0., .5, .5, 0.]]]).astype(dtype)
      self.assertRaisesRegexp(ValueError, "must be rank 2",
                              gen_nn_ops._softmax_cross_entropy_with_logits,
                              np_features, np_labels)

Example 36

  def testReduceLogSumExp(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf_np = math_ops.reduce_logsumexp(x_np).eval()
        y_np = log(np.sum(exp(x_np)))
        self.assertAllClose(y_tf_np, y_np)

Example 37

  def testReductionIndices(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=[0])
        y_np = log(np.sum(exp(x_np), axis=0))
        self.assertShapeEqual(y_np, y_tf)
        y_tf_np = y_tf.eval()
        self.assertAllClose(y_tf_np, y_np)

Example 38

  def testReductionIndices2(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=0)
        y_np = log(np.sum(exp(x_np), axis=0))
        self.assertShapeEqual(y_np, y_tf)
        y_tf_np = y_tf.eval()
        self.assertAllClose(y_tf_np, y_np)

Example 39

  def testKeepDims(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf_np = math_ops.reduce_logsumexp(x_np, keep_dims=True).eval()
        self.assertEqual(y_tf_np.ndim, x_np.ndim)
        y_np = log(np.sum(exp(x_np), keepdims=True))
        self.assertAllClose(y_tf_np, y_np)

Example 40

  def testOverflow(self):
    x = [1000, 1001, 1002, 1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "overflow encountered in exp"):
        out = log(np.sum(exp(x_np)))
        if out == np.inf:
          raise RuntimeWarning("overflow encountered in exp")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np)

Example 41

  def testUnderflow(self):
    x = [-1000, -1001, -1002, -1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "divide by zero encountered in log"):
        out = log(np.sum(exp(x_np)))
        if out == -np.inf:
          raise RuntimeWarning("divide by zero encountered in log")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np)

Example 42

  def _toType(self, dtype):
    if dtype == np.float16:
      return tf.float16
    elif dtype == np.float32:
      return tf.float32
    elif dtype == np.float64:
      return tf.float64
    elif dtype == np.int32:
      return tf.int32
    elif dtype == np.int64:
      return tf.int64
    else:
      assert False, (dtype)

Example 43

  def testApplyGradientDescent(self):
    for (dtype, use_gpu) in itertools.product(
        [np.float16, np.float32, np.float64], [False, True]):
      x = np.arange(100).astype(dtype)
      alpha = np.array(2.0).astype(dtype)
      delta = np.arange(100).astype(dtype)
      self._testTypes(x, alpha, delta, use_gpu)

Example 44

  def testApplyAdagrad(self):
    for (dtype, use_gpu) in itertools.product(
        [np.float16, np.float32, np.float64], [False, True]):
      x = np.arange(100).astype(dtype)
      y = np.arange(1, 101).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self._testTypesForAdagrad(x, y, lr, grad, use_gpu)

Example 45

  def testApplyFtrl(self):
    for dtype in [np.float16, np.float32, np.float64]:
      x = np.arange(100).astype(dtype)
      y = np.arange(1, 101).astype(dtype)
      z = np.arange(102, 202).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      l1 = np.array(3.0).astype(dtype)
      l2 = np.array(4.0).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self._testTypesForFtrl(x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)

Example 46

  def testSparseApplyAdagrad(self):
    for (dtype, index_type) in itertools.product(
        [np.float16, np.float32, np.float64], [np.int32, np.int64]):
      x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]
      y_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]
      x = np.array(x_val).astype(dtype)
      y = np.array(y_val).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad_val = [np.arange(10), np.arange(10)]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseAdagrad(x, y, lr, grad, indices)

Example 47

  def testSparseApplyAdagradDim1(self):
    for (dtype, index_type) in itertools.product(
        [np.float16, np.float32, np.float64], [np.int32, np.int64]):
      x_val = [[1.0], [2.0], [3.0]]
      y_val = [[4.0], [5.0], [6.0]]
      x = np.array(x_val).astype(dtype)
      y = np.array(y_val).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad_val = [[1.5], [2.5]]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseAdagrad(x, y, lr, grad, indices)

Example 48

  def testSparseApplyFtrlDim1(self):
    for (dtype, index_type) in itertools.product(
        [np.float16, np.float32, np.float64], [np.int32, np.int64]):
      x_val = [[0.0], [0.0], [0.0]]
      y_val = [[4.0], [5.0], [6.0]]
      z_val = [[0.0], [0.0], [0.0]]
      x = np.array(x_val).astype(dtype)
      y = np.array(y_val).astype(dtype)
      z = np.array(z_val).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad_val = [[1.5], [2.5]]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseFtrl(x, y, z, lr, grad, indices)

Example 49

  def testApplyAdam(self):
    for dtype, use_gpu in itertools.product(
        [np.float16, np.float32, np.float64], [False, True]):
      var = np.arange(100).astype(dtype)
      m = np.arange(1, 101).astype(dtype)
      v = np.arange(101, 201).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self._testTypesForAdam(var, m, v, grad, use_gpu)

Example 50

  def testHalf(self):
    t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
    self.assertProtoEquals("""
      dtype: DT_HALF
      tensor_shape {
        dim {
          size: 2
        }
      }
      half_val: 18688
      half_val: 19712
      """, t)

    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.float16, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)