numpy.random.mtrand.RandomState

Here are examples of the Python API numpy.random.mtrand.RandomState, collected from open source projects.

106 Examples
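
Before the project examples, a minimal sketch of the API itself: numpy.random.mtrand.RandomState (also exposed as numpy.random.RandomState) is a seedable generator whose stream is reproducible and independent of the module-level numpy.random functions.

import numpy as np

# Seeding makes the stream reproducible across runs.
rng = np.random.mtrand.RandomState(42)
a = rng.randn(3)          # three standard-normal draws

# The same seed yields the same draws.
rng2 = np.random.mtrand.RandomState(42)
assert (a == rng2.randn(3)).all()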

3 Source : test_nmf.py
with MIT License
from alvarobartt

def test_initialize_nn_output():
    # Test that initialization does not return negative values
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
        W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
        assert_false((W < 0).any() or (H < 0).any())


def test_parameter_checking():

3 Source : test_nmf.py
with MIT License
from alvarobartt

def test_initialize_close():
    # Test NNDSVD error
    # Test that _initialize_nmf error is less than the standard deviation of
    # the entries in the matrix.
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(10, 10))
    W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
    error = linalg.norm(np.dot(W, H) - A)
    sdev = linalg.norm(A - A.mean())
    assert_true(error <= sdev)


def test_initialize_variants():

3 Source : test_nmf.py
with MIT License
from alvarobartt

def test_initialize_variants():
    # Test NNDSVD variants correctness
    # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
    # 'nndsvd' only where the basic version has zeros.
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
    Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
    War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
                                   random_state=0)

    for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
        assert_almost_equal(evl[ref != 0], ref[ref != 0])


# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)

3 Source : test_nmf.py
with MIT License
from alvarobartt

def test_nmf_fit_close():
    rng = np.random.mtrand.RandomState(42)
    # Test that the fit is not too far away
    for solver in ('cd', 'mu'):
        pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
                   max_iter=600)
        X = np.abs(rng.randn(6, 5))
        assert_less(pnmf.fit(X).reconstruction_err_, 0.1)


def test_nmf_transform():

3 Source : test_nmf.py
with MIT License
from alvarobartt

def test_nmf_transform():
    # Test that NMF.transform returns close values
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(6, 5))
    for solver in ['cd', 'mu']:
        m = NMF(solver=solver, n_components=3, init='random',
                random_state=0, tol=1e-5)
        ft = m.fit_transform(A)
        t = m.transform(A)
        assert_array_almost_equal(ft, t, decimal=2)


def test_nmf_transform_custom_init():

3 Source : test_nmf.py
with MIT License
from alvarobartt

def test_n_components_greater_n_features():
    # Smoke test for the case of more components than features.
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(30, 10))
    NMF(n_components=15, random_state=0, tol=1e-2).fit(A)


def test_nmf_sparse_input():

3 Source : test_nmf.py
with MIT License
from alvarobartt

def test_nmf_sparse_transform():
    # Test that transform works on sparse data.  Issue #2124
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(3, 2))
    A[1, 1] = 0
    A = csc_matrix(A)

    for solver in ('cd', 'mu'):
        model = NMF(solver=solver, random_state=0, n_components=2,
                    max_iter=400)
        A_fit_tr = model.fit_transform(A)
        A_tr = model.transform(A)
        assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)


def test_non_negative_factorization_consistency():

3 Source : rng.py
with GNU General Public License v3.0
from Artikash

    def __init__(self, seed, dist=None):
        if seed <= 0:
            self._rng = mt.RandomState()
        elif seed > 0:
            self._rng = mt.RandomState(seed)
        if dist is None:
            dist = default_distribution
        if not isinstance(dist, Distribution):
            raise error("Not a distribution object")
        self._dist = dist

    def ranf(self):

3 Source : decision_tree.py
with Apache License 2.0
from bsc-wdc

def _sample_selection(n_samples, y_targets, bootstrap, seed):
    if bootstrap:
        random_state = RandomState(seed)
        selection = random_state.choice(
            n_samples, size=n_samples, replace=True
        )
        selection.sort()
        return selection, y_targets[selection]
    else:
        return np.arange(n_samples), y_targets


def _feature_selection(untried_indices, m_try, random_state):
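
The bootstrap selection above relies on a RandomState built from a per-task seed, so the same seed always reproduces the same sample. A self-contained sketch of that property (the names and sizes here are illustrative, not from the project):

import numpy as np
from numpy.random.mtrand import RandomState

n_samples = 8
sel_a = RandomState(7).choice(n_samples, size=n_samples, replace=True)
sel_b = RandomState(7).choice(n_samples, size=n_samples, replace=True)
assert (sel_a == sel_b).all()   # same seed, same bootstrap sample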

3 Source : decision_tree.py
with Apache License 2.0
from bsc-wdc

def _split_node_using_features(
    sample, n_features, y_s, n_classes, m_try, features_file, seed
):
    features_mmap = np.load(features_file, mmap_mode="r", allow_pickle=False)
    random_state = RandomState(seed)
    return _compute_split(
        sample, n_features, y_s, n_classes, m_try, features_mmap, random_state
    )


@constraint(computing_units="${ComputingUnits}")

3 Source : decision_tree.py
with Apache License 2.0
from bsc-wdc

def _split_node(sample, n_features, y_s, n_classes, m_try, samples_file, seed):
    features_mmap = np.load(samples_file, mmap_mode="r", allow_pickle=False).T
    random_state = RandomState(seed)
    return _compute_split(
        sample, n_features, y_s, n_classes, m_try, features_mmap, random_state
    )


def _compute_split(

3 Source : sampler.py
with MIT License
from CIRADA-Tools

    def __init__(self, dim, lnprobfn, args=[], kwargs={}):
        self.dim = dim
        self.lnprobfn = lnprobfn
        self.args = args
        self.kwargs = kwargs

        # This is a random number generator that we can easily set the state
        # of without affecting the numpy-wide generator
        self._random = np.random.mtrand.RandomState()

        self.reset()

    @property
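
The comment in this snippet states the key property: a private RandomState carries its own stream, so reseeding the module-level generator leaves it untouched. A quick check, as a minimal sketch:

import numpy as np

private = np.random.mtrand.RandomState(4321)
np.random.seed(0)            # reseed the module-level generator
x = private.randn(4)         # the private stream is unaffected

np.random.seed(99)           # reseed again with a different value
y = np.random.mtrand.RandomState(4321).randn(4)
assert (x == y).all()        # the private stream never saw the global seeds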

3 Source : test_nmf.py
with Apache License 2.0
from dashanji

def test_initialize_nn_output():
    # Test that initialization does not return negative values
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
        W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
        assert not ((W < 0).any() or (H < 0).any())


def test_parameter_checking():

3 Source : test_nmf.py
with Apache License 2.0
from dashanji

def test_initialize_close():
    # Test NNDSVD error
    # Test that _initialize_nmf error is less than the standard deviation of
    # the entries in the matrix.
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(10, 10))
    W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
    error = linalg.norm(np.dot(W, H) - A)
    sdev = linalg.norm(A - A.mean())
    assert error <= sdev


def test_initialize_variants():

3 Source : test_nmf.py
with Apache License 2.0
from dashanji

def test_nmf_fit_close(solver):
    rng = np.random.mtrand.RandomState(42)
    # Test that the fit is not too far away
    pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
               max_iter=600)
    X = np.abs(rng.randn(6, 5))
    assert pnmf.fit(X).reconstruction_err_ < 0.1


@pytest.mark.parametrize('solver', ('cd', 'mu'))

3 Source : test_nmf.py
with Apache License 2.0
from dashanji

def test_nmf_transform(solver):
    # Test that NMF.transform returns close values
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(6, 5))
    m = NMF(solver=solver, n_components=3, init='random',
            random_state=0, tol=1e-5)
    ft = m.fit_transform(A)
    t = m.transform(A)
    assert_array_almost_equal(ft, t, decimal=2)


def test_nmf_transform_custom_init():

3 Source : simulation.py
with MIT License
from deeplearningbrasil

    def __init__(self, reward_model: nn.Module, epsilon: float = 0.1, seed: int = 42) -> None:
        super().__init__(reward_model)
        self._epsilon = epsilon
        self._rng = RandomState(seed)

    def _select_idx(

3 Source : bandit.py
with MIT License
from deeplearningbrasil

    def __init__(
        self,
        reward_model: nn.Module,
        explore_rounds: int = 500,
        decay_rate: float = 0.0026456,
        seed: int = 42,
    ) -> None:
        super().__init__(reward_model)
        self._init_explore_rounds = explore_rounds
        self._explore_rounds = explore_rounds
        self._exploit_rounds = explore_rounds
        self._decay_rate = decay_rate

        self._rng = RandomState(seed)
        self._t = 0
        self._te = 0
        self.exploring = True

    def _update_state(self):

3 Source : bandit.py
with MIT License
from deeplearningbrasil

    def __init__(
        self,
        reward_model: nn.Module,
        epsilon: float = 0.1,
        epsilon_decay: float = 1.0,
        seed: int = 42,
    ) -> None:
        super().__init__(reward_model)
        self._epsilon = epsilon
        self._rng = RandomState(seed)
        self._epsilon_decay = epsilon_decay

    def _compute_prob(

3 Source : bandit.py
with MIT License
from deeplearningbrasil

    def __init__(
        self,
        reward_model: nn.Module,
        exploration_threshold: float = 0.8,
        decay_rate: float = 0.0010391,
        seed: int = 42,
    ) -> None:
        super().__init__(reward_model)
        self._init_exploration_threshold = exploration_threshold
        self._exploration_threshold = exploration_threshold
        self._decay_rate = decay_rate
        self._rng = RandomState(seed)
        self._t = 0

    def _compute_prob(

3 Source : bandit.py
with MIT License
from deeplearningbrasil

    def __init__(
        self,
        reward_model: nn.Module,
        window_size: int = 500,
        exploration_threshold: float = 0.5,
        percentile=35,
        percentile_decay: float = 1.0,
        seed: int = 42,
    ) -> None:
        super().__init__(reward_model)
        self._window_size = window_size
        self._initial_exploration_threshold = exploration_threshold
        self._percentile_decay = percentile_decay
        self._best_arm_history = {}  # We save a deque for each pos
        self._rng = RandomState(seed)
        self._percentile = percentile
        self._t = 0

    def _compute_prob(

3 Source : bandit.py
with MIT License
from deeplearningbrasil

    def __init__(
        self,
        reward_model: nn.Module,
        logit_multiplier: float = 1.0,
        reverse_sigmoid: bool = True,
        seed: int = 42,
    ) -> None:
        super().__init__(reward_model)
        self._logit_multiplier = logit_multiplier
        self._rng = RandomState(seed)
        self._reverse_sigmoid = reverse_sigmoid

    def _softmax(self, x: np.ndarray) -> np.ndarray:

3 Source : epsilon_greedy_agent.py
with MIT License
from falox

    def __init__(self, seed, epsilon):
        self.name = "epsilon-Greedy Agent"
        self.np_random = RandomState(seed)
        self.epsilon = epsilon

    def act(self, observation, reward, done):
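
For context, the epsilon-greedy decision these agents implement draws from the seeded RandomState rather than the global generator. A hedged sketch of that pattern, where n_actions and q_values are illustrative placeholders rather than names from the project:

import numpy as np
from numpy.random.mtrand import RandomState

rng = RandomState(42)
epsilon, n_actions = 0.1, 5
q_values = rng.randn(n_actions)      # placeholder value estimates

if rng.rand() < epsilon:             # explore with probability epsilon
    action = rng.randint(n_actions)
else:                                # otherwise exploit the best estimate
    action = int(np.argmax(q_values))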

3 Source : softmax_agent.py
with MIT License
from falox

    def __init__(self, seed, beta, max_impressions):
        self.name = "Softmax Agent"
        self.np_random = RandomState(seed)
        self.beta = beta
        self.max_impressions = max_impressions

    def act(self, observation, reward, done):

3 Source : ucb1_agent.py
with MIT License
from falox

    def __init__(self, action_space, seed, c, max_impressions):
        self.name = "UCB1 Agent"
        self.values = [0.00] * action_space.n
        self.np_random = RandomState(seed)
        self.c = c
        self.max_impressions = max_impressions
        self.prev_action = None

    def act(self, observation, reward, done):

3 Source : test_nmf.py
with GNU General Public License v3.0
from gustavowillam

def test_nmf_fit_close(solver, regularization):
    rng = np.random.mtrand.RandomState(42)
    # Test that the fit is not too far away
    pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
               regularization=regularization, max_iter=600)
    X = np.abs(rng.randn(6, 5))
    assert pnmf.fit(X).reconstruction_err_ < 0.1


@pytest.mark.parametrize('solver', ('cd', 'mu'))

3 Source : test_nmf.py
with GNU General Public License v3.0
from gustavowillam

def test_nmf_transform(solver, regularization):
    # Test that NMF.transform returns close values
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(6, 5))
    m = NMF(solver=solver, n_components=3, init='random',
            regularization=regularization, random_state=0, tol=1e-5)
    ft = m.fit_transform(A)
    t = m.transform(A)
    assert_array_almost_equal(ft, t, decimal=2)


def test_nmf_transform_custom_init():

3 Source : test_nmf.py
with GNU General Public License v3.0
from gustavowillam

def test_n_components_greater_n_features():
    # Smoke test for the case of more components than features.
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(30, 10))
    # FIXME : should be removed in 1.1
    init = 'random'
    NMF(n_components=15, random_state=0, tol=1e-2, init=init).fit(A)


@pytest.mark.parametrize('solver', ['cd', 'mu'])

3 Source : test_nmf.py
with GNU General Public License v3.0
from gustavowillam

def test_nmf_sparse_transform():
    # Test that transform works on sparse data.  Issue #2124
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(3, 2))
    A[1, 1] = 0
    A = csc_matrix(A)

    for solver in ('cd', 'mu'):
        model = NMF(solver=solver, random_state=0, n_components=2,
                    max_iter=400, init='nndsvd')
        A_fit_tr = model.fit_transform(A)
        A_tr = model.transform(A)
        assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)


@pytest.mark.parametrize('init', ['random', 'nndsvd'])

3 Source : test_nmf.py
with GNU General Public License v3.0
from gustavowillam

def test_init_default_deprecation():
    # Test FutureWarning on init default
    msg = (r"The 'init' value, when 'init=None' and "
           r"n_components is less than n_samples and "
           r"n_features, will be changed from 'nndsvd' to "
           r"'nndsvda' in 1.1 \(renaming of 0.26\).")
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(6, 5))
    with pytest.warns(FutureWarning, match=msg):
        nmf._initialize_nmf(A, 3)
    with pytest.warns(FutureWarning, match=msg):
        NMF().fit(A)
    with pytest.warns(FutureWarning, match=msg):
        non_negative_factorization(A)

3 Source : test_nmf.py
with GNU General Public License v3.0
from HHHHhgqcdxhg

def test_nmf_fit_close(solver):
    rng = np.random.mtrand.RandomState(42)
    # Test that the fit is not too far away
    pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
               max_iter=600)
    X = np.abs(rng.randn(6, 5))
    assert_less(pnmf.fit(X).reconstruction_err_, 0.1)


@pytest.mark.parametrize('solver', ('cd', 'mu'))

3 Source : test_metrics.py
with MIT License
from idanmoradarthas

def test_plot_metric_growth_per_labeled_instances_given_random_state():
    plot_metric_growth_per_labeled_instances(x_train, y_train, x_test, y_test,
                                             {"DecisionTreeClassifier": DecisionTreeClassifier(random_state=0),
                                              "RandomForestClassifier": RandomForestClassifier(random_state=0,
                                                                                               n_estimators=5)},
                                             random_state=RandomState(5))
    result_path = Path(__file__).parents[0].absolute().joinpath("result_images").joinpath(
        "test_metrics").joinpath("test_plot_metric_growth_per_labeled_instances_given_random_state.png")
    pyplot.savefig(str(result_path))

    baseline_path = Path(__file__).parents[0].absolute().joinpath("baseline_images").joinpath(
        "test_metrics").joinpath("test_plot_metric_growth_per_labeled_instances_given_random_state.png")
    pyplot.cla()
    pyplot.close(pyplot.gcf())
    compare_images_from_paths(str(baseline_path), str(result_path))


def test_plot_metric_growth_per_labeled_instances_exists_ax():

3 Source : codecs.py
with GNU Affero General Public License v3.0
from nccgroup

    def encode(cls, obj):
        import numpy as np
        assert type(obj) == np.random.mtrand.RandomState

        init_args = obj.__reduce__()[1]
        state = obj.__getstate__()

        return {
            '__mlspl_type': [type(obj).__module__, type(obj).__name__],
            'init_args': init_args,
            'state': state
        }

    @classmethod

3 Source : codecs.py
with GNU Affero General Public License v3.0
from nccgroup

    def decode(cls, obj):
        from numpy.random.mtrand import RandomState

        init_args = obj['init_args']
        state = obj['state']

        t = RandomState(*init_args)
        t.__setstate__(state)

        return t

class SparseMatrixCodec(BaseCodec):
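
The encode/decode pair above round-trips a generator through __reduce__, __getstate__ and __setstate__; the same effect is available through the documented get_state/set_state API, sketched here:

import numpy as np
from numpy.random.mtrand import RandomState

original = RandomState(1234)
original.rand(3)                 # advance the stream a little
state = original.get_state()     # capture the full internal state

clone = RandomState()            # initial seed is irrelevant; state is overwritten
clone.set_state(state)
assert (original.rand(5) == clone.rand(5)).all()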

3 Source : epsilon_greedy.py
with MIT License
from olivierjeunen

    def __init__(self, config, agent):
        super(EpsilonGreedy, self).__init__(config)
        self.agent = agent
        self.rng = RandomState(self.config.random_seed)

    def train(self, observation, action, reward, done = False):

3 Source : abstract.py
with MIT License
from olivierjeunen

    def reset_random_seed(self, epoch = 0):
        # Initialize Random State.
        assert (self.config.random_seed is not None)
        self.rng = RandomState(self.config.random_seed + epoch)

    def init_gym(self, args):

3 Source : normal_time_generator.py
with MIT License
from olivierjeunen

    def __init__(self, config):
        super(NormalTimeGenerator, self).__init__(config)
        self.current_time = 0

        if not hasattr(self.config, 'normal_time_mu'):
            self.normal_time_mu = 0
        else:
            self.normal_time_mu = self.config.normal_time_mu

        if not hasattr(self.config, 'normal_time_sigma'):
            self.normal_time_sigma = 1
        else:
            self.normal_time_sigma = self.config.normal_time_sigma

        self.rng = RandomState(config.random_seed)

    def new_time(self):

3 Source : test_sampling_spn.py
with GNU General Public License v3.0
from probabilistic-learning

    def test_induced_trees_correct_parameters(self):
        node_1_2_2 = Leaf(0)
        node_1_2_1 = Leaf(1)
        node_1_1 = Leaf([0, 1])
        node_1_2 = node_1_2_1 * node_1_2_2
        spn = 0.1 * node_1_1 + 0.9 * node_1_2
        node_1_2.id = 0

        rand_gen = RandomState(1234)
        with self.assertRaises(AssertionError):
            sample_induced_trees(spn, rand_gen.rand(10, 3), rand_gen)

        assign_ids(spn)
        node_1_2_2.id += 1

        with self.assertRaises(AssertionError):
            sample_induced_trees(spn, rand_gen.rand(10, 3), rand_gen)

    def test_induced_trees(self):

3 Source : test_randomstate_regression.py
with MIT License
from shreyasgaonkar

    def test_call_within_randomstate(self):
        # Check that custom RandomState does not call into global state
        m = random.RandomState()
        res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
        for i in range(3):
            random.seed(i)
            m.seed(4321)
            # If m.state is not honored, the result will change
            assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)

    def test_multivariate_normal_size_types(self):

3 Source : benchmark_cmf.py
with MIT License
from smn-ailab

def dense_cmf_benchmark(solver):
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(2000, 150))
    Y = np.abs(rng.randn(150, 10))
    model = CMF(n_components=10, solver=solver,
                random_state=42, max_iter=10)
    U, V, Z = model.fit_transform(X, Y)


def dense_cmf_with_logits_benchmark():

3 Source : benchmark_cmf.py
with MIT License
from smn-ailab

def dense_cmf_with_logits_benchmark():
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(2000, 150))
    Y = np.abs(rng.randn(150, 10))
    model = CMF(n_components=10, solver="newton",
                random_state=42, max_iter=10)
    U, V, Z = model.fit_transform(X, Y)


def sparse_cmf_benchmark(solver):

3 Source : benchmark_cmf.py
with MIT License
from smn-ailab

def sparse_cmf_benchmark(solver):
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(2000, 150))
    X[:1000, 2 * np.arange(10) + 100] = 0
    X[1000:, 2 * np.arange(10)] = 0
    X_sparse = SP(X)
    Y = np.abs(rng.randn(150, 10))
    model = CMF(n_components=10, solver=solver,
                random_state=42, max_iter=10)
    U, V, Z = model.fit_transform(X_sparse, Y)


def sparse_cmf_with_logits_benchmark(sample_ratio):

3 Source : benchmark_cmf.py
with MIT License
from smn-ailab

def sparse_cmf_with_logits_benchmark(sample_ratio):
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(2000, 150))
    X[:1000, 2 * np.arange(10) + 100] = 0
    X[1000:, 2 * np.arange(10)] = 0
    X_sparse = SP(X)
    Y = expit(rng.randn(150, 10))
    model = CMF(n_components=10, solver="newton",
                random_state=42, sg_sample_ratio=sample_ratio,
                max_iter=10)
    U, V, Z = model.fit_transform(X_sparse, Y)


if __name__ == "__main__":

3 Source : test_cmf.py
with MIT License
from smn-ailab

def test_fit_close(solver):
    rng = np.random.mtrand.RandomState(42)
    # Test that the fit is not too far away
    for rndm_state in [0]:
        pnmf = CMF(n_components=5, solver=solver, x_init='nndsvdar', y_init='nndsvdar',
                   random_state=rndm_state, max_iter=1000)
        X = np.abs(rng.randn(6, 5))
        Y = np.abs(rng.randn(5, 6))
        assert_less(pnmf.fit(X, Y).reconstruction_err_, 0.1)


def test_transform_custom_init():

3 Source : test_cmf.py
with MIT License
from smn-ailab

def test_n_components_greater_n_features():
    # Smoke test for the case of more components than features.
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(30, 10))
    Y = np.abs(rng.randn(10, 15))
    CMF(n_components=15, random_state=0, tol=1e-2).fit(X, Y)


@pytest.mark.parametrize("solver", solvers)

3 Source : test_cmf.py
with MIT License
from smn-ailab

def test_recover_low_rank_matrix(solver):
    rng = np.random.mtrand.RandomState(42)
    # Test that the fit is not too far away
    pnmf = CMF(5, solver=solver, x_init='nndsvdar', y_init='nndsvdar',
               random_state=0, max_iter=1000)
    U = np.abs(rng.randn(10, 5))
    V = np.abs(rng.randn(8, 5))
    Z = np.abs(rng.randn(6, 5))
    X = np.dot(U, V.T)
    Y = np.dot(V, Z.T)
    assert_less(pnmf.fit(X, Y).reconstruction_err_, 1.0)


@ignore_warnings(category=ConvergenceWarning)

3 Source : test_cmf.py
with MIT License
from smn-ailab

def test_logit_link_optimization():
    n_components = 5
    rng = np.random.mtrand.RandomState(42)
    X = 1 / (1 + np.exp(-rng.randn(6, 5)))
    Y = 1 / (1 + np.exp(-rng.randn(5, 4)))

    model = CMF(n_components=n_components, solver="newton",
                l2_reg=0., random_state=42, x_link="logit", y_link="logit",
                U_non_negative=False, V_non_negative=False, Z_non_negative=False)

    U, V, Z = model.fit_transform(X, Y)
    assert_less(model.reconstruction_err_, 0.1)


def test_logit_link_non_negative_optimization():

3 Source : test_cmf.py
with MIT License
from smn-ailab

def test_logit_link_non_negative_optimization():
    # Test if the logit link function works with a non-negative counterpart
    n_components = 5
    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(6, 5)
    X[X < 0] = 0
    Y = 1 / (1 + np.exp(-rng.randn(5, 4)))

    model = CMF(n_components=n_components, solver="newton",
                l2_reg=0., random_state=42, y_link="logit",
                U_non_negative=True, V_non_negative=True, Z_non_negative=False,
                hessian_pertubation=0.2, max_iter=1000)

    U, V, Z = model.fit_transform(X, Y)
    assert_less(model.reconstruction_err_, 0.1)


@pytest.mark.parametrize("solver", solvers)

3 Source : test_cmf.py
with MIT License
from smn-ailab

def test_stochastic_newton_solver():
    rng = np.random.mtrand.RandomState(42)

    model = CMF(n_components=5, solver="newton", x_init='svd', y_init='svd',
                U_non_negative=False, V_non_negative=False, Z_non_negative=False, alpha=0.5,
                sg_sample_ratio=0.5, random_state=0, max_iter=1000)
    X = rng.randn(6, 5)
    Y = rng.randn(5, 6)
    assert_less(model.fit(X, Y).reconstruction_err_, 0.1)


def test_stochastic_newton_solver_sparse_input_close():

3 Source : test_cmf.py
with MIT License
from smn-ailab

def test_stochastic_newton_solver_sparse_input_close():
    rng = np.random.mtrand.RandomState(42)

    model = CMF(n_components=5, solver="newton", x_init='svd', y_init='svd',
                U_non_negative=False, V_non_negative=False, Z_non_negative=False, alpha=0.5,
                sg_sample_ratio=0.5, random_state=0, max_iter=1000)
    A = rng.randn(6, 5)
    B = rng.randn(5, 6)
    A_sparse = csr_matrix(A)
    B_sparse = csr_matrix(B)

    assert_less(model.fit(A_sparse, B_sparse).reconstruction_err_, 0.1)


def test_stochastic_newton_solver_sparse_input():
