Here are examples of the Python API trnltk.morphology.contextless.parser.parser.ContextlessMorphologicalParser, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
12 Examples
3
Example 1
Project: trnltk Source File: test_parser_brute_force_noun.py
def setUp(self):
    """Build a contextless parser backed by a single brute-force noun root finder."""
    logging.basicConfig(level=logging.INFO)
    parser_logger.setLevel(logging.INFO)
    suffix_applier_logger.setLevel(logging.INFO)

    graph = BasicSuffixGraph()
    graph.initialize()

    # No predefined paths (second arg None): brute-force finding only.
    self.mock_brute_force_noun_root_finder = BruteForceNounRootFinder()
    self.parser = ContextlessMorphologicalParser(
        graph, None, [self.mock_brute_force_noun_root_finder])
3
Example 2
Project: trnltk Source File: test_parser_brute_force_noun.py
def setUp(self):
    """Wire a contextless parser around a brute-force compound-noun root finder."""
    logging.basicConfig(level=logging.INFO)
    parser_logger.setLevel(logging.INFO)
    suffix_applier_logger.setLevel(logging.INFO)

    basic_graph = BasicSuffixGraph()
    basic_graph.initialize()

    self.mock_brute_force_noun_compound_root_finder = BruteForceCompoundNounRootFinder()
    # No predefined paths: parsing relies solely on the brute-force finder.
    self.parser = ContextlessMorphologicalParser(
        basic_graph, None, [self.mock_brute_force_noun_compound_root_finder])
3
Example 3
Project: trnltk Source File: test_parser_brute_force_verb.py
def setUp(self):
    """Build a contextless parser backed by a single brute-force verb root finder."""
    logging.basicConfig(level=logging.INFO)
    parser_logger.setLevel(logging.INFO)
    suffix_applier_logger.setLevel(logging.INFO)

    suffix_graph = BasicSuffixGraph()
    suffix_graph.initialize()

    # Fix: the original attribute was misnamed "..._noun_..." although it holds
    # a BruteForceVerbRootFinder (copy-paste from the noun test).
    self.mock_brute_force_verb_root_finder = BruteForceVerbRootFinder()
    # Backward-compatible alias so any test still using the old name keeps working.
    self.mock_brute_force_noun_root_finder = self.mock_brute_force_verb_root_finder

    self.parser = ContextlessMorphologicalParser(
        suffix_graph, None, [self.mock_brute_force_verb_root_finder])
3
Example 4
Project: trnltk Source File: test_parser_with_copula_graph.py
def setUp(self):
    """Create a parser over the copula suffix graph, with predefined paths enabled."""
    logging.basicConfig(level=logging.INFO)
    parser_logger.setLevel(logging.INFO)
    suffix_applier_logger.setLevel(logging.INFO)

    # Work on a copy so individual tests may mutate the root map freely.
    self.cloned_root_map = copy(self._org_root_map)

    copula_graph = CopulaSuffixGraph(BasicSuffixGraph())
    copula_graph.initialize()

    paths = PredefinedPaths(self.cloned_root_map, copula_graph)
    paths.create_predefined_paths()

    dictionary_finder = WordRootFinder(self.cloned_root_map)
    self.parser = ContextlessMorphologicalParser(copula_graph, paths, [dictionary_finder])
3
Example 5
Project: trnltk Source File: test_parser_with_proper_noun_graph.py
@classmethod
def setUpClass(cls):
    """Share one proper-noun-aware parser across every test in this class."""
    super(ParserTestWithProperNouns, cls).setUpClass()
    cls.root_map = dict()

    graph = ProperNounSuffixGraph(BasicSuffixGraph())
    graph.initialize()

    with_apostrophe_finder = ProperNounFromApostropheRootFinder()
    without_apostrophe_finder = ProperNounWithoutApostropheRootFinder()

    # No predefined paths; roots come only from the two proper-noun finders.
    cls.parser = ContextlessMorphologicalParser(
        graph, None, [with_apostrophe_finder, without_apostrophe_finder])
3
Example 6
Project: trnltk Source File: test_formatter.py
def setUp(self):
    """Build a minimal parser: basic suffix graph plus a dictionary word root finder."""
    logging.basicConfig(level=logging.INFO)
    parser_logger.setLevel(logging.INFO)
    suffix_applier_logger.setLevel(logging.INFO)

    graph = BasicSuffixGraph()
    graph.initialize()

    dictionary_finder = WordRootFinder(self.root_map)
    self.parser = ContextlessMorphologicalParser(graph, None, [dictionary_finder])
0
Example 7
Project: trnltk Source File: test_contextlessdistributioncalculator.py
@classmethod
def setUpClass(cls):
    """Load the master dictionary, build the full contextless parser, and wire the
    contextless-distribution calculator against a local MongoDB instance."""
    roots = []
    dictionary_path = os.path.join(
        os.path.dirname(__file__), '../../../../../resources/master_dictionary.txt')
    for lexeme in LexiconLoader.load_from_file(dictionary_path):
        roots.extend(RootGenerator.generate(lexeme))
    cls.root_map = RootMapGenerator().generate(roots)

    graph = CopulaSuffixGraph(NumeralSuffixGraph(ProperNounSuffixGraph(BasicSuffixGraph())))
    graph.initialize()

    paths = PredefinedPaths(cls.root_map, graph)
    paths.create_predefined_paths()

    # Order matters to the original: word, digit, text, then proper-noun finders.
    finders = [
        WordRootFinder(cls.root_map),
        DigitNumeralRootFinder(),
        TextNumeralRootFinder(cls.root_map),
        ProperNounFromApostropheRootFinder(),
        ProperNounWithoutApostropheRootFinder(),
    ]
    cls.contextless_parser = ContextlessMorphologicalParser(graph, paths, finders)

    # NOTE(review): pymongo.Connection is deprecated in favor of MongoClient;
    # kept as-is since this code targets the project's pinned pymongo version.
    mongodb_connection = pymongo.Connection(host='127.0.0.1')
    collection_map = {
        1: mongodb_connection['trnltk']['wordUnigrams{}'.format(cls.parseset_index)]
    }

    database_index_builder = DatabaseIndexBuilder(collection_map)
    target_counter = TargetFormGivenContextCounter(collection_map)
    smoother = CachedContextlessDistributionSmoother()
    smoother.initialize()

    cls.calculator = ContextlessDistributionCalculator(
        database_index_builder, target_counter, smoother)
    cls.calculator.build_indexes()
0
Example 8
Project: trnltk Source File: test_calculator.py
@classmethod
def setUpClass(cls):
    """Build the shared contextless parser and open the n-gram collections used
    by the likelihood-calculator tests."""
    super(_LikelihoodCalculatorTest, cls).setUpClass()

    roots = []
    dictionary_path = os.path.join(
        os.path.dirname(__file__), '../../../../../resources/master_dictionary.txt')
    for lexeme in LexiconLoader.load_from_file(dictionary_path):
        roots.extend(RootGenerator.generate(lexeme))
    cls.root_map = RootMapGenerator().generate(roots)

    graph = CopulaSuffixGraph(NumeralSuffixGraph(ProperNounSuffixGraph(BasicSuffixGraph())))
    graph.initialize()

    paths = PredefinedPaths(cls.root_map, graph)
    paths.create_predefined_paths()

    finders = [
        WordRootFinder(cls.root_map),
        DigitNumeralRootFinder(),
        TextNumeralRootFinder(cls.root_map),
        ProperNounFromApostropheRootFinder(),
        ProperNounWithoutApostropheRootFinder(),
    ]
    cls.contextless_parser = ContextlessMorphologicalParser(graph, paths, finders)

    # NOTE(review): pymongo.Connection is deprecated (MongoClient in modern pymongo);
    # kept unchanged to match the project's pinned dependency.
    cls.mongodb_connection = pymongo.Connection(host='127.0.0.1')
    # Keys 1/2/3 select the n-gram order (uni-, bi-, trigram) for parseset 999.
    cls.collection_map = {
        1: cls.mongodb_connection['trnltk']['wordUnigrams999'],
        2: cls.mongodb_connection['trnltk']['wordBigrams999'],
        3: cls.mongodb_connection['trnltk']['wordTrigrams999']
    }

    # Concrete subclasses are expected to provide the generator.
    cls.generator = None
0
Example 9
Project: trnltk Source File: playground.py
def initialize():
    """Populate the module-level `contextless_parser` with a fully configured parser."""
    roots = []
    dictionary_path = os.path.join(
        os.path.dirname(__file__), '../resources/master_dictionary.txt')
    for lexeme in LexiconLoader.load_from_file(dictionary_path):
        roots.extend(CircuemflexConvertingRootGenerator.generate(lexeme))
    root_map = RootMapGenerator().generate(roots)

    graph = CopulaSuffixGraph(NumeralSuffixGraph(ProperNounSuffixGraph(BasicSuffixGraph())))
    graph.initialize()

    paths = PredefinedPaths(root_map, graph)
    paths.create_predefined_paths()

    # Note: this module lists the text-numeral finder before the digit one.
    finders = [
        WordRootFinder(root_map),
        TextNumeralRootFinder(root_map),
        DigitNumeralRootFinder(),
        ProperNounFromApostropheRootFinder(),
        ProperNounWithoutApostropheRootFinder(),
    ]

    global contextless_parser
    contextless_parser = ContextlessMorphologicalParser(graph, paths, finders)
0
Example 10
Project: trnltk Source File: test_statistical_parser.py
@classmethod
def setUpClass(cls):
    """Assemble a StatisticalParser: a contextless parser plus a word-concordance
    index built from parseset 001."""
    super(StatisticalParserTest, cls).setUpClass()

    roots = []
    dictionary_path = os.path.join(
        os.path.dirname(__file__), '../../resources/master_dictionary.txt')
    for lexeme in LexiconLoader.load_from_file(dictionary_path):
        roots.extend(RootGenerator.generate(lexeme))
    cls.root_map = RootMapGenerator().generate(roots)

    graph = CopulaSuffixGraph(NumeralSuffixGraph(BasicSuffixGraph()))
    graph.initialize()

    paths = PredefinedPaths(cls.root_map, graph)
    paths.create_predefined_paths()

    finders = [
        WordRootFinder(cls.root_map),
        DigitNumeralRootFinder(),
        TextNumeralRootFinder(cls.root_map),
        ProperNounFromApostropheRootFinder(),
        ProperNounWithoutApostropheRootFinder(),
    ]
    contextless_parser = ContextlessMorphologicalParser(graph, paths, finders)

    parseset_index = "001"
    dom = parse(os.path.join(
        os.path.dirname(__file__),
        '../../testresources/parsesets/parseset{}.xml'.format(parseset_index)))
    parseset = ParseSetBinding.build(dom.getElementsByTagName("parseset")[0])

    parse_set_word_list = []
    for sentence in parseset.sentences:
        parse_set_word_list.extend(sentence.words)

    concordance_index = CompleteWordConcordanceIndex(parse_set_word_list)
    cls.parser = StatisticalParser(contextless_parser, concordance_index)
0
Example 11
Project: trnltk Source File: test_morphemecontainerstats.py
@classmethod
def setUpClass(cls):
    """Build a parser from a small inline lexicon instead of the master dictionary."""
    super(MorphemeContainerContextlessProbabilityGeneratorWithContainersTest, cls).setUpClass()

    roots = []
    inline_lexicon = u'''
duvar
tutku
saç
oğul [A:LastVowelDrop]
demek [A:RootChange, Passive_In, Passive_InIl]
bu [P:Det]
'''.strip().splitlines()
    for lexeme in LexiconLoader.load_from_lines(inline_lexicon):
        roots.extend(RootGenerator.generate(lexeme))
    cls.root_map = RootMapGenerator().generate(roots)

    graph = BasicSuffixGraph()
    graph.initialize()

    # No predefined paths: only dictionary lookups over the tiny root map.
    cls.contextless_parser = ContextlessMorphologicalParser(
        graph, None, [WordRootFinder(cls.root_map)])
0
Example 12
Project: trnltk Source File: test_transitiongenerator.py
@classmethod
def setUpClass(cls):
    """Create the shared parser and the TransitionGenerator under test."""
    super(TransitionGeneratorTest, cls).setUpClass()

    roots = []
    dictionary_path = os.path.join(
        os.path.dirname(__file__), '../../resources/master_dictionary.txt')
    for lexeme in LexiconLoader.load_from_file(dictionary_path):
        roots.extend(RootGenerator.generate(lexeme))
    cls.root_map = RootMapGenerator().generate(roots)

    graph = CopulaSuffixGraph(NumeralSuffixGraph(ProperNounSuffixGraph(BasicSuffixGraph())))
    graph.initialize()

    paths = PredefinedPaths(cls.root_map, graph)
    paths.create_predefined_paths()

    finders = [
        WordRootFinder(cls.root_map),
        DigitNumeralRootFinder(),
        TextNumeralRootFinder(cls.root_map),
        ProperNounFromApostropheRootFinder(),
        ProperNounWithoutApostropheRootFinder(),
    ]
    cls.parser = ContextlessMorphologicalParser(graph, paths, finders)
    cls.transition_generator = TransitionGenerator(cls.parser)