This notebook cross-validates the CLTK's part-of-speech taggers. The final results are found at the very bottom.

In [7]:
from nltk.corpus.reader import TaggedCorpusReader
from nltk.tag import AffixTagger
from nltk.tag import BigramTagger
from nltk.tag import tnt
from nltk.tag import TrigramTagger
from nltk.tag import UnigramTagger
from nltk.tokenize import wordpunct_tokenize
import math
import os
import pandas as pd
import random
from statistics import mean
from statistics import stdev
In [2]:
full_training_set_rel = '~/latin_treebank_perseus/latin_training_set.pos'
full_training_set = os.path.expanduser(full_training_set_rel)
In [3]:
unigram_accuracies = []
bigram_accuracies = []
trigram_accuracies = []
backoff_accuracies = []
two_prefix_accuracies = []
three_prefix_accuracies = []
four_prefix_accuracies = []
two_suffix_accuracies = []
three_suffix_accuracies = []
four_suffix_accuracies = []
five_suffix_accuracies = []
six_suffix_accuracies = []
tnt_accuracies = []

with open(full_training_set) as f:
    training_set_string = f.read()
    pos_set = training_set_string.split('\n\n')  # mk into a list

sentence_count = len(pos_set)
tenth = math.ceil(int(sentence_count) / int(10))

random.shuffle(pos_set)

def chunks(l, n):
    """Yield successive n-sized chunks from l.
    http://stackoverflow.com/a/312464
    """
    for i in range(0, len(l), n):
        yield l[i:i+n]

# a list of 10 lists
ten_parts = list(chunks(pos_set, tenth))  # a list of 10 lists with 695 sentences each

#for counter in list(range(10)):
for counter, part in list(enumerate(ten_parts)):
    # map test list to part of given loop
    test_set = ten_parts[counter]  # or: test_set = part
    
    # filter out this loop's test index
    training_set_lists = [x for x in ten_parts if x is not ten_parts[counter]]
    
    # next concatenate the list together into 1 file ( http://stackoverflow.com/a/952952 )
    training_set = [item for sublist in training_set_lists for item in sublist]
        
    # save shuffled tests to file
    # there might be a way of getting 
    local_dir_rel = '~/cltk_data/user_data'
    local_dir = os.path.expanduser(local_dir_rel)
    if not os.path.isdir(local_dir):
        os.makedirs(local_dir)

    test_path = os.path.join(local_dir, 'test_latin.pos')
    with open(test_path, 'w') as f:
        f.write('\n\n'.join(test_set))

    train_path = os.path.join(local_dir, 'train_latin.pos')
    with open(train_path, 'w') as f:
        f.write('\n\n'.join(training_set))

    # read POS corpora
    train_reader = TaggedCorpusReader(local_dir, 'train_latin.pos')
    train_sents = train_reader.tagged_sents()

    test_reader = TaggedCorpusReader(local_dir, 'test_latin.pos')
    test_sents = test_reader.tagged_sents()
    
    print('Loop #' + str(counter))
    # make unigram tagger
    unigram_tagger = UnigramTagger(train_sents)
    # evaluate unigram tagger
    unigram_accuracy = None
    unigram_accuracy = unigram_tagger.evaluate(test_sents)
    unigram_accuracies.append(unigram_accuracy)
    print('Unigram:', unigram_accuracy)
    
    # make bigram tagger
    bigram_tagger = BigramTagger(train_sents)
    # evaluate bigram tagger
    bigram_accuracy = None
    bigram_accuracy = bigram_tagger.evaluate(test_sents)
    bigram_accuracies.append(bigram_accuracy)
    print('Bigram:', bigram_accuracy)
    
    # make trigram tagger
    trigram_tagger = TrigramTagger(train_sents)
    # evaluate trigram tagger
    trigram_accuracy = None
    trigram_accuracy = trigram_tagger.evaluate(test_sents)
    trigram_accuracies.append(trigram_accuracy)
    print('Trigram:', trigram_accuracy)
    
    # make 1, 2, 3-gram backoff tagger
    tagger1 = UnigramTagger(train_sents)
    tagger2 = BigramTagger(train_sents, backoff=tagger1)
    tagger3 = TrigramTagger(train_sents, backoff=tagger2)
    # evaluate trigram tagger
    backoff_accuracy = None
    backoff_accuracy = tagger3.evaluate(test_sents)
    backoff_accuracies.append(backoff_accuracy)
    print('1, 2, 3-gram backoff:', backoff_accuracy)
    
    # make 2-char prefix tagger
    two_prefix_tagger = AffixTagger(train_sents, affix_length=2)
    # evaluate 2-char prefix tagger
    two_prefix_accuracy = None
    two_prefix_accuracy = two_prefix_tagger.evaluate(test_sents)
    two_prefix_accuracies.append(two_prefix_accuracy)
    print('2-char prefix:', two_prefix_accuracy)
    
    # make 3-char prefix tagger
    three_prefix_tagger = AffixTagger(train_sents, affix_length=3)
    # evaluate 3-char prefix tagger
    three_prefix_accuracy = None
    three_prefix_accuracy = three_prefix_tagger.evaluate(test_sents)
    three_prefix_accuracies.append(three_prefix_accuracy)
    print('3-char prefix:', three_prefix_accuracy)

    # make 4-char prefix tagger
    four_prefix_tagger = AffixTagger(train_sents, affix_length=4)
    # evaluate 4-char prefix tagger
    four_prefix_accuracy = None
    four_prefix_accuracy = four_prefix_tagger.evaluate(test_sents)
    four_prefix_accuracies.append(four_prefix_accuracy)
    print('4-char prefix:', four_prefix_accuracy)

    # make 2-char suffix tagger
    two_suffix_tagger = AffixTagger(train_sents, affix_length=2)
    # evaluate 2-char suffix tagger
    two_suffix_accuracy = None
    two_suffix_accuracy = two_suffix_tagger.evaluate(test_sents)
    two_suffix_accuracies.append(two_suffix_accuracy)
    print('2-char suffix:', two_suffix_accuracy)
    
    # make 3-char suffix tagger
    three_suffix_tagger = AffixTagger(train_sents, affix_length=3)
    # evaluate 3-char suffix tagger
    three_suffix_accuracy = None
    three_suffix_accuracy = three_suffix_tagger.evaluate(test_sents)
    three_suffix_accuracies.append(three_suffix_accuracy)
    print('3-char suffix:', three_suffix_accuracy)

    # make 4-char suffix tagger
    four_suffix_tagger = AffixTagger(train_sents, affix_length=4)
    # evaluate 4-char suffix tagger
    four_suffix_accuracy = None
    four_suffix_accuracy = four_suffix_tagger.evaluate(test_sents)
    four_suffix_accuracies.append(four_suffix_accuracy)
    print('4-char suffix:', four_suffix_accuracy)

    # make 5-char suffix tagger
    five_suffix_tagger = AffixTagger(train_sents, affix_length=5)
    # evaluate 5-char suffix tagger
    five_suffix_accuracy = None
    five_suffix_accuracy = five_suffix_tagger.evaluate(test_sents)
    five_suffix_accuracies.append(five_suffix_accuracy)
    print('5-char suffix:', five_suffix_accuracy)

    # make 6-char suffix tagger
    six_suffix_tagger = AffixTagger(train_sents, affix_length=6)
    # evaluate 6-char suffix tagger
    six_suffix_accuracy = None
    six_suffix_accuracy = six_suffix_tagger.evaluate(test_sents)
    six_suffix_accuracies.append(six_suffix_accuracy)
    print('6-char suffix:', six_suffix_accuracy)
    
    # make tnt tagger
    tnt_tagger = tnt.TnT()
    tnt_tagger.train(train_sents)
    # evaulate tnt tagger
    tnt_accuracy = None
    tnt_accuracy = tnt_tagger.evaluate(test_sents)
    tnt_accuracies.append(tnt_accuracy)
    print('TnT:', tnt_accuracy)
Loop #0
Unigram: 0.8560562844647271
Bigram: 0.6440387906446092
Trigram: 0.7253280091272105
1, 2, 3-gram backoff: 0.9478037649743297
2-char prefix: 0.10686442289408633
3-char prefix: 0.1344362046016353
4-char prefix: 0.13301007796158965
2-char suffix: 0.10686442289408633
3-char suffix: 0.1344362046016353
4-char suffix: 0.13301007796158965
5-char suffix: 0.12141091462255181
6-char suffix: 0.09564555999239399
TnT: 0.9556949990492489
Loop #1
Unigram: 0.8556756756756757
Bigram: 0.599009009009009
Trigram: 0.6564864864864864
1, 2, 3-gram backoff: 0.9378378378378378
2-char prefix: 0.11225225225225226
3-char prefix: 0.13882882882882883
4-char prefix: 0.13954954954954954
2-char suffix: 0.11225225225225226
3-char suffix: 0.13882882882882883
4-char suffix: 0.13954954954954954
5-char suffix: 0.11711711711711711
6-char suffix: 0.09594594594594595
TnT: 0.9502702702702702
Loop #2
Unigram: 0.8496920708237106
Bigram: 0.6066204772902233
Trigram: 0.670130869899923
1, 2, 3-gram backoff: 0.9351424172440339
2-char prefix: 0.11056581986143187
3-char prefix: 0.14001154734411086
4-char prefix: 0.13789453425712087
2-char suffix: 0.11056581986143187
3-char suffix: 0.14001154734411086
4-char suffix: 0.13789453425712087
5-char suffix: 0.12653964588144725
6-char suffix: 0.09699769053117784
TnT: 0.9464973056197075
Loop #3
Unigram: 0.8546726248299981
Bigram: 0.6292986205556635
Trigram: 0.7329512337283854
1, 2, 3-gram backoff: 0.9521080240917039
2-char prefix: 0.11103555469205362
3-char prefix: 0.13988731299786283
4-char prefix: 0.13464153876044296
2-char suffix: 0.11103555469205362
3-char suffix: 0.13988731299786283
4-char suffix: 0.13464153876044296
5-char suffix: 0.12269283077520886
6-char suffix: 0.0976296871964251
TnT: 0.9578395181659219
Loop #4
Unigram: 0.8500613728637523
Bigram: 0.5887073930695874
Trigram: 0.6658483618166368
1, 2, 3-gram backoff: 0.9373052591823247
2-char prefix: 0.10754414125200643
3-char prefix: 0.13605891794920216
4-char prefix: 0.1326598054952318
2-char suffix: 0.10754414125200643
3-char suffix: 0.13605891794920216
4-char suffix: 0.1326598054952318
5-char suffix: 0.1209517514871117
6-char suffix: 0.09555282787272212
TnT: 0.9458974601076385
Loop #5
Unigram: 0.8509334076344386
Bigram: 0.6081545462988762
Trigram: 0.7067892634902945
1, 2, 3-gram backoff: 0.9434382836444692
2-char prefix: 0.10987275935729544
3-char prefix: 0.13364911303055632
4-char prefix: 0.13216309092597753
2-char suffix: 0.10987275935729544
3-char suffix: 0.13364911303055632
4-char suffix: 0.13216309092597753
5-char suffix: 0.1172099934986533
6-char suffix: 0.09148323581313272
TnT: 0.9526330454165506
Loop #6
Unigram: 0.8453372380342271
Bigram: 0.6076690765992495
Trigram: 0.6998261187883225
1, 2, 3-gram backoff: 0.9455477258167841
2-char prefix: 0.10469479271529239
3-char prefix: 0.13864738720600348
4-char prefix: 0.13608492724444038
2-char suffix: 0.10469479271529239
3-char suffix: 0.13864738720600348
4-char suffix: 0.13608492724444038
5-char suffix: 0.11668344467831976
6-char suffix: 0.09023519721790062
TnT: 0.953052072847076
Loop #7
Unigram: 0.8614211309523809
Bigram: 0.6065848214285714
Trigram: 0.7169828869047619
1, 2, 3-gram backoff: 0.9520089285714286
2-char prefix: 0.1091889880952381
3-char prefix: 0.140625
4-char prefix: 0.13848586309523808
2-char suffix: 0.1091889880952381
3-char suffix: 0.140625
4-char suffix: 0.13848586309523808
5-char suffix: 0.12527901785714285
6-char suffix: 0.09812127976190477
TnT: 0.9594494047619048
Loop #8
Unigram: 0.8506954300312234
Bigram: 0.5791465606963762
Trigram: 0.6586242785504778
1, 2, 3-gram backoff: 0.9395401646324155
2-char prefix: 0.10966032737250449
3-char prefix: 0.13728829595988268
4-char prefix: 0.1343551897057432
2-char suffix: 0.10966032737250449
3-char suffix: 0.13728829595988268
4-char suffix: 0.1343551897057432
5-char suffix: 0.11883811145803766
6-char suffix: 0.09499479610180717
TnT: 0.9494748793641783
Loop #9
Unigram: 0.8450269853508096
Bigram: 0.6010023130300693
Trigram: 0.6753084040092521
1, 2, 3-gram backoff: 0.9330185042405551
2-char prefix: 0.11054356206630686
3-char prefix: 0.13405936777178104
4-char prefix: 0.13483037779491133
2-char suffix: 0.11054356206630686
3-char suffix: 0.13405936777178104
4-char suffix: 0.13483037779491133
5-char suffix: 0.11690439475713184
6-char suffix: 0.08519660755589822
TnT: 0.9449691595990748

In [26]:
final_accuracies_list = []
mean_accuracy_unigram = mean(unigram_accuracies)
standard_deviation_unigram = stdev(unigram_accuracies)
uni = {'unigram': {'mean': mean_accuracy_unigram, 'sd': standard_deviation_unigram}}
final_accuracies_list.append(uni)

mean_accuracy_bigram = mean(bigram_accuracies)
standard_deviation_bigram = stdev(bigram_accuracies)
bi = {'bigram': {'mean': mean_accuracy_bigram, 'sd': standard_deviation_bigram}}
final_accuracies_list.append(bi)

mean_accuracy_trigram = mean(trigram_accuracies)
standard_deviation_trigram = stdev(trigram_accuracies)
tri = {'trigram': {'mean': mean_accuracy_trigram, 'sd': standard_deviation_trigram}}
final_accuracies_list.append(tri)

mean_accuracy_backoff = mean(backoff_accuracies)
standard_deviation_backoff = stdev(backoff_accuracies)
back = {'1, 2, 3-gram backoff': {'mean': mean_accuracy_backoff, 'sd': standard_deviation_backoff}}
final_accuracies_list.append(back)

mean_accuracy_two_prefix = mean(two_prefix_accuracies)
standard_deviation_two_prefix = stdev(two_prefix_accuracies)
two_pre = {'2 prefix': {'mean': mean_accuracy_two_prefix, 'sd': standard_deviation_two_prefix}}
final_accuracies_list.append(two_pre)

mean_accuracy_three_prefix = mean(three_prefix_accuracies)
standard_deviation_three_prefix = stdev(three_prefix_accuracies)
three_pre = {'3 prefix': {'mean': mean_accuracy_three_prefix, 'sd': standard_deviation_three_prefix}}
final_accuracies_list.append(three_pre)

mean_accuracy_four_prefix = mean(four_prefix_accuracies)
standard_deviation_four_prefix = stdev(four_prefix_accuracies)
four_pre = {'4 prefix': {'mean': mean_accuracy_four_prefix, 'sd': standard_deviation_four_prefix}}
final_accuracies_list.append(four_pre)

mean_accuracy_two_suffix = mean(two_suffix_accuracies)
standard_deviation_two_suffix = stdev(two_suffix_accuracies)
two_suff= {'2 suffix': {'mean': mean_accuracy_two_suffix, 'sd': standard_deviation_two_suffix}}
final_accuracies_list.append(two_suff)

mean_accuracy_three_suffix = mean(three_suffix_accuracies)
standard_deviation_three_suffix = stdev(three_suffix_accuracies)
three_suff = {'3 suffix': {'mean': mean_accuracy_three_suffix, 'sd': standard_deviation_three_suffix}}
final_accuracies_list.append(three_suff)

mean_accuracy_four_suffix = mean(four_suffix_accuracies)
standard_deviation_four_suffix = stdev(four_suffix_accuracies)
four_suff = {'4 suffix': {'mean': mean_accuracy_four_suffix, 'sd': standard_deviation_four_suffix}}
final_accuracies_list.append(four_suff)

mean_accuracy_five_suffix = mean(five_suffix_accuracies)
standard_deviation_five_suffix = stdev(five_suffix_accuracies)
five_suff = {'5 suffix': {'mean': mean_accuracy_five_suffix, 'sd': standard_deviation_five_suffix}}
final_accuracies_list.append(five_suff)

mean_accuracy_six_suffix = mean(six_suffix_accuracies)
standard_deviation_six_suffix = stdev(six_suffix_accuracies)
six_suff = {'6 suffix': {'mean': mean_accuracy_six_suffix, 'sd': standard_deviation_six_suffix}}
final_accuracies_list.append(six_suff)

mean_accuracy_tnt = mean(tnt_accuracies)
standard_deviation_tnt = stdev(tnt_accuracies)
tnt = {'tnt': {'mean': mean_accuracy_tnt, 'sd': standard_deviation_tnt}}
final_accuracies_list.append(tnt)
In [27]:
final_dict = {}
for x in final_accuracies_list:
    final_dict.update(x)

df = pd.DataFrame(final_dict)
df
Out[27]:
1, 2, 3-gram backoff 2 prefix 2 suffix 3 prefix 3 suffix 4 prefix 4 suffix 5 suffix 6 suffix bigram tnt trigram unigram
mean 0.942375 0.109222 0.109222 0.137349 0.137349 0.135367 0.135367 0.120363 0.094180 0.607023 0.951578 0.690828 0.851957
sd 0.006851 0.002249 0.002249 0.002642 0.002642 0.002563 0.002563 0.003613 0.004038 0.018504 0.005047 0.028856 0.005064