/*
* Copyright (C) 2012, 2013
* Dale Weiler
- *
+ * Wolfgang Bumiller
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
*
* A little about how it works, and probability theory:
*
- * When given an identifier (which we will denote I), we're essentially
+ * When given an identifier (which we will denote I), we're essentially
* just trying to choose the most likely correction for that identifier.
* (the actual "correction" can very well be the identifier itself).
* There is actually no way to know for sure that certain identifiers
* need correcting at all; instead we try to find the correction C,
* out of all possible corrections, that maximizes the probability of C
* for the original identifier I.
*
- * Bayes' Therom suggests something of the following:
+ * Thankfully there exist some theories for probabilistic interpretations
+ * of data. Since we're operating on two distinct interpretations, the
+ * transposition from I to C, we need something that can express to what
+ * degree I should rationally change to become C. This is called the
+ * Bayesian interpretation. You can read more about it here:
+ * http://www.celiagreen.com/charlesmccreery/statistics/bayestutorial.pdf
+ * (which is probably the only good online documentation for Bayes'
+ * theory, no lie. Everything else just sucks ...)
+ *
+ * Bayes' Theorem suggests something like the following:
* AC P(I|C) P(C) / P(I)
* (where AC denotes the argmax over all candidate corrections C)
- * Since P(I) is the same for every possibly I, we can ignore it giving
+ *
+ * However, since P(I) is the same for every candidate correction C, we
+ * can completely ignore it, giving just:
* AC P(I|C) P(C)
*
* This greatly helps visualize how the parts of the expression are
* performed: P(C), the probability of a correction C on its own, and
* P(I|C), the probability of the transposition from C to I. It's simply
* much cleaner, and more direct, to separate the two factors.
*
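+ * As a hedged illustration with made-up counts: if the dictionary holds
+ * "vector" 1000 times and "vectar" twice, then for I = "vectar" the
+ * candidate C = "vector" wins whenever P(I|C) * 1000 outweighs
+ * P(vectar|vectar) * 2; a frequent word one small edit away beats a
+ * rare exact match.
+ *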
+ * Research tells us that 80% to 95% of all spelling errors have an edit
+ * distance no greater than one. Knowing this we can optimize for most
+ * cases of mistakes without taking a performance hit, and build longer
+ * edit distances off those results, as opposed to the original method I
+ * had conceived of checking everything.
+ *
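+ * A hedged sketch of that strategy (mirroring correct_str further down):
+ *
+ *     e1 = all edit-distance-one candidates of I
+ *     if any e1 candidate is known, pick the most frequent
+ *     else build distance-two candidates from e1 and try again
+ *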
* A little information on additional algorithms used:
- *
- * Initially when I implemented this corrector, it was very slow.
+ *
+ * Initially when I implemented this corrector, it was very slow.
* Need I remind you this is essentially a brute force attack on strings,
* and since every transformation requires dynamic memory allocations,
* you can easily imagine where most of the runtime went. Yes, the
* allocator; quite a shock to me. A forward allocator (or as some call
* it a bump-pointer allocator, or just a memory pool) was implemented
* to combat this.
*
- * But of course even other factors were making it slow. Initially
+ * But of course even other factors were making it slow. Initially
* this used a hashtable. And hashtables have a good constant lookup
* time complexity. But the problem wasn't in the hashtable, it was
* in the hashing (despite having one of the fastest hash functions
* for strings, doing almost 3 million hashes per identifier is still
* very slow). To combat this I had suggested burst tries to Blub.
* The next day he had implemented them. Sure enough this brought
* down the runtime by a factor > 100%.
+ *
+ * Future Work (If we really need it)
+ *
+ * Currently we can only distinguish one source of error in the
+ * language model we use. This could become an issue for identifiers
+ * that have close colliding rates, e.g. colate->coat yields collate.
+ *
+ * Currently the error model has been fairly trivial: the smaller the
+ * edit distance, the smaller the error. This usually causes some
+ * unexpected problems, e.g. reciet->recite yields recipt. For QuakeC
+ * this could become a problem when lots of identifiers are involved.
+ *
+ * Our control mechanism could use a limit, i.e. limit the number of
+ * sets of edits for distance X. This would also increase execution
+ * speed considerably.
+ *
*/
}
/*
- * A fast space efficent trie for a disctonary of identifiers. This is
+ * A fast space-efficient trie for a dictionary of identifiers. This is
* faster than a hashtable for one reason. A hashtable itself may have
* fast constant lookup time, but the hash itself must be very fast. We
* have one of the fastest hash functions for strings, but if you do a
* lot of hashing (which we do, almost 3 million hashes per identifier)
- * a hashtable becomes slow. Very Very Slow.
+ * a hashtable becomes slow.
*/
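+/*
+ * For reference, a hedged sketch of the node layout the functions below
+ * assume; the real definition lives in the project's headers, and the
+ * field names are taken from their use in this file:
+ *
+ *     typedef struct correct_trie_s {
+ *         void                  *value;
+ *         unsigned char          ch;
+ *         struct correct_trie_s *entries;
+ *     } correct_trie_t;
+ *
+ * value carries the frequency data for a completed key, ch is the
+ * character a node matches, and entries is a vec_* vector of children.
+ */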
correct_trie_t* correct_trie_new() {
correct_trie_t *t = (correct_trie_t*)mem_a(sizeof(correct_trie_t));
void correct_trie_set(correct_trie_t *t, const char *key, void * const value) {
const unsigned char *data = (const unsigned char*)key;
while (*data) {
- unsigned char ch = *data;
- correct_trie_t *entries = t->entries;
- const size_t vs = vec_size(t->entries);
- size_t i;
+ const size_t vs = vec_size(t->entries);
+ unsigned char ch = *data;
+ correct_trie_t *entries = t->entries;
+ size_t i;
+
for (i = 0; i < vs; ++i) {
    if (entries[i].ch == ch) {
        t = &entries[i];
        break; /* found the child matching this character */
    }
}
if (i == vs) {
correct_trie_t *elem = (correct_trie_t*)vec_add(t->entries, 1);
+
elem->ch = ch;
elem->value = NULL;
elem->entries = NULL;
- t = elem;
+ t = elem;
}
++data;
}
t->value = value; /* store the caller's value on the node for the full key */
}
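+
+/*
+ * A hedged usage sketch; mem_a and the size_t* frequency values follow
+ * the conventions visible in correct_del and correct_maximum below:
+ *
+ *     correct_trie_t *dict  = correct_trie_new();
+ *     size_t         *count = (size_t*)mem_a(sizeof(size_t));
+ *     *count = 1;
+ *     correct_trie_set(dict, "vector", count);
+ */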
void correct_del(correct_trie_t* dictonary, size_t **data) {
- size_t i;
+ size_t i;
const size_t vs = vec_size(data);
+
for (i = 0; i < vs; i++)
mem_d(data[i]);
* because they're only valid after a first character that is a _ or an
* alpha character.
*/
-static const char correct_alpha[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_";
+static const char correct_alpha[] = "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "_"; /* TODO: Numbers ... */
/*
* correcting logic for the following forms of transformations:
* 1) deletion
* 2) transposition
* 3) alteration
* 4) insertion
+ *
+ * These functions could take an additional size_t **size parameter
+ * and store back the results of their new lengths in an array that
+ * parallels **array, for the memcmp in correct_exist. I'm just
+ * not able to figure out how to do that just yet, as my brain is
+ * not in the mood to figure out that logic. This is a reminder to
+ * do it, or for someone else to :-) correct_edit however would also
+ * need to take a size_t ** to carry it along (would all the argument
+ * overhead be worth it?)
*/
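+/*
+ * A hedged sketch of how these generators presumably combine (the actual
+ * correct_edit is not part of this hunk); each pass appends candidates
+ * at a running index and returns how many it wrote:
+ *
+ *     next  = correct_deletion     (ident, array, 0);
+ *     next += correct_transposition(ident, array, next);
+ *     next += correct_alteration   (ident, array, next);
+ *     next += correct_insertion    (ident, array, next);
+ */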
static size_t correct_deletion(const char *ident, char **array, size_t index) {
- size_t itr;
- size_t len = strlen(ident);
+ size_t itr = 0;
+ const size_t len = strlen(ident);
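+
+    /* e.g. for "abc" this yields "bc", "ac" and "ab": one candidate per
+     * deleted character. */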
- for (itr = 0; itr < len; itr++) {
+ for (; itr < len; itr++) {
char *a = (char*)correct_pool_alloc(len+1);
memcpy(a, ident, itr);
memcpy(a + itr, ident + itr + 1, len - itr);
}
static size_t correct_transposition(const char *ident, char **array, size_t index) {
- size_t itr;
- size_t len = strlen(ident);
+ size_t itr = 0;
+ const size_t len = strlen(ident);
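+
+    /* e.g. for "abc" this yields "bac" and "acb": each adjacent pair of
+     * characters swapped once. */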
- for (itr = 0; itr < len - 1; itr++) {
+ for (; itr < len - 1; itr++) {
char tmp;
char *a = (char*)correct_pool_alloc(len+1);
memcpy(a, ident, len+1);
}
static size_t correct_alteration(const char *ident, char **array, size_t index) {
- size_t itr;
- size_t jtr;
- size_t ktr;
- size_t len = strlen(ident);
+ size_t itr = 0;
+ size_t jtr = 0;
+ size_t ktr = 0;
+ const size_t len = strlen(ident);
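+
+    /* e.g. for "ab" this replaces each character in turn with every
+     * character of correct_alpha. */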
- for (itr = 0, ktr = 0; itr < len; itr++) {
+ for (; itr < len; itr++) {
for (jtr = 0; jtr < sizeof(correct_alpha)-1; jtr++, ktr++) {
char *a = (char*)correct_pool_alloc(len+1);
memcpy(a, ident, len+1);
}
static size_t correct_insertion(const char *ident, char **array, size_t index) {
- size_t itr;
- size_t jtr;
- size_t ktr;
- const size_t len = strlen(ident);
+ size_t itr = 0;
+ size_t jtr = 0;
+ size_t ktr = 0;
+ const size_t len = strlen(ident);
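+
+    /* e.g. for "ab" this inserts every character of correct_alpha at each
+     * of the three possible positions, hence the len+2 allocation. */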
- for (itr = 0, ktr = 0; itr <= len; itr++) {
+ for (; itr <= len; itr++) {
for (jtr = 0; jtr < sizeof(correct_alpha)-1; jtr++, ktr++) {
char *a = (char*)correct_pool_alloc(len+2);
memcpy(a, ident, itr);
static int correct_exist(char **array, size_t rows, char *ident) {
size_t itr;
for (itr = 0; itr < rows; itr++)
- if (!strcmp(array[itr], ident))
+ if (!memcmp(array[itr], ident, strlen(ident)))
return 1;
return 0;
}
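+
+/*
+ * Note: the memcmp above compares only strlen(ident) bytes, i.e. it is a
+ * prefix match; carrying lengths along (the size_t **size idea in the
+ * comment further up) would make this exact and drop the strlen call.
+ */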
static char **correct_known(correct_trie_t* table, char **array, size_t rows, size_t *next) {
- size_t itr;
- size_t jtr;
- size_t len;
- size_t row;
+ size_t itr = 0;
+ size_t jtr = 0;
+ size_t len = 0;
+ size_t row = 0;
size_t nxt = 8;
char **res = correct_pool_alloc(sizeof(char *) * nxt);
char **end = NULL;
- for (itr = 0, len = 0; itr < rows; itr++) {
+ for (; itr < rows; itr++) {
end = correct_edit(array[itr]);
row = correct_size(array[itr]);
+ /* removing jtr=0 here speeds it up by 100ms O_o */
for (jtr = 0; jtr < row; jtr++) {
if (correct_find(table, end[jtr]) && !correct_exist(res, len, end[jtr])) {
res = correct_known_resize(res, &nxt, len+1);
}
static char *correct_maximum(correct_trie_t* table, char **array, size_t rows) {
- char *str = NULL;
- size_t *itm = NULL;
- size_t itr;
- size_t top;
+ char *str = NULL;
+ size_t *itm = NULL;
+ size_t itr = 0;
+ size_t top = 0;
- for (itr = 0, top = 0; itr < rows; itr++) {
+ for (; itr < rows; itr++) {
if ((itm = correct_find(table, array[itr])) && (*itm > top)) {
top = *itm;
str = array[itr];
*
* the add function works the same, except the identifier is used to
* add to the dictionary.
- */
-
+ */
char *correct_str(correct_trie_t* table, const char *ident) {
- char **e1;
- char **e2;
- char *e1ident;
- char *e2ident;
-
- size_t e1rows = 0;
- size_t e2rows = 0;
+ char **e1 = NULL;
+ char **e2 = NULL;
+ char *e1ident = NULL;
+ char *e2ident = NULL;
+ size_t e1rows = 0;
+ size_t e2rows = 0;
correct_pool_new();