@inproceedings{89d5ea01b2ba4f65acd36b6f06279dc4,
title = "Learning linearly separable languages",
abstract = "This paper presents a novel paradigm for learning languages that consists of mapping strings to an appropriate high-dimensional feature space and learning a separating hyperplane in that space. It initiates the study of the linear separability of automata and languages by examining the rich class of piecewise-testable languages. It introduces a high-dimensional feature map and proves piecewise-testable languages to be linearly separable in that space. The proof makes use of word combinatorial results relating to subsequences. It also shows that the positive definite kernel associated with this embedding can be computed in quadratic time. It examines the use of support vector machines in combination with this kernel to determine a separating hyperplane, and the corresponding learning guarantees. It also proves that all languages linearly separable under a regular finite cover embedding, a generalization of the embedding used here, are regular.",
author = "Leonid Kontorovich and Corinna Cortes and Mehryar Mohri",
year = "2006",
doi = "10.1007/11894841_24",
language = "English (US)",
isbn = "3540466495",
series = "Lecture Notes in Computer Science",
publisher = "Springer-Verlag",
pages = "288--303",
booktitle = "Algorithmic Learning Theory - 17th International Conference, ALT 2006, Proceedings",
note = "17th International Conference on Algorithmic Learning Theory, ALT 2006; Conference date: 7-10 October 2006",
}
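
The abstract's quadratic-time claim for the kernel can be illustrated with a short sketch. Reading the abstract, a natural instantiation of the embedding indexes coordinates by candidate strings u, with the coordinate of x recording whether u occurs as a (not necessarily contiguous) subsequence of x; under that reading, K(x, y) is the number of distinct strings that are subsequences of both x and y. The dynamic program below is a standard recurrence for counting distinct common subsequences in O(|x| |y|) time. It is a minimal illustration consistent with the abstract, not the paper's own algorithm, and the function name subsequence_kernel is invented for this sketch.

def subsequence_kernel(x: str, y: str) -> int:
    """Count distinct common subsequences of x and y, including the empty string.

    Illustrative sketch only: a standard O(|x|*|y|) recurrence consistent with the
    quadratic-time claim in the abstract, not code taken from the paper itself.
    """
    m, n = len(x), len(y)
    # dp[i][j] = number of distinct common subsequences of x[:i] and y[:j]
    dp = [[1] * (n + 1) for _ in range(m + 1)]  # row/column 0: only the empty string
    last_x = {}  # last 1-based position of each symbol seen so far in x[:i-1]
    for i in range(1, m + 1):
        last_y = {}  # last 1-based position of each symbol seen so far in y[:j-1]
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                c = x[i - 1]
                p, q = last_x.get(c), last_y.get(c)
                # Doubling counts every common subsequence of the prefixes with and
                # without a trailing c; subtract those already counted at the previous
                # joint occurrence of c to avoid double counting.
                dp[i][j] = 2 * dp[i - 1][j - 1] - (dp[p - 1][q - 1] if p and q else 0)
            else:
                dp[i][j] = dp[i - 1][j] + dp[i][j - 1] - dp[i - 1][j - 1]
            last_y[y[j - 1]] = j
        last_x[x[i - 1]] = i
    return dp[m][n]

# Example: the common subsequences of "aba" and "ba" are "", "a", "b", and "ba".
assert subsequence_kernel("aba", "ba") == 4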