@inproceedings{franklin2011crowddb,
  author    = {Franklin, Michael J. and Kossmann, Donald and Kraska, Tim and Ramesh, Sukriti and Xin, Reynold},
  title     = {{CrowdDB}: Answering Queries with Crowdsourcing},
  booktitle = {Proceedings of the 2011 {ACM} {SIGMOD} International Conference on Management of Data},
  series    = {SIGMOD '11},
  year      = {2011},
  pages     = {61--72},
  numpages  = {12},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Athens, Greece},
  isbn      = {978-1-4503-0661-4},
  doi       = {10.1145/1989323.1989331},
  abstract  = {Some queries cannot be answered by machines only. Processing such queries requires human input for providing information that is missing from the database, for performing computationally difficult functions, and for matching, ranking, or aggregating results based on fuzzy criteria. CrowdDB uses human input via crowdsourcing to process queries that neither database systems nor search engines can adequately answer. It uses SQL both as a language for posing complex queries and as a way to model data. While CrowdDB leverages many aspects of traditional database systems, there are also important differences. Conceptually, a major change is that the traditional closed-world assumption for query processing does not hold for human input. From an implementation perspective, human-oriented query operators are needed to solicit, integrate and cleanse crowdsourced data. Furthermore, performance and cost depend on a number of new factors including worker affinity, training, fatigue, motivation and location. We describe the design of CrowdDB, report on an initial set of experiments using Amazon Mechanical Turk, and outline important avenues for future work in the development of crowdsourced query processing systems.},
}