@INPROCEEDINGS{1405Gottlieb2013,
  AUTHOR = {Luke Gottlieb and Jaeyoung Choi and Gerald Friedland and Pascal Kelm and Thomas Sikora},
  TITLE = {On Pushing the Limits of Mechanical Turk: Qualifying the Crowd for Video Geolocation},
  BOOKTITLE = {Multimedia Communications Technical Committee, IEEE Communications Society},
  YEAR = {2013},
  MONTH = jan,
  PUBLISHER = {IEEE Communications Society},
  PAGES = {27--29},
  ORGANIZATION = {IEEE},
  NOTE = {Volume 8, Number 1, January 2013. Emerging Topics: Special Issue on "Multimedia and Cloud Computing"; Industrial Column: Special Issue on "Crowdsourcing-based Multimedia Systems"},
  PDF = {http://elvera.nue.tu-berlin.de/files/1405Gottlieb2013.pdf},
  DOI = {http://committees.comsoc.org/mmc/eletters.asp},
  URL = {http://committees.comsoc.org/mmc/e-news/E-Letter-January13.pdf},
  ABSTRACT = {This work first appeared in Gottlieb et al. [1]. In this article we summarize the methods we used to find skilled Mechanical Turk participants for our annotation task, which is to determine the geolocation of random videos from the web. The task itself is unlike the standard setup for a Mechanical Turk task in that it is difficult for both humans and machines, whereas a standard Mechanical Turk task is usually easy for humans and difficult or impossible for machines. There are two notable challenges in finding skilled workers for this task. First, we must find what we term “honest operators”, i.e., people who will seriously attempt the task rather than click quickly through it to collect the bounty. Second, we need to develop meaningful qualification test sets that are challenging enough to qualify people for the real task, yet are also solvable by individuals regardless of their culture or location, although understanding English is required for the instructions.}
}