GET /api/v2/video/504
HTTP 200 OK
Vary: Accept
Content-Type: text/html; charset=utf-8
Allow: GET, PUT, PATCH, HEAD, OPTIONS
{
  "category": "PyOhio 2010",
  "language": "English",
  "slug": "pyohio-2010--processing-large-datasets-with-hadoo",
  "speakers": [
    "William McVey"
  ],
  "tags": [
    "datasets",
    "hadoop",
    "pyohio",
    "pyohio2010"
  ],
  "id": 504,
  "state": 1,
  "title": "PyOhio 2010: Processing Large Datasets with Hadoop and Python",
  "summary": "",
  "description": "Processing Large Datasets with Hadoop and Python\n\nPresented by William McVey\n\nThis talk will explore how Hadoop along with Python can be used to process\nlarge datasets. An overview of the Apache Hadoop project will be given. The\nmap/reduce concept will be introduced and some methods of coding the data\nprocessing routines in python will be explored. The talk will use real world\nexamples to illustrate how this approach can be used to parallelize\ncomputationally expensive operations across multiple cluster nodes effectively\nusing python.\n\nThe course will assume familiarity with the Python language during the demos,\nbut will not actually require a deep knowledge of python to understand the\nconcepts introduced.\n\n",
  "quality_notes": "",
  "copyright_text": "Creative Commons Attribution-NonCommercial-ShareAlike 3.0",
  "embed": "",
  "thumbnail_url": "",
  "duration": null,
  "video_ogv_length": 105406594,
  "video_ogv_url": null,
  "video_ogv_download_only": false,
  "video_mp4_length": null,
  "video_mp4_url": null,
  "video_mp4_download_only": false,
  "video_webm_length": null,
  "video_webm_url": null,
  "video_webm_download_only": false,
  "video_flv_length": null,
  "video_flv_url": "",
  "video_flv_download_only": false,
  "source_url": "",
  "whiteboard": "",
  "recorded": null,
  "added": "2012-02-23T04:20:00",
  "updated": "2014-04-08T20:28:25.837"
}