@inproceedings{632891,
title = {WatchTower: Fast, Secure Mobile Page Loads Using Remote Dependency Resolution},
booktitle = {MobiSys},
year = {2019},
note = {Forthcoming},
abstract = {Remote dependency resolution (RDR) is a proxy-driven scheme for reducing mobile page load times; a proxy loads a requested page using a local browser, fetching the page{\textquoteright}s resources over fast proxy-origin links instead of a client{\textquoteright}s slow last-mile links. In this paper, we describe two fundamental challenges to efficient RDR proxying: the increasing popularity of encrypted HTTPS content, and the fact that, due to time-dependent network conditions and page properties, RDR proxying can actually increase load times. We solve these problems by introducing a new, secure proxying scheme for HTTPS traffic, and by implementing WatchTower, a selective proxying system that uses dynamic models of network conditions and page structures to only enable RDR when it is predicted to help. WatchTower loads pages 21.2\%{\textendash}41.3\% faster than state-of-the-art proxies and server push systems, while preserving end-to-end HTTPS security.},
author = {Ravi Netravali and Anirudh Sivaraman and James Mickens and Hari Balakrishnan}
}
@inproceedings{614185,
title = {Riverbed: Enforcing User-defined Privacy Constraints in Distributed Web Services},
booktitle = {NSDI},
year = {2019},
address = {Boston, MA},
abstract = {Riverbed is a new framework for building privacy-respecting web services. Using a simple policy language, users define restrictions on how a remote service can process and store sensitive data. A transparent \ Riverbed proxy sits between a user{\textquoteright}s front-end client (e.g., a web browser) and the back-end server code. The back-end code remotely attests to the proxy, demonstrating that the code respects user policies; in particular, the server code attests that it executes within a \ Riverbed-compatible managed runtime that uses IFC to enforce user policies. If attestation succeeds, the proxy releases the user{\textquoteright}s data, tagging it with the user-defined policies. On the server-side, the \ Riverbed runtime places all data with compatible policies into the same universe (i.e., the same isolated instance of the full web service). The universe mechanism allows \ Riverbed to work with unmodified, legacy software; unlike prior IFC systems, \ Riverbed does not require developers to reason about security lattices, or manually annotate code with labels. \ Riverbed imposes only modest performance overheads, with worst-case slowdowns of 10\% for several real applications.},
url = {https://mickens.seas.harvard.edu/files/mickens/files/riverbed.pdf},
author = {Frank Wang and Ronny Ko and James Mickens}
}
@inproceedings{613217,
title = {Alto: Lightweight VMs using Virtualization-aware Managed Runtimes},
booktitle = {International Conference on Managed Languages \& Runtimes (ManLang)},
year = {2018},
address = {Linz, Austria},
abstract = {Virtualization enables datacenter operators to safely run computations that belong to untrusted tenants. An ideal virtual machine has three properties: a small memory footprint; strong isolation from other VMs and the host OS; and the ability to maintain in-memory state across client requests. Unfortunately, modern virtualization technologies cannot provide all three properties at once. In this paper, we explain why, and propose a new virtualization approach, called Alto, that virtualizes at the layer of a managed runtime interface. Through careful design of (1) the application-facing managed interface and (2) the internal runtime architecture, Alto provides VMs that are small, secure, and stateful. Conveniently, Alto also simplifies VM operations like suspension, migration, and resumption. We provide several details about the proposed design, and discuss the remaining challenges that must be solved to fully realize the Alto vision.},
url = {https://mickens.seas.harvard.edu/files/mickens/files/alto.pdf},
author = {James Larisch and James Mickens and Eddie Kohler}
}
@inproceedings{610033,
title = {DeadBolt: Securing IoT Deployments},
booktitle = {Applied Networking Research Workshop},
year = {2018},
address = {Montreal, Quebec, Canada},
abstract = {In an IoT deployment, network access policies are a critical determinant of security. Devices that run unpatched or insecure code should not be allowed to communicate with the outside world; furthermore, unknown external hosts should not be able to contact sensitive devices that reside within an IoT deployment. In this paper, we introduce DeadBolt, a new security framework for managing IoT network access. DeadBolt hides all of the devices in an IoT deployment behind an access point that implements deny-by-default policies for both incoming and outgoing traffic. The DeadBolt AP also forces high-end IoT devices to use remote attestation to gain network access; attestation allows the devices to prove that they run up-to-date, trusted software. For lightweight IoT devices which lack the ability to attest, the DeadBolt AP uses virtual drivers (essentially, security-focused virtual network functions) to protect lightweight device traffic. For example, a virtual driver might provide network intrusion detection, or encrypt device traffic that is natively cleartext. Using these techniques, and several others, DeadBolt can prevent realistic attacks while imposing only modest performance costs.},
url = {https://mickens.seas.harvard.edu/files/mickens/files/deadbolt.pdf},
author = {Ronny Ko and James Mickens}
}
@inproceedings{589531,
title = {Remote-Control Caching: Proxy-based URL Rewriting to Decrease Mobile Browsing Bandwidth},
booktitle = {HotMobile},
year = {2018},
address = {Tempe, Arizona},
abstract = {Mobile browsers suffer from unnecessary cache misses. The same binary object is often named by multiple URLs which correspond to different cache keys. Furthermore, servers frequently mark objects as uncacheable, even though the objects{\textquoteright} content is stable over time. In this paper, we quantify the excess network traffic that mobile devices generate due to inefficient caching logic. We demonstrate that mobile page loads suffer from more redundant transfers than reported by prior studies which focused on desktop page loads. We then propose a new scheme, called Remote-Control Caching (RC2), in which web proxies (owned by mobile carriers or device manufacturers) track the aliasing relationships between the objects that a client has fetched, and the URLs that were used to fetch those objects. Leveraging knowledge of those aliases, a proxy dynamically rewrites the URLs inside of pages, allowing the client{\textquoteright}s local browser cache to satisfy a larger fraction of requests. Using a concrete implementation of RC2, we show that, for two loads of a page separated by 8 hours, RC2 reduces bandwidth consumption by a median of 52\%. As a result, mobile browsers can save a median of 469 KB per warm-cache page load.},
url = {https://mickens.seas.harvard.edu/files/mickens/files/rc2.pdf},
author = {Ravi Netravali and James Mickens}
}
@inproceedings{588641,
title = {Vesper: Measuring Time-to-Interactivity for Web Pages},
booktitle = {NSDI},
year = {2018},
address = {Renton, WA},
abstract = {Everyone agrees that web pages should load more quickly. However, a good definition for {\textquoteleft}{\textquoteleft}page load time{\textquoteright}{\textquoteright} is elusive. We argue that, in a modern web page, load times should be defined with respect to interactivity: a page is {\textquoteleft}{\textquoteleft}loaded{\textquoteright}{\textquoteright} when above-the-fold content is visible and the associated JavaScript event handling state is functional. We define a new load time metric, called Ready Index, which explicitly captures our proposed notion of load time. Defining the metric is straightforward, but actually measuring it is not, since web developers do not explicitly annotate the JavaScript state and the DOM elements which support interactivity. To solve this problem, we introduce Vesper, a tool which rewrites a page{\textquoteright}s JavaScript and HTML to automatically discover the page{\textquoteright}s interactive state. Armed with Vesper, we compare Ready Index to prior load time metrics like Speed Index; we find that, across a variety of network conditions, prior metrics underestimate or overestimate the true load time for a page by 24\%--64\%. We also introduce a tool that optimizes a page for Ready Index, decreasing the median time to page interactivity by 29\%--32\%.},
url = {https://mickens.seas.harvard.edu/files/mickens/files/vesper-nsdi.pdf},
author = {Ravi Netravali and Vikram Nathan and James Mickens and Hari Balakrishnan}
}
@inproceedings{588636,
title = {Prophecy: Accelerating Mobile Page Loads Using Final-state Write Logs},
booktitle = {NSDI},
year = {2018},
address = {Renton, WA},
abstract = {Web browsing on mobile devices is expensive in terms of battery drainage and bandwidth consumption. Mobile pages also frequently suffer from long load times due to high-latency cellular connections. In this paper, we introduce Prophecy, a new acceleration technology for mobile pages. Prophecy simultaneously reduces energy costs, bandwidth consumption, and page load times. In Prophecy, web servers precompute the JavaScript heap and the DOM tree for a page; when a mobile browser requests the page, the server returns a write log that contains a single write per JavaScript variable or DOM node. The mobile browser replays the writes to quickly reconstruct the final page state, eliding unnecessary intermediate computations. Prophecy{\textquoteright}s server-side component generates write logs by tracking low-level data flows between the JavaScript heap and the DOM. Using knowledge of these flows, Prophecy enables optimizations that are impossible for prior web accelerators; for example, Prophecy can generate write logs that interleave DOM construction and JavaScript heap construction, allowing interactive page elements to become functional immediately after they become visible to the mobile user. Experiments with real pages and real phones show that Prophecy reduces median page load time by 53\%, energy expenditure by 36\%, and bandwidth costs by 21\%.},
url = {https://mickens.seas.harvard.edu/files/mickens/files/prophecy-nsdi.pdf},
author = {Ravi Netravali and James Mickens}
}
@inproceedings{579466,
title = {Veil: Private Browsing Semantics Without Browser-side Assistance},
booktitle = {NDSS},
year = {2018},
address = {San Diego, CA},
abstract = {
All popular web browsers offer a "private browsing mode."\ After a private session terminates, the browser is supposed to remove client-side evidence that the session occurred. Unfortunately, browsers still leak information through the file system, the browser cache, the DNS cache, and on-disk reflections of RAM such as the swap file.
Veil is a new deployment framework that allows web developers to prevent these information leaks, or at least reduce their likelihood. Veil leverages the fact that, even though developers do not control the client-side browser implementation, developers do control 1) the content that is sent to those browsers, and 2) the servers which deliver that content. Veil web sites collectively store their content on Veil{\textquoteright}s blinding servers\ instead of on individual, site-specific servers. To publish a new page, developers pass their HTML, CSS, and JavaScript files to Veil{\textquoteright}s compiler; the compiler transforms the URLs in the content so that, when the page loads on a user{\textquoteright}s browser, URLs are derived from a secret user key. The blinding service and the Veil page exchange encrypted data that is also protected by the user{\textquoteright}s key. The result is that Veil pages can safely store encrypted content in the browser cache; furthermore, the URLs exposed to system interfaces like the DNS cache are unintelligible to attackers who do not possess the user{\textquoteright}s key. To protect against post-session inspection of swap file artifacts, Veil uses heap walking (which minimizes the likelihood that secret data is paged out), content mutation (which garbles in-memory artifacts if they do get swapped out), and DOM hiding (which prevents the browser from learning site-specific HTML, CSS, and JavaScript content in the first place). Veil pages load on unmodified commodity browsers, allowing developers to provide stronger semantics for private browsing without forcing users to install or reconfigure their machines. Veil provides these guarantees even if the user does not visit a page using a browser{\textquoteright}s native privacy mode; indeed, Veil{\textquoteright}s protections are stronger\ than what the browser alone can provide.
},
url = {https://mickens.seas.harvard.edu/files/mickens/files/veil.pdf},
author = {Frank Wang and James Mickens}
}
@inproceedings{564676,
title = {Cobweb: Practical Remote Attestation Using Contextual Graphs},
booktitle = {SysTEX},
year = {2017},
address = {Shanghai, China},
abstract = {
In theory, remote attestation is a powerful primitive for building distributed systems atop untrusting peers. Unfortunately, the canonical attestation framework defined by the Trusted Computing Group is insufficient to express rich contextual relationships between client-side software components. Thus, attestors and verifiers must rely on ad-hoc mechanisms to handle real-world attestation challenges like attestors that load executables in nondeterministic orders, or verifiers that require attestors to track dynamic information flows between attestor-side components.
In this paper, we survey these practical attestation challenges. We then describe a new attestation framework, named Cobweb, which handles these challenges. The key insight is that real-world attestation is a graph problem. An attestation message is a graph in which each vertex is a software component, and has one or more labels, e.g., the hash value of the component, or the raw file data, or a signature over that data. Each edge in an attestation graph is a contextual relationship, like the passage of time, or a parent/child fork() relationship, or a sender/receiver IPC relationship. Cobweb{\textquoteright}s verifier-side policies are graph predicates which analyze contextual relationships. Experiments with real, complex software stacks demonstrate that Cobweb{\textquoteright}s abstractions are generic and can support a variety of real-world policies.
},
url = {https://mickens.seas.harvard.edu/files/mickens/files/cobweb.pdf},
author = {Frank Wang and Yuna Joung and James Mickens}
}
@inproceedings{373616,
title = {Polaris: Faster Page Loads Using Fine-grained Dependency Tracking},
booktitle = {NSDI},
year = {2016},
address = {Santa Clara, CA},
abstract = {To load a web page, a browser must fetch and evaluate\ objects like HTML files and JavaScript source\ code. Evaluating an object can result in additional objects\ being fetched and evaluated. Thus, loading a web\ page requires a browser to resolve a dependency graph;\ this partial ordering constrains the sequence in which a\ browser can process individual objects. Unfortunately,\ many edges in a page{\textquoteright}s dependency graph are unobservable\ by today{\textquoteright}s browsers. To avoid violating these hidden\ dependencies, browsers make conservative assumptions\ about which objects to process next, leaving the network\ and CPU underutilized.
We provide two contributions. First, using a new measurement\ platform called Scout that tracks fine-grained\ data flows across the JavaScript heap and the DOM,\ we show that prior, coarse-grained dependency analyzers\ miss crucial edges: across a test corpus of 200\ pages, prior approaches miss 30\% of edges at the median,\ and 118\% at the 95th percentile. Second, we quantify\ the benefits of exposing these new edges to web\ browsers. We introduce Polaris, a dynamic client-side\ scheduler that is written in JavaScript and runs on unmodified\ browsers; using a fully automatic compiler,\ servers can translate normal pages into ones that load\ themselves with Polaris. Polaris uses fine-grained dependency\ graphs to dynamically determine which objects to\ load, and when. Since Polaris{\textquoteright} graphs have no missing\ edges, Polaris can aggressively fetch objects in a way that\ minimizes network round trips. Experiments in a variety\ of network conditions show that Polaris decreases page\ load times by 34\% at the median, and 59\% at the 95th\ percentile.},
url = {http://mickens.seas.harvard.edu/files/mickens/files/polaris.pdf},
author = {Ravi Netravali and Ameesh Goyal and James Mickens and Hari Balakrishnan}
}
@inproceedings{373611,
title = {Sieve: Cryptographically Enforced Access Control for User Data in Untrusted Clouds},
booktitle = {NSDI},
year = {2016},
address = {Santa Clara, CA},
abstract = {
Modern web services rob users of low-level control over\ cloud storage{\textemdash}a user{\textquoteright}s single logical data set is scattered\ across multiple storage silos whose access controls are\ set by web services, not users. The consequence is that\ users lack the ultimate authority to determine how their\ data is shared with other web services.
In this paper, we introduce Sieve, a new platform which\ selectively (and securely) exposes user data to web services.\ Sieve has a user-centric storage model: each user\ uploads encrypted data to a single cloud store, and by\ default, only the user knows the decryption keys. Given\ this storage model, Sieve defines an infrastructure to support\ rich, legacy web applications. Using attribute-based\ encryption, Sieve allows users to define intuitively understandable\ access policies that are cryptographically\ enforceable. Using key homomorphism, Sieve can reencrypt\ user data on storage providers in situ, revoking\ decryption keys from web services without revealing new\ keys to the storage provider. Using secret sharing and\ two factor authentication, Sieve protects cryptographic\ secrets against the loss of user devices like smartphones\ and laptops. The result is that users can enjoy rich, legacy\ web applications, while benefiting from cryptographically\ strong controls over which data a web service can access.
},
url = {http://mickens.seas.harvard.edu/files/mickens/files/sieve.pdf},
author = {Frank Wang and James Mickens and Nickolai Zeldovich and Vinod Vaikuntanathan}
}
@inproceedings{300586,
title = {Domino: Understanding Wide-Area, Asynchronous Event Causality in Web Applications},
booktitle = {SOCC},
note = {Short paper},
year = {2015},
address = {Kohala Coast, Hawai{\textquoteright}i},
abstract = {In a modern web application, a single high-level action like\ a mouse click triggers a flurry of asynchronous events on\ the client browser and remote web servers. We introduce\ Domino, a new tool which automatically captures and analyzes end-to-end, asynchronous causal relationship of events\ that span clients and servers. Using Domino, we found uncharacteristically long event chains in Bing Maps, discovered data races in the WinJS implementation of promises,\ and developed a new server-side scheduling algorithm for\ reducing the tail latency of server responses.},
url = {http://scholar.harvard.edu/files/mickens/files/domino.pdf},
author = {Ding Li and James Mickens and Suman Nath and Lenin Ravindranath}
}
@inproceedings{300486,
title = {Amber: Decoupling User Data from Web Applications},
booktitle = {HotOS},
year = {2015},
address = {Kartause Ittingen, Switzerland},
abstract = {User-generated content is becoming increasingly common\ on the Web, but current web applications isolate\ their users{\textquoteright} data, enabling only restricted sharing and\ cross-service integration. We believe users should be able\ to share their data seamlessly between their applications\ and with other users. To that end, we propose Amber, an\ architecture that decouples users{\textquoteright} data from applications,\ while providing applications with powerful global queries\ to find user data. We demonstrate how multi-user applications,\ such as e-mail, can use these global queries to\ efficiently collect and monitor relevant data created by\ other users. Amber puts users in control of which applications\ they use with their data and with whom it is shared,\ and enables a new class of applications by removing the\ artificial partitioning of users{\textquoteright} data by application.},
url = {http://scholar.harvard.edu/files/mickens/files/amber.pdf},
author = {Tej Chajed and Jon Gjengset and Jelle van den Hooff and M. Frans Kaashoek and James Mickens and Robert Morris and Nickolai Zeldovich}
}
@inproceedings{300481,
title = {Mahimahi: Accurate Record-and-Replay for HTTP},
booktitle = {USENIX ATC},
year = {2015},
address = {Santa Clara, CA},
abstract = {This paper presents Mahimahi, a framework to record\ traffic from HTTP-based applications, and later replay it\ under emulated network conditions. Mahimahi improves\ upon prior record-and-replay frameworks in three ways.\ First, it is more accurate because it carefully emulates the\ multi-server nature of Web applications, present in 98\%\ of the Alexa US Top 500 Web pages. Second, it isolates\ its own network traffic, allowing multiple Mahimahi instances\ emulating different networks to run concurrently\ without mutual interference. And third, it is designed as\ a set of composable shells, providing ease-of-use and extensibility.
We evaluate Mahimahi by: (1) analyzing the performance\ of HTTP/1.1, SPDY, and QUIC on a corpus of 500\ sites, (2) using Mahimahi to understand the reasons why\ these protocols are suboptimal, (3) developing Cumulus,\ a cloud-based browser designed to overcome these\ problems, using Mahimahi both to implement Cumulus\ by extending one of its shells, and to evaluate it, (4) using\ Mahimahi to evaluate HTTP multiplexing protocols\ on multiple performance metrics (page load time and\ speed index), and (5) describing how others have used\ Mahimahi.},
url = {http://scholar.harvard.edu/files/mickens/files/mahimahi.pdf},
author = {Ravi Netravali and Anirudh Sivaraman and Somak Das and Ameesh Goyal and Keith Winstein and James Mickens and Hari Balakrishnan}
}
@inproceedings{300476,
title = {Mj{\"o}lnir: The Magical Web Application Hammer},
booktitle = {APSys},
year = {2015},
address = {Tokyo, Japan},
abstract = {Conventional wisdom suggests that rich, large-scale web\ applications are difficult to build and maintain. An implicit\ assumption behind this intuition is that a large web\ application requires massive numbers of servers, and complicated,\ one-off back-end architectures. We provide\ empirical evidence to disprove this intuition. We then\ propose new programming abstractions and a new deployment\ model that reduce the overhead of building and\ running web services.},
url = {http://scholar.harvard.edu/files/mickens/files/mjolnir.pdf},
author = {Jelle van den Hooff and David Lazar and James Mickens}
}
@inproceedings{300501,
title = {Blizzard: Fast, Cloud-scale Block Storage for Cloud-oblivious Applications},
booktitle = {NSDI},
year = {2014},
address = {Seattle, WA},
abstract = {Blizzard is a high-performance block store that exposes\ cloud storage to cloud-oblivious POSIX and\ Win32 applications. Blizzard connects clients and\ servers using a network with full-bisection bandwidth,\ allowing clients to access any remote disk as fast as if\ it were local. Using a novel striping scheme, Blizzard\ exposes high disk parallelism to both sequential and random\ workloads; also, by decoupling the durability and\ ordering requirements expressed by flush requests, Blizzard\ can commit writes out-of-order, providing high performance\ and crash consistency to applications that issue\ many small, random IOs. Blizzard{\textquoteright}s virtual disk drive,\ which clients mount like a normal physical one, provides\ maximum throughputs of 1200 MB/s, and can improve\ the performance of unmodified, cloud-oblivious applications\ by 2x{\textendash}10x. Compared to EBS, a commercially\ available, state-of-the-art virtual drive for cloud applications,\ Blizzard can improve SQL server IOp rates by\ seven-fold while still providing crash consistency.},
url = {http://scholar.harvard.edu/files/mickens/files/blizzard.pdf},
author = {James Mickens and Edmund B. Nightingale and Jeremy Elson and Krishna Nareddy and Darren Gehring and Bin Fan and Asim Kadav and Vijay Chidambaram and Osama Khan}
}
@inproceedings{300496,
title = {Pivot: Fast, Synchronous Mashup Isolation Using Generator Chains},
booktitle = {IEEE Symposium on Security and Privacy},
year = {2014},
address = {San Jose, CA},
abstract = {
Pivot is a new JavaScript isolation framework for\ web applications. Pivot uses iframes as its low-level isolation\ containers, but it uses code rewriting to implement synchronous\ cross-domain interfaces atop the asynchronous cross-frame\ postMessage() primitive. Pivot layers a distributed\ scheduling abstraction across the frames, essentially treating\ each frame as a thread which can invoke RPCs that are\ serviced by external threads. By rewriting JavaScript call\ sites, Pivot can detect RPC invocations; Pivot exchanges RPC\ requests and responses via postMessage(), and it pauses\ and restarts frames using a novel rewriting technique that\ translates each frame{\textquoteright}s JavaScript code into a restartable\ generator function. By leveraging both iframes and rewriting,\ Pivot does not need to rewrite all code, providing an order-of-magnitude performance improvement over rewriting-only\ solutions. Compared to iframe-only approaches, Pivot provides\ synchronous RPC semantics, which developers typically prefer\ over asynchronous RPCs. Pivot also allows developers to use\ the full, unrestricted JavaScript language, including powerful\ statements like eval().
},
url = {http://scholar.harvard.edu/files/mickens/files/pivot.pdf},
author = {James Mickens}
}
@inproceedings{300591,
title = {Shroud: Ensuring Private Access to Large-Scale Data in the Data Center},
booktitle = {FAST},
year = {2013},
address = {San Jose, CA},
abstract = {Recent events have shown online service providers\ the perils of possessing private information about users.\ Encrypting data mitigates but does not eliminate this\ threat: the pattern of data accesses still reveals information.\ Thus, we present Shroud, a general storage system\ that hides data access patterns from the servers running\ it, protecting user privacy. Shroud functions as a virtual\ disk with a new privacy guarantee: the user can look up a\ block without revealing the block{\textquoteright}s address. Such a virtual\ disk can be used for many purposes, including map\ lookup, microblog search, and social networking.
Shroud aggressively targets hiding accesses among\ hundreds of terabytes of data. We achieve our goals by\ adapting oblivious RAM algorithms to enable large-scale\ parallelization. Specifically, we show, via new techniques\ such as oblivious aggregation, how to securely\ use many inexpensive secure coprocessors acting in parallel\ to improve request latency. Our evaluation combines\ large-scale emulation with an implementation on\ secure coprocessors and suggests that these adaptations\ bring private data access closer to practicality.},
url = {http://scholar.harvard.edu/files/mickens/files/shroud.pdf},
author = {Jacob R. Lorch and Bryan Parno and James Mickens and Mariana Raykova and Joshua Schiffman}
}
@inproceedings{300596,
title = {Gibraltar: Exposing Hardware Devices to Web Pages Using AJAX},
booktitle = {USENIX WebApps},
year = {2012},
address = {Boston, MA},
abstract = {Gibraltar is a new framework for exposing hardware devices\ to web pages. Gibraltar{\textquoteright}s fundamental insight is that JavaScript{\textquoteright}s AJAX facility can be used as a hardware access protocol.\ Instead of relying on the browser to mediate device interactions,\ Gibraltar sandboxes the browser and uses a small\ device server to handle hardware requests. The server uses\ native code to interact with devices, and it exports a standard\ web server interface on the localhost. To access hardware,\ web pages send device commands to the server using\ HTTP requests; the server returns hardware data via HTTP\ responses.
Using a client-side JavaScript library, we build a simple\ yet powerful device API atop this HTTP transfer protocol.\ The API is particularly useful to developers of mobile web\ pages, since mobile platforms like cell phones have an increasingly\ wide array of sensors that, prior to Gibraltar, were\ only accessible via native code plugins or the limited, inconsistent\ APIs provided by HTML5. Our implementation of\ Gibraltar on Android shows that Gibraltar provides stronger\ security guarantees than HTML5; furthermore, it shows that\ HTTP is responsive enough to support interactive web pages\ that perform frequent hardware accesses. Gibraltar also supports\ an HTML5 compatibility layer that implements the\ HTML5 interface but provides Gibraltar{\textquoteright}s stronger security.},
url = {http://scholar.harvard.edu/files/mickens/files/gibraltar.pdf},
author = {Kaisen Lin and David Chu and James Mickens and Li Zhuang and Feng Zhao and Jian Qiu}
}
@inproceedings{300601,
title = {Jigsaw: Efficient, Low-effort Mashup Isolation},
booktitle = {USENIX WebApps},
year = {2012},
address = {Boston, MA},
abstract = {A web application often includes content from a variety\ of origins. Securing such a mashup application\ is challenging because origins often distrust each other\ and wish to expose narrow interfaces to their private\ code and data. Jigsaw is a new framework for isolating\ these mashup components. Jigsaw is an extension of\ the JavaScript language that can be run inside standard\ browsers using a Jigsaw-to-JavaScript compiler. Unlike\ prior isolation schemes that require developers to\ specify complex, error-prone policies, Jigsaw leverages\ the well-understood public/private keywords from traditional\ object-oriented languages, making it easy for a domain\ to tag internal data as externally visible. Jigsaw\ provides strong iframe-like isolation, but unlike previous\ approaches that use actual iframes as isolation containers,\ Jigsaw allows mutually distrusting code to run\ inside the same frame; this allows scripts to share state\ using synchronous method calls instead of asynchronous\ message passing. Jigsaw also introduces a novel encapsulation\ mechanism called surrogates. Surrogates allow\ domains to safely exchange objects by reference instead\ of by value. This improves sharing efficiency by eliminating\ cross-origin marshaling overhead.},
url = {http://scholar.harvard.edu/files/mickens/files/jigsaw.pdf},
author = {James Mickens and Matthew Finifter}
}
@inproceedings{300606,
title = {Rivet: Browser-agnostic Remote Debugging for Web Applications},
booktitle = {USENIX ATC},
year = {2012},
address = {Boston, MA},
abstract = {Rivet is the first fully-featured, browser-agnostic remote debugger for web applications. Using Rivet, developers can inspect and modify the state of live web\ pages that are running inside unmodified end-user web\ browsers. This allows developers to explore real application bugs in the context of the actual machines on which\ those bugs occur. To make an application Rivet-aware,\ developers simply add the Rivet JavaScript library to the\ client-side portion of the application. Later, when a user\ detects a problem with the application, the user informs\ Rivet; in turn, Rivet pauses the application and notifies\ a remote debug server that a debuggable session is available. The server can launch an interactive debugger front-end for a human developer, or use Rivet{\textquoteright}s live patching\ mechanism to automatically install a fix on the client or\ run diagnostics for offline analysis. Experiments show\ that Rivet imposes negligible overhead during normal application operation. At debug time, Rivet{\textquoteright}s network footprint is small, and Rivet is computationally fast enough to\ support non-trivial diagnostics and live patches.},
url = {http://scholar.harvard.edu/files/mickens/files/rivet.pdf},
author = {James Mickens}
}
@inproceedings{300611,
title = {Atlantis: Robust, Extensible Execution Environments for Web Applications},
booktitle = {SOSP},
year = {2011},
address = {Cascais, Portugal},
abstract = {Today{\textquoteright}s web applications run inside a complex browser environment\ that is buggy, ill-specified, and implemented in\ different ways by different browsers. Thus, web applications\ that desire robustness must use a variety of conditional code\ paths and ugly hacks to deal with the vagaries of their runtime.\ Our new exokernel browser, called Atlantis, solves\ this problem by providing pages with an extensible execution\ environment. Atlantis defines a narrow API for basic\ services like collecting user input, exchanging network data,\ and rendering images. By composing these primitives, web\ pages can define custom, high-level execution environments.\ Thus, an application which does not want a dependence on\ Atlantis{\textquoteright} predefined web stack can selectively redefine components\ of that stack, or define markup formats and scripting\ languages that look nothing like the current browser runtime.\ Unlike prior microkernel browsers like OP, and unlike\ compile-to-JavaScript frameworks like GWT, Atlantis is the\ first browsing system to truly minimize a web page{\textquoteright}s dependence\ on black box browser code. This makes it much easier\ to develop robust, secure web applications.},
url = {http://scholar.harvard.edu/files/mickens/files/atlantis.pdf},
author = {James Mickens and Mohan Dhawan}
}