
@inproceedings{wukong2015wang,
author = {Wang, Haoyu and Guo, Yao and Ma, Ziang and Chen, Xiangqun},
title = {WuKong: A Scalable and Accurate Two-Phase Approach to Android App Clone Detection},
year = {2015},
isbn = {9781450336208},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/2771783.2771795},
doi = {10.1145/2771783.2771795},
booktitle = {Proceedings of the 2015 International Symposium on Software Testing and Analysis},
pages = {71--82},
numpages = {12},
keywords = {third-party library, Clone detection, mobile applications, repackaging, Android},
location = {Baltimore, MD, USA},
series = {ISSTA 2015}
}
@inproceedings{moran2018automated,
  title={Automated reporting of GUI design violations for mobile apps},
  author={Moran, Kevin and Li, Boyang and Bernal-C{\'a}rdenas, Carlos and Jelf, Dan and Poshyvanyk, Denys},
  booktitle={Proceedings of the 40th International Conference on Software Engineering},
  pages={165--175},
  year={2018}
}
@article{Yang2018,
abstract = {This work develops a static analysis to create a model of the behavior of an Android application's GUI. We propose the window transition graph (WTG), a model representing the possible GUI window sequences and their associated events and callbacks. A key component and contribution of our work is the careful modeling of the stack of currently-active windows, the changes to this stack, and the effects of callbacks related to these changes. To the best of our knowledge, this is the first detailed study of this important static analysis problem for Android. We develop novel analysis algorithms for WTG construction and traversal, based on this modeling of the window stack. We also propose WTG extensions to handle certain aspects of asynchronous control flow. We describe an application of the WTG for GUI test generation, using path traversals. The evaluation of the proposed algorithms indicates their effectiveness and practicality.},
author = {Yang, Shengqian and Wu, Haowei and Zhang, Hailong and Wang, Yan and Swaminathan, Chandrasekar and Yan, Dacong and Rountev, Atanas},
doi = {10.1007/s10515-018-0237-6},
issn = {15737535},
journal = {Automated Software Engineering},
keywords = {Android,GUI analysis,Static analysis},
number = {4},
pages = {833--873},
publisher = {Springer US},
title = {{Static window transition graphs for Android}},
url = {https://doi.org/10.1007/s10515-018-0237-6},
volume = {25},
year = {2018}
}
@article{Rountev2014,
abstract = {The popularity of Android software has grown dramatically in the last few years. It is essential for researchers in programming languages and compilers to contribute new techniques in this increasingly important area. Such techniques require a foundation of program analyses for Android. The target of our work is static object reference analysis, which models the flow of object references. Existing reference analyses cannot be applied directly to Android because the software is component-based and event-driven. An Android application is driven by a graphical user interface (GUI), with GUI objects responding to user actions. These objects and the event handlers associated with them ultimately determine the possible flow of control and data. We propose the first static analysis to model GUI-related Android objects, their flow through the application, and their interactions with each other via the abstractions defined by the Android platform. A formal semantics for the relevant Android constructs is developed to provide a solid foundation for this and other analyses. Next, we propose a constraint-based reference analysis based on the semantics. The analysis employs a constraint graph to model the flow of GUI objects, the hierarchical structure of these objects, and the effects of relevant Android operations. Experimental evaluation on real-world Android applications strongly suggests that the analysis achieves high precision with low cost. The analysis enables static modeling of control/data flow that is foundational for compiler analyses, instrumentation for event/interaction profiling, static error checking, security analysis, test generation, and automated debugging. It provides a key component to be used by compile-time analysis researchers in the growing area of Android software. Copyright {\textcopyright} 2014 by the Association for Computing Machinery, Inc. (ACM).},
author = {Rountev, Atanas and Yan, Dacong},
doi = {10.1145/2544137.2544159},
isbn = {9781450326704},
journal = {Proceedings of the 12th ACM/IEEE International Symposium on Code Generation and Optimization, CGO 2014},
keywords = {Android,GUI analysis,Reference analysis},
pages = {143--153},
title = {{Static reference analysis for GUI objects in Android software}},
year = {2014}
}
@phdthesis{Yang2015,
abstract = {With the fast growing complexity of software systems, developers experience new challenges in understanding program's behavior to reveal performance and functional deficiencies and to support development, testing, debugging, optimization, and maintenance. These issues are especially important to mobile software due to limited computing resources on mobile devices, as well as short development life cycles. The correctness, security, and performance of mobile software is of paramount importance for many millions of users. For software engineering researchers, this raises high expectations for developing a comprehensive toolset of approaches for understanding, testing, checking, and verification of Android software. Static program analyses are essential components of such a toolset. Because of the event-driven and framework-based nature of the Android programming model, it is challenging to clearly understand application semantics and to represent it in static analysis algorithms. This dissertation makes several contributions towards solving this challenge. The ability to understand the interprocedural control flow is critical for reasoning statically about the semantics of a program. For Android, this flow is driven by the Graphical User Interface (GUI) of the application. As the first contribution of this dissertation, we propose a novel technique that analyzes the control flow of GUI event handlers in Android software. We build a callback control-flow graph, using a context-sensitive static analysis of callback methods such as GUI event handlers. The algorithm performs a graph reachability analysis by traversing context-compatible interprocedural control-flow paths and identifying statements that may trigger callbacks, as well as paths that avoid such statements. We also develop a client analysis that builds a static model of the application's GUI. Experimental evaluation shows that this context-sensitive approach leads to substantial precision improvements, while having practical cost. The next contribution of this dissertation is an even more general model and static analysis of the control flow of an Android application's GUI. We propose the window transition graph (WTG), a model representing the possible GUI window sequences and their associated events and callbacks. A key component and contribution of our work is the careful modeling of the stack of currently-active windows, the changes to this stack, and the effects of callbacks related to these changes. To the best of our knowledge, this is the first detailed study of this important static analysis problem for Android. We develop novel analysis algorithms for WTG construction and traversal, based on this modeling of the window stack. We also describe an application of the WTG for GUI test generation, using path traversals. The evaluation of the proposed algorithms indicates their effectiveness and practicality. Users' interactions with Android applications trigger callbacks in the UI thread. The handling of such events may initialize work in the background in order to perform expensive tasks. Because Android does not allow non-UI threads modifying the GUI state, standard Android “post” operations play a critical role in communicating between background and UI threads. To understand this additional aspect of Android semantics, we introduce a static analysis to model operations that post runnable tasks from non-UI threads to the UI thread's event queue.
The results of this analysis are used to create a more general version of the WTG. This new WTG and the related static analysis present an important step toward other more comprehensive modeling of Android semantics. The experimental evaluation of the proposed representation indicates promising overall accuracy improvements. To conclude, this dissertation presents several static analysis techniques to model the behaviors of the GUIs of Android applications. These analyses present essential foundation for developing tools to uncover the symptoms of both functional and performance issues in the mobile system, to perform model-based testing, and to support the understanding, optimization, and evolution of Android software.},
author = {Yang, Shengqian},
school = {The Ohio State University},
title = {{Static Analyses of GUI Behavior in Android Applications}},
year = {2015}
}
@article{Wysopal2010,
abstract = {This paper describes a high level classification of backdoors that have been detected in applications. It provides real world examples of application backdoors, a generalization of the mechanisms they use, and strategies for detecting these mechanisms. These strategies encompass detection using static analysis of source or binary code.},
author = {Wysopal, Chris and Eng, Chris and Shields, Tyler},
doi = {10.1007/s11623-010-0024-4},
issn = {1614-0702},
journal = {Datenschutz und Datensicherheit - DuD},
number = {3},
pages = {149--155},
title = {{Static detection of application backdoors}},
volume = {34},
year = {2010}
}
@article{Trostanetski2017,
abstract = {In this work we present a modular and demand-driven analysis of the semantic difference between program versions. Our analysis characterizes initial states for which final states in the program versions differ. It also characterizes states for which the final states are identical. Such characterizations are useful for regression verification, for revealing security vulnerabilities and for identifying changes in the program's functionality. Syntactic changes in program versions are often small and local and may apply to procedures that are deep in the call graph. Our approach analyses only those parts of the programs that are affected by the changes. Moreover, the analysis is modular, processing a single pair of procedures at a time. Called procedures are not inlined. Rather, their previously computed summaries and difference summaries are used. For efficiency, procedure summaries and difference summaries can be abstracted and may be refined on demand. We have compared our method to well established tools and observed speedups of one order of magnitude and more. Furthermore, in many cases our tool proves equivalence or finds differences while the others fail to do so.},
author = {Trostanetski, Anna and Grumberg, Orna and Kroening, Daniel},
doi = {10.1007/978-3-319-66706-5_20},
isbn = {9783319667058},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {405--427},
title = {{Modular demand-driven analysis of semantic difference for program versions}},
volume = {10422 LNCS},
year = {2017}
}
@article{Li2018,
abstract = {With the thriving of mobile app markets, third-party libraries are pervasively used in Android applications. The libraries provide functionality such as advertising, location, and social networking services, making app development much more productive. However, the spread of vulnerable and harmful third-party libraries can also hurt the mobile ecosystem, leading to various security problems. Therefore, third-party library identification has emerged as an important problem and the basis of many security applications such as repackaging detection, vulnerability identification, and malware analysis. Previously, we proposed a novel approach to identifying third-party Android libraries at a massive scale. Our method uses the internal code dependencies of an app to detect and classify library candidates. With a fine-grained feature hashing strategy, it can better handle code whose package and method names are obfuscated. We have developed a prototypical tool called LibD and evaluated it with an up-to-date and humongous dataset. Our experimental results on 1,427,395 apps show that compared to existing tools, LibD can better handle multi-package third-party libraries in the presence of name-based obfuscation, leading to significantly improved precision without the loss of scalability. In this paper, we extend our previous work by demonstrating that effective and scalable library detection can significantly improve the performance of large-scale app analyses in the real world. We show that the technique of LibD can be used to speed up whole-app Android vulnerability detection and quickly identify variants of vulnerable third-party libraries. The extension sheds light on the practical value of our previous work.},
author = {Li, Menghao and Wang, Pei and Wang, Wei and Wang, Shuai and Wu, Dinghao and Liu, Jian and Xue, Rui and Huo, Wei and Zou, Wei},
doi = {10.1109/TSE.2018.2872958},
issn = {19393520},
journal = {IEEE Transactions on Software Engineering},
keywords = {Android,third-party library,code similarity detection,software mining},
title = {{Large-scale Third-party Library Detection in Android Markets}},
year = {2018}
}
@article{Gruska2010,
abstract = {Real production code contains lots of knowledge - on the domain, on the architecture, and on the environment. How can we leverage this knowledge in new projects? Using a novel lightweight source code parser, we have mined more than 6,000 open source Linux projects (totaling 200,000,000 lines of code) to obtain 16,000,000 temporal properties reflecting normal interface usage. New projects can be checked against these rules to detect anomalies - that is, code that deviates from the wisdom of the crowds. In a sample of 20 projects, ∼25{\%} of the top-ranked anomalies uncovered actual code smells or defects. {\textcopyright} 2010 ACM.},
author = {Gruska, Natalie and Wasylkowski, Andrzej and Zeller, Andreas},
doi = {10.1145/1831708.1831723},
isbn = {9781605588230},
journal = {ISSTA'10 - Proceedings of the 2010 International Symposium on Software Testing and Analysis},
keywords = {Formal concept analysis,Language independent parsing,Lightweight parsing,Mining specifications,Temporal properties},
pages = {119--129},
title = {{Learning from 6,000 projects: Lightweight cross-project anomaly detection}},
year = {2010}
}
@misc{Analyses2017,
author = {{Program Analyses and Software Tools (PRESTO) Research Group, The Ohio State University}},
title = {{GATOR: Program Analysis Toolkit for Android}},
url = {http://web.cse.ohio-state.edu/presto/software/gator/},
year = {2017}
}
@article{Lima2018,
abstract = {In a growing number of domains, the provisioning of end-to-end services to the users depends on the proper interoperation of multiple systems, forming a new distributed system, often subject to timing constraints. To ensure interoperability and integrity, it is important to conduct integration tests that verify the interactions with the environment and between the system components in key scenarios. To tackle test automation challenges, we propose algorithms for decentralized conformance checking and test input generation, and for checking and enforcing the conditions (local observability and controllability) that allow decentralized test execution.},
author = {Lima, Bruno},
doi = {10.1145/3236024.3275431},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {Distributed Systems,Integration Testing,Scenario-based Testing},
pages = {956--958},
title = {{Automated scenario-based integration testing of distributed systems}},
year = {2018}
}
@article{Wang2015,
author = {Wang, Haoyu and Klein, Jacques},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {ad fraud,Android,automation,mobile app,user interface},
title = {{FraudDroid: Automated Ad Fraud Detection for Android Apps}},
year = {2018}
}
@book{Jedrzejowicz2005,
author = {Jȩdrzejowicz, Joanna and Szepietowski, Andrzej},
booktitle = {Lecture Notes in Computer Science},
isbn = {9783540735885},
issn = {03029743},
title = {{Lecture Notes in Computer Science: Preface}},
volume = {3618},
year = {2005}
}
@article{Sergey2015,
abstract = {We present a lightweight approach to Hoare-style specifications for fine-grained concurrency, based on a notion of time-stamped histories that abstractly capture atomic changes in the program state. Our key observation is that histories form a partial commutative monoid, a structure fundamental for representation of concurrent resources. This insight provides us with a unifying mechanism that allows us to treat histories just like heaps in separation logic. For example, both are subject to the same assertion logic and inference rules (e.g., the frame rule). Moreover, the notion of ownership transfer, which usually applies to heaps, has an equivalent in histories. It can be used to formally represent helping---an important design pattern for concurrent algorithms whereby one thread can execute code on behalf of another. Specifications in terms of histories naturally abstract granularity, in the sense that sophisticated fine-grained algorithms can be given the same specifications as their simplified coarse-grained counterparts, making them equally convenient for client-side reasoning. We illustrate our approach on a number of examples and validate all of them in Coq.},
archivePrefix = {arXiv},
arxivId = {1410.0306},
author = {Sergey, Ilya and Nanevski, Aleksandar and Banerjee, Anindya},
doi = {10.1007/978-3-662-46669-8_14},
eprint = {1410.0306},
isbn = {9783662466681},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
number = {1},
pages = {333--358},
title = {{Specifying and verifying concurrent algorithms with histories and subjectivity}},
volume = {9032},
year = {2015}
}
@article{Liang2016,
author = {Liang, Hongjin and Feng, Xinyu},
doi = {10.1145/2837614.2837635},
isbn = {9781450335492},
issn = {15232867},
journal = {POPL'16: Proceedings of the 43rd ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
keywords = {concurrency,program logic,progress,reasoning,refinement,rely-guarantee},
pages = {385--399},
title = {{A Program Logic for Concurrent Objects under Fair Scheduling}},
year = {2016}
}
@article{Krebbers2016,
author = {Krebbers, Robbert and Timany, Amin and Birkedal, Lars},
isbn = {9781450346603},
keywords = {coq,fine-grained concurrency,interactive theorem proving,logical relations,separation logic},
title = {{Interactive Proofs in Higher-Order Concurrent Separation Logic}},
journal = {POPL 2017 - Proceedings of the 44th ACM SIGPLAN Symposium on Principles of Programming Languages},
year = {2017}
}
@article{Reynolds2002,
abstract = {In joint work with Peter O'Hearn and others, based on early ideas of Burstall, we have developed an extension of Hoare logic that permits reasoning about low-level imperative programs that use shared mutable data structure. The simple imperative programming language is extended with commands (not expressions) for accessing and modifying shared structures, and for explicit allocation and deallocation of storage. Assertions are extended by introducing a "separating conjunction" that asserts that its subformulas hold for disjoint parts of the heap, and a closely related "separating implication". Coupled with the inductive definition of predicates on abstract data structures, this extension permits the concise and flexible description of structures with controlled sharing. In this paper, we survey the current development of this program logic, including extensions that permit unrestricted address arithmetic, dynamically allocated arrays, and recursive procedures. We also discuss promising future directions.},
author = {Reynolds, John C.},
doi = {10.1109/LICS.2002.1029817},
isbn = {0-7695-1483-9},
issn = {1043-6871},
journal = {Symposium on Logic in Computer Science},
number = {1},
pages = {55--74},
title = {{Separation logic: a logic for shared mutable data structures}},
volume = {0},
year = {2002}
}
@article{Marlow2008,
abstract = {We present a parallel generational-copying garbage collector implemented for the Glasgow Haskell Compiler. We use a block-structured memory allocator, which provides a natural granularity for dividing the work of GC between many threads, leading to a simple yet effective method for parallelising copying GC. The results are encouraging: we demonstrate wall-clock speedups of on average a factor of 2 in GC time on a commodity 4-core machine with no programmer intervention, compared to our best sequential GC.},
author = {Marlow, Simon and Harris, Tim and James, Roshan P. and {Peyton Jones}, Simon},
doi = {10.1145/1375634.1375637},
isbn = {9781605581347},
journal = {Proceedings of the 7th international symposium on Memory management - ISMM '08},
keywords = {concurrency,garbage collection,haskell,memory management},
pages = {11},
title = {{Parallel generational-copying garbage collection with a block-structured heap}},
url = {http://portal.acm.org/citation.cfm?doid=1375634.1375637},
year = {2008}
}
@article{SIVARAMAKRISHNAN2016,
abstract = {The runtime for a modern, concurrent, garbage collected language like Java or Haskell is like an operating system: sophisticated, complex, performant, but alas very hard to change. If more of the runtime system were in the high-level language, it would be far more modular and malleable. In this paper, we describe a novel concurrency substrate design for the Glasgow Haskell Compiler that allows multicore schedulers for concurrent and parallel Haskell programs to be safely and modularly described as libraries in Haskell. The approach relies on abstracting the interface to the user-implemented schedulers through scheduler activations, together with the use of Software Transactional Memory to promote safety in a multicore context.},
author = {Sivaramakrishnan, K. C. and Harris, Tim and Marlow, Simon and {Peyton Jones}, Simon},
doi = {10.1017/S0956796816000071},
issn = {0956-7968},
journal = {Journal of Functional Programming},
number = {April},
pages = {e9},
title = {{Composable scheduler activations for Haskell}},
url = {http://www.journals.cambridge.org/abstract{\_}S0956796816000071},
volume = {26},
year = {2016}
}
@article{Kang2016,
author = {Kang, Jeehoon and Dreyer, Derek},
keywords = {operational semantics,weak memory models},
pages = {1--17},
title = {{A Promising Semantics for Relaxed-Memory Concurrency}},
year = {2016}
}
@article{Loncaric2018,
abstract = {Data structure synthesis is the task of generating data structure implementations from high-level specifications. Recent work in this area has shown potential to save programmer time and reduce the risk of defects. Existing techniques focus on data structures for manipulating subsets of a single collection, but real-world programs often track multiple related collections and aggregate properties such as sums, counts, minimums, and maximums. This paper shows how to synthesize data structures that track subsets and aggregations of multiple related collections. Our technique decomposes the synthesis task into alternating steps of query synthesis and incrementalization. The query synthesis step implements pure operations over the data structure state by leveraging existing enumerative synthesis techniques, specialized to the data structures domain. The incrementalization step implements imperative state modifications by re-framing them as fresh queries that determine what to change, coupled with a small amount of code to apply the change. As an added benefit of this approach over previous work, the synthesized data structure is optimized for not only the queries in the specification but also the required update operations. We have evaluated our approach in four large case studies, demonstrating that these extensions are broadly applicable.},
author = {Loncaric, Calvin and Ernst, Michael D. and Torlak, Emina},
doi = {10.1145/3180155.3180211},
isbn = {9781450356381},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Automatic programming,Data structures,Program synthesis},
pages = {958--968},
title = {{Generalized data structure synthesis}},
year = {2018}
}
@article{Swamy2011,
abstract = {Distributed applications are difficult to program reliably and securely. Dependently typed functional languages promise to prevent broad classes of errors and vulnerabilities, and to enable program verification to proceed side-by-side with development. However, as recursion, effects, and rich libraries are added, using types to reason about programs, specifications, and proofs becomes challenging. We present F-star, a full-fledged design and implementation of a new dependently typed language for secure distributed programming. Unlike prior languages, F-star provides arbitrary recursion while maintaining a logically consistent core; it enables modular reasoning about state and other effects using affine types; and it supports proofs of refinement properties using a mixture of cryptographic evidence and logical proof terms. The key mechanism is a new kind system that tracks several sub-languages within F-star and controls their interaction. F-star subsumes two previous languages, F7 and Fine. We prove type soundness (with proofs mechanized in Coq) and logical consistency for F-star. We have implemented a compiler that translates F-star to NET bytecode, based on a prototype for Fine. F-star provides access to libraries for concurrency, networking, cryptography, and interoperability with C{\#}, F{\#}, and the other .NET languages. The compiler produces verifiable binaries with 60{\%} code size overhead for proofs and types, as much as a 45x improvement over the Fine compiler, while still enabling efficient bytecode verification. To date, we have programmed and verified more than 20,000 lines of F-star including (1) new schemes for multi-party sessions; (2) a zero-knowledge privacy-preserving payment protocol; (3) a provenance-aware curated database; (4) a suite of 17 web-browser extensions verified for authorization properties; and (5) a cloud-hosted multi-tier web application with a verified reference monitor.},
author = {Swamy, Nikhil and Chen, Juan and Fournet, C{\'{e}}dric and Strub, Pierre-Yves and Bhargavan, Karthikeyan and Yang, Jean},
doi = {10.1145/2034574.2034811},
isbn = {9781450308656},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {refinement types,security,security type systems,verification},
month = {sep},
number = {9},
pages = {266},
title = {{Secure distributed programming with value-dependent types}},
url = {http://dl.acm.org/citation.cfm?doid=2034574.2034811},
volume = {46},
year = {2011}
}
@article{VanHorn2012,
abstract = {We describe a derivational approach to abstract interpretation that yields novel and transparently sound static analyses when applied to well-established abstract machines for higher-order and imperative programming languages. To demonstrate the technique and support our claim, we transform the CEK machine of Felleisen and Friedman, a lazy variant of Krivine's machine, and the stack-inspecting CM machine of Clements and Felleisen into abstract interpretations of themselves. The resulting analyses bound temporal ordering of program events; predict return-flow and stack-inspection behavior; and approximate the flow and evaluation of by-need parameters. For all of these machines, we find that a series of well-known concrete machine refactorings, plus a technique of store-allocated continuations, leads to machines that abstract into static analyses simply by bounding their stores. We demonstrate that the technique scales up uniformly to allow static analysis of realistic language features, including tail calls, conditionals, side effects, exceptions, first-class continuations, and even garbage collection. In order to close the gap between formalism and implementation, we provide translations of the mathematics as running Haskell code for the initial development of our method.},
archivePrefix = {arXiv},
arxivId = {1107.3539},
author = {{Van Horn}, David and Might, Matthew},
doi = {10.1017/S0956796812000238},
eprint = {1107.3539},
issn = {0956-7968},
journal = {Journal of Functional Programming},
number = {4-5},
pages = {705--746},
title = {{Systematic abstraction of abstract machines}},
url = {http://www.journals.cambridge.org/abstract{\_}S0956796812000238},
volume = {22},
year = {2012}
}
@article{Devlin2018,
archivePrefix = {arXiv},
arxivId = {1805.04276},
author = {Bunel, Rudy and Hausknecht, Matthew and Devlin, Jacob and Singh, Rishabh and Kohli, Pushmeet},
eprint = {1805.04276},
journal = {6th International Conference on Learning Representations, ICLR 2018},
pages = {1--15},
title = {{Leveraging Grammar and Reinforcement Learning for Neural Program Synthesis}},
year = {2018}
}
@article{Sheard2002,
author = {Sheard, Tim and {Peyton Jones}, Simon},
isbn = {1581134150},
journal = {Proc. of the 2002 ACM SIGPLAN Workshop on Haskell},
keywords = {meta programming,templates},
pages = {1--16},
title = {{Template Meta-Programming for Haskell}},
year = {2002}
}
@article{Krebbers,
abstract = {Concurrent separation logics (CSLs) have come of age, and with age they have accumulated a great deal of complexity. Previous work on the Iris logic attempted to reduce the complex logical mechanisms of modern CSLs to two orthogonal concepts: partial commutative monoids (PCMs) and invariants. However, the realization of these concepts in Iris still bakes in several complex mechanisms—such as weakest preconditions and mask-changing view shifts—as primitive notions. In this paper, we take the Iris story to its (so to speak) logical conclusion, applying the reductionist methodology of Iris to Iris itself. Specifically, we define a small, resourceful base logic, which distills the essence of Iris: it comprises only the assertion layer of vanilla separation logic, plus a handful of simple modalities. We then show how the much fancier logical mechanisms of Iris—in particular, its entire program specification layer—can be understood as merely derived forms in our base logic. This approach helps to explain the meaning of Iris's program specifications at a much higher level of abstraction than was previously possible. We also show that the step-indexed "later" modality of Iris is an essential source of complexity, in that removing it leads to a logical inconsistency. All our results are fully formalized in the Coq proof assistant.},
author = {Krebbers, Robbert and Jung, Ralf and Jourdan, Jacques-Henri and Dreyer, Derek and Birkedal, Lars},
pages = {1--29},
title = {{The Essence of Higher-Order Concurrent Separation Logic}},
year = {2017}
}
@article{Lahiri2010,
abstract = {It is widely believed that program analysis can be more closely targeted to the needs of programmers if the program is accompanied by further redundant documentation. This may include regression test suites, API protocol usage, and code contracts. To this should be added the largest and most redundant text of all: the previous version of the same program. It is the differences between successive versions of a legacy program already in use which occupy most of a programmer's time. Although differential analysis in the form of equivalence checking has been quite successful for hardware designs, it has not received as much attention in the static program analysis community. This paper briefly summarizes the current state of the art in differential static analysis for software, and suggests a number of promising applications. Although regression test generation has often been thought of as the ultimate goal of differential analysis, we highlight several other applications that can be enabled by differential static analysis. This includes equivalence checking, semantic diffing, differential contract checking, summary validation, invariant discovery and better debugging. We speculate that differential static analysis tools have the potential to be widely deployed on the developer's toolbox despite the fundamental stumbling blocks that limit the adoption of static analysis. Copyright 2010 ACM.},
author = {Lahiri, Shuvendu K. and Vaswani, Kapil and Hoare, C. A.R.},
doi = {10.1145/1882362.1882405},
isbn = {9781450304276},
journal = {Proceedings of the FSE/SDP Workshop on the Future of Software Engineering Research, FoSER 2010},
keywords = {Differential analysis,Equivalence checking,Regression testing,Semantic diff,Static analysis},
pages = {201--204},
title = {{Differential static analysis: Opportunities, applications, and challenges}},
year = {2010}
}
@article{Swamy2016,
abstract = {We present F*, a new language that works both as a proof assistant as well as a general-purpose, verification-oriented, effectful programming language. In support of these complementary roles, F* is a dependently typed, higher-order, call-by-value language with primitive effects including state, exceptions, divergence and IO. Although primitive, programmers choose the granularity at which to specify effects by equipping each effect with a monadic, predicate transformer semantics. F* uses this to efficiently compute weakest preconditions and discharges the resulting proof obligations using a combination of SMT solving and manual proofs. Isolated from the effects, the core of F* is a language of pure functions used to write specifications and proof terms—its consistency is maintained by a semantic termination check based on a well-founded order. We evaluate our design on more than 55,000 lines of F* we have authored in the last year, focusing on three main case studies. Showcasing its use as a general-purpose programming language, F* is programmed (but not verified) in F*, and bootstraps in both OCaml and F{\#}. Our experience confirms F*'s pay-as-you-go cost model: writing idiomatic ML-like code with no finer specifications imposes no user burden. As a verification-oriented language, our most significant evaluation of F* is in verifying several key modules in an implementation of the TLS-1.2 protocol standard. For the modules we considered, we are able to prove more properties, with fewer annotations using F* than in a prior verified implementation of TLS-1.2. Finally, as a proof assistant, we discuss our use of F* in mechanizing the metatheory of a range of lambda calculi, starting from the simply typed lambda calculus to F$\omega$ and even µF*, a sizeable fragment of F* itself—these proofs make essential use of F*'s flexible combination of SMT automation and constructive proofs, enabling a tactic-free style of programming and proving at a relatively large scale.},
author = {Swamy, Nikhil and Hritcu, Catalin and Keller, Chantal and Rastogi, Aseem and Delignat-Lavaud, Antoine and Forest, Simon and Bhargavan, Karthikeyan and Fournet, C{\'{e}}dric and Strub, Pierre-Yves and Kohlweiss, Markulf and Zinzindohoue, Jean-Karim and Zanella-B{\'{e}}guelin, Santiago},
doi = {10.1145/2837614.2837655},
isbn = {978-1-4503-3549-2},
issn = {15232867},
journal = {POPL'16: Proceedings of the 43rd ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
keywords = {effectful programming,proof assistants,verification},
pages = {256--270},
title = {{Dependent Types and Multi-Monadic Effects in F*}},
year = {2016}
}
@article{cibior2015,
abstract = {The machine learning community has recently shown a lot of interest in practical probabilistic programming systems that target the problem of Bayesian inference. Such systems come in different forms, but they all express probabilistic models as computational processes using syntax resembling programming languages. In the functional programming community monads are known to offer a convenient and elegant abstraction for programming with probability distributions, but their use is often limited to very simple inference problems. We show that it is possible to use the monad abstraction to construct probabilistic models for machine learning, while still offering good performance of inference in challenging models. We use a GADT as an underlying representation of a probability distribution and apply Sequential Monte Carlo-based methods to achieve efficient inference. We define a formal semantics via measure theory. We demonstrate a clean and elegant implementation that achieves performance comparable with Anglican, a state-of-the-art probabilistic programming system.},
author = {{\'{S}}cibior, Adam and Ghahramani, Zoubin and Gordon, Andrew D.},
doi = {10.1145/2804302.2804317},
isbn = {9781450338080},
journal = {Haskell 2015 - Proceedings of the 8th ACM SIGPLAN Symposium on Haskell, co-located with ICFP 2015},
keywords = {Bayesian statistics,Haskell,Monads,Monte Carlo,Probabilistic programming},
pages = {165--176},
title = {{Practical probabilistic programming with monads}},
year = {2015}
}
@article{Cusumano-Towner2019,
abstract = {Although probabilistic programming is widely used for some restricted classes of statistical models, existing systems lack the flexibility and efficiency needed for practical use with more challenging models arising in fields like computer vision and robotics. This paper introduces Gen, a general-purpose probabilistic programming system that achieves modeling flexibility and inference efficiency via several novel language constructs: (i) the generative function interface for encapsulating probabilistic models; (ii) interoperable modeling languages that strike different flexibility/efficiency tradeoffs; (iii) combinators that exploit common patterns of conditional independence; and (iv) an inference library that empowers users to implement efficient inference algorithms at a high level of abstraction. We show that Gen outperforms state-of-the-art probabilistic programming systems, sometimes by multiple orders of magnitude, on diverse problems including object tracking, estimating 3D body pose from a depth image, and inferring the structure of a time series.},
author = {Cusumano-Towner, Marco F. and Lew, Alexander K. and Saad, Feras A. and Mansinghka, Vikash K.},
doi = {10.1145/3314221.3314642},
isbn = {9781450367127},
journal = {Proceedings of the ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI)},
keywords = {Markov chain Monte Carlo,Probabilistic programming,Sequential Monte Carlo,Variational inference},
pages = {221--236},
title = {{Gen: A general-purpose probabilistic programming system with programmable inference}},
year = {2019}
}
@article{Ellis2019,
abstract = {We present a neural program synthesis approach integrating components which write, execute, and assess code to navigate the search space of possible programs. We equip the search process with an interpreter or a read-eval-print-loop (REPL), which immediately executes partially written programs, exposing their semantics. The REPL addresses a basic challenge of program synthesis: tiny changes in syntax can lead to huge changes in semantics. We train a pair of models, a policy that proposes the new piece of code to write, and a value function that assesses the prospects of the code written so-far. At test time we can combine these models with a Sequential Monte Carlo algorithm. We apply our approach to two domains: synthesizing text editing programs and inferring 2D and 3D graphics programs.},
archivePrefix = {arXiv},
arxivId = {1906.04604},
author = {Ellis, Kevin and Nye, Maxwell and Pu, Yewen and Sosa, Felix and Tenenbaum, Josh and Solar-Lezama, Armando},
eprint = {1906.04604},
title = {{Write, Execute, Assess: Program Synthesis with a REPL}},
url = {http://arxiv.org/abs/1906.04604},
year = {2019}
}
@article{Bastani2017,
abstract = {We present an algorithm for synthesizing a context-free grammar encoding the language of valid program inputs from a set of input examples and blackbox access to the program. Our algorithm addresses shortcomings of existing grammar inference algorithms, which both severely overgeneralize and are prohibitively slow. Our implementation, GLADE, leverages the grammar synthesized by our algorithm to fuzz test programs with structured inputs. We show that GLADE substantially increases the incremental coverage on valid inputs compared to two baseline fuzzers.},
author = {Bastani, Osbert and Sharma, Rahul and Aiken, Alex and Liang, Percy},
doi = {10.1145/3140587.3062349},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {fuzzing,grammar synthesis},
number = {6},
pages = {95--110},
title = {{Synthesizing program input grammars}},
volume = {52},
year = {2017}
}
@article{Song2018,
abstract = {We study the problem of learning a good search policy for combinatorial search spaces. We propose retrospective imitation learning, which, after initial training by an expert, improves itself by learning from retrospective inspections of its own roll-outs. That is, when the policy eventually reaches a feasible solution in a combinatorial search tree after making mistakes and backtracks, it retrospectively constructs an improved search trace to the solution by removing backtracks, which is then used to further train the policy. A key feature of our approach is that it can iteratively scale up, or transfer, to larger problem sizes than those solved by the initial expert demonstrations, thus dramatically expanding its applicability beyond that of conventional imitation learning. We showcase the effectiveness of our approach on a range of tasks, including synthetic maze solving and combinatorial problems expressed as integer programs.},
archivePrefix = {arXiv},
arxivId = {1804.00846},
author = {Song, Jialin and Lanka, Ravi and Zhao, Albert and Bhatnagar, Aadyot and Yue, Yisong and Ono, Masahiro},
eprint = {1804.00846},
title = {{Learning to Search via Retrospective Imitation}},
url = {http://arxiv.org/abs/1804.00846},
year = {2018}
}
@article{Nye2019,
abstract = {Our goal is to build systems which write code automatically from the kinds of specifications humans can most easily provide, such as examples and natural language instruction. The key idea of this work is that a flexible combination of pattern recognition and explicit reasoning can be used to solve these complex programming problems. We propose a method for dynamically integrating these types of information. Our novel intermediate representation and training algorithm allow a program synthesis system to learn, without direct supervision, when to rely on pattern recognition and when to perform symbolic search. Our model matches the memorization and generalization performance of neural synthesis and symbolic search, respectively, and achieves state-of-the-art performance on a dataset of simple English description-to-code programming problems.},
archivePrefix = {arXiv},
arxivId = {1902.06349},
author = {Nye, Maxwell and Hewitt, Luke and Tenenbaum, Joshua and Solar-Lezama, Armando},
eprint = {1902.06349},
title = {{Learning to Infer Program Sketches}},
url = {http://arxiv.org/abs/1902.06349},
year = {2019}
}
@article{Wu,
author = {Wu, Zhengkai and Johnson, Evan and Bastani, Osbert and Song, Dawn},
isbn = {9781450355728},
journal = {ESEC/FSE 2019 - Proceedings of the 2019 27th ACM Joint Meeting European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {dynamic symbolic execution,grammar synthesis,reinforcement learning},
pages = {488--498},
title = {{REINAM: Reinforcement Learning for Input-Grammar Inference}},
year = {2019}
}
@article{Chen2019,
abstract = {Neural program synthesis from input-output examples has attracted an increasing interest from both the machine learning and the programming language community. Most existing neural program synthesis approaches employ an encoder-decoder architecture, which uses an encoder to compute the embedding of the given input-output examples, as well as a decoder to generate the program from the embedding following a given syntax. Although such approaches achieve a reasonable performance on simple tasks such as FlashFill, on more complex tasks such as Karel, the state-of-the-art approach can only achieve an accuracy of around 77{\%}. We observe that the main drawback of existing approaches is that the semantic information is greatly under-utilized. In this work, we propose two simple yet principled techniques to better leverage the semantic information, which are execution-guided synthesis and synthesizer ensemble. These techniques are general enough to be combined with any existing encoder-decoder-style neural program synthesizer. Applying our techniques to the Karel dataset, we can boost the accuracy from around 77{\%} to more than 90{\%}.},
author = {Chen, Xinyun and Liu, Chang and Song, Dawn},
journal = {7th International Conference on Learning Representations, ICLR 2019},
pages = {1--15},
title = {{Execution-guided neural program synthesis}},
year = {2019}
}
@article{Balunovic2018,
abstract = {We present a new approach for learning to solve SMT formulas. We phrase the challenge of solving SMT formulas as a tree search problem where at each step a transformation is applied to the input formula until the formula is solved. Our approach works in two phases: first, given a dataset of unsolved formulas we learn a policy that for each formula selects a suitable transformation to apply at each step in order to solve the formula, and second, we synthesize a strategy in the form of a loop-free program with branches. This strategy is an interpretable representation of the policy decisions and is used to guide the SMT solver to decide formulas more efficiently, without requiring any modification to the solver itself and without needing to evaluate the learned policy at inference time. We show that our approach is effective in practice - it solves 17{\%} more formulas over a range of benchmarks and achieves up to 100× runtime improvement over a state-of-the-art SMT solver.},
author = {Balunovi{\'{c}}, Mislav and Bielik, Pavol and Vechev, Martin},
issn = {10495258},
journal = {Advances in Neural Information Processing Systems},
number = {NeurIPS},
pages = {10317--10328},
title = {{Learning to solve SMT formulas}},
volume = {2018-December},
year = {2018}
}
@article{Chen2018,
abstract = {We introduce a learning-based framework to optimize tensor programs for deep learning workloads. Efficient implementations of tensor operators, such as matrix multiplication and high dimensional convolution, are key enablers of effective deep learning systems. However, current systems rely on manually optimized libraries, e.g., cuDNN, that support only a narrow range of server class GPUs. Such reliance limits the applicability of high-level graph optimizations and incurs significant engineering costs when deploying to new hardware targets. We use learning to remove this engineering burden. We learn domain-specific statistical cost models to guide the search of tensor operator implementations over billions of possible program variants. We further accelerate the search using effective model transfer across workloads. Experimental results show that our framework delivers performance that is competitive with state-of-the-art hand-tuned libraries for low-power CPUs, mobile GPUs, and server-class GPUs.},
archivePrefix = {arXiv},
arxivId = {1805.08166},
author = {Chen, Tianqi and Zheng, Lianmin and Yan, Eddie and Jiang, Ziheng and Moreau, Thierry and Ceze, Luis and Guestrin, Carlos and Krishnamurthy, Arvind},
eprint = {1805.08166},
issn = {10495258},
journal = {Advances in Neural Information Processing Systems},
number = {NeurIPS},
pages = {3389--3400},
title = {{Learning to optimize tensor programs}},
volume = {2018-December},
year = {2018}
}
@article{Wang2019,
abstract = {Constraint-solving is an expensive phase for scenario finding tools. It has been widely observed that there is no single 'dominant' SAT solver that always wins in every case; instead, the performance of different solvers varies by cases. Some SAT solvers perform particularly well for certain tasks while other solvers perform well for other tasks. In this paper, we propose an approach that uses machine learning techniques to automatically select a SAT solver for one of the widely used scenario finding tools, i.e. Alloy Analyzer, based on the features extracted from a given model. The goal is to choose the best SAT solver for a given model to minimize the expensive constraint solving time. We extract features from three different levels, i.e. the Alloy source code level, the Kodkod formula level and the boolean formula level. The experimental results show that our portfolio approach outperforms the best SAT solver by 30{\%} as well as the baseline approach by 128{\%} where users randomly select a solver for any given model.},
author = {Wang, Wenxi and Wang, Kaiyuan and Zhang, Mengshi and Khurshid, Sarfraz},
doi = {10.1109/ICST.2019.00031},
isbn = {9781728117355},
journal = {Proceedings - 2019 IEEE 12th International Conference on Software Testing, Verification and Validation, ICST 2019},
keywords = {Alloy Analyzer,Machine learning,SAT solver},
pages = {228--239},
publisher = {IEEE},
title = {{Learning to optimize the alloy analyzer}},
year = {2019}
}
@article{Cruciani2019,
abstract = {Test suite reduction approaches aim at decreasing software regression testing costs by selecting a representative subset from large-size test suites. Most existing techniques are too expensive for handling modern massive systems and moreover depend on artifacts, such as code coverage metrics or specification models, that are not commonly available at large scale. We present a family of novel very efficient approaches for similarity-based test suite reduction that apply algorithms borrowed from the big data domain together with smart heuristics for finding an evenly spread subset of test cases. The approaches are very general since they only use as input the test cases themselves (test source code or command line input). We evaluate four approaches in a version that selects a fixed budget B of test cases, and also in an adequate version that does the reduction guaranteeing some fixed coverage. The results show that the approaches yield a fault detection loss comparable to state-of-the-art techniques, while providing huge gains in terms of efficiency. When applied to a suite of more than 500K real world test cases, the most efficient of the four approaches could select B test cases (for varying B values) in less than 10 seconds.},
author = {Cruciani, Emilio and Miranda, Breno and Verdecchia, Roberto and Bertolino, Antonia},
doi = {10.1109/ICSE.2019.00055},
isbn = {9781728108698},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Clustering,Random projection,Similarity-based testing,Software testing,Test suite reduction},
pages = {419--429},
title = {{Scalable Approaches for Test Suite Reduction}},
volume = {2019-May},
year = {2019}
}
@article{Banerjee2019,
abstract = {NullPointerExceptions (NPEs) are a key source of crashes in modern Java programs. Previous work has shown how such errors can be prevented at compile time via code annotations and pluggable type checking. However, such systems have been difficult to deploy on large-scale software projects, due to significant build-time overhead and/or a high annotation burden. This paper presents NullAway, a new type-based null safety checker for Java that overcomes these issues. NullAway has been carefully engineered for low overhead, so it can run as part of every build. Further, NullAway reduces annotation burden through targeted unsound assumptions, aiming for no false negatives in practice on checked code. Our evaluation shows that NullAway has significantly lower build-time overhead (1.15×) than comparable tools (2.8-5.1×). Further, on a corpus of production crash data for widely-used Android apps built with NullAway, remaining NPEs were due to unchecked third-party libraries (64{\%}), deliberate error suppressions (17{\%}), or reflection and other forms of post-checking code modification (17{\%}), never due to NullAway's unsound assumptions for checked code.},
archivePrefix = {arXiv},
arxivId = {1907.02127},
author = {Banerjee, Subarno and Clapp, Lazaro and Sridharan, Manu},
doi = {10.1145/3338906.3338919},
eprint = {1907.02127},
isbn = {9781450355728},
journal = {ESEC/FSE 2019 - Proceedings of the 2019 27th ACM Joint Meeting European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {Null safety,Pluggable type systems,Static analysis,Type systems},
pages = {740--750},
title = {{NullAway: Practical type-based null safety for Java}},
year = {2019}
}
@article{Su2019,
abstract = {Modern software packages have become increasingly complex with millions of lines of code and references to many external libraries. Redundant operations are a common performance limiter in these code bases. Missed compiler optimization opportunities, inappropriate data structure and algorithm choices, and developers' inattention to performance are some common reasons for the existence of redundant operations. Developers mainly depend on compilers to eliminate redundant operations. However, compilers' static analysis often misses optimization opportunities due to ambiguities and limited analysis scope; automatic optimizations to algorithmic and data structural problems are out of scope. We develop LoadSpy, a whole-program profiler to pinpoint redundant memory load operations, which are often a symptom of many redundant operations. The strength of LoadSpy exists in identifying and quantifying redundant load operations in programs and associating the redundancies with program execution contexts and scopes to focus developers' attention on problematic code. LoadSpy works on fully optimized binaries, adopts various optimization techniques to reduce its overhead, and provides a rich graphic user interface, which make it a complete developer tool. Applying LoadSpy showed that a large fraction of redundant loads is common in modern software packages despite highest levels of automatic compiler optimizations. Guided by LoadSpy, we optimize several well-known benchmarks and real-world applications, yielding significant speedups.},
archivePrefix = {arXiv},
arxivId = {1902.05462},
author = {Su, Pengfei and Wen, Shasha and Yang, Hailong and Chabbi, Milind and Liu, Xu},
doi = {10.1109/ICSE.2019.00103},
eprint = {1902.05462},
isbn = {9781728108698},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Performance measurement,Software optimization,Tools,Whole-program profiling},
pages = {982--993},
title = {{Redundant Loads: A Software Inefficiency Indicator}},
volume = {2019-May},
year = {2019}
}
@article{Wang2019a,
abstract = {Crowdtesting has become an effective alternative to traditional testing, especially for mobile applications. However, crowdtesting is hard to manage in nature. Given the complexity of mobile applications and unpredictability of distributed crowdtesting processes, it is difficult to estimate (a) remaining number of bugs yet to be detected or (b) required cost to find those bugs. Experience-based decisions may result in ineffective crowdtesting processes, e.g., there is an average of 32{\%} wasteful spending in current crowdtesting practices. This paper aims at exploring automated decision support to effectively manage crowdtesting processes. It proposes an approach named ISENSE which applies incremental sampling technique to process crowdtesting reports arriving in chronological order, organizes them into fixed-size groups as dynamic inputs, and predicts two test completion indicators in an incremental manner. The two indicators are: 1) total number of bugs predicted with Capture-ReCapture model, and 2) required test cost for achieving certain test objectives predicted with AutoRegressive Integrated Moving Average model. The evaluation of ISENSE is conducted on 46,434 reports of 218 crowdtesting tasks from one of the largest crowdtesting platforms in China. Its effectiveness is demonstrated through two application studies for automating crowdtesting management and semi-automation of task closing trade-off analysis. The results show that ISENSE can provide managers with greater awareness of testing progress to achieve cost-effectiveness gains of crowdtesting. Specifically, a median of 100{\%} bugs can be detected with 30{\%} saved cost based on the automated close prediction.},
author = {Wang, Junjie and Yang, Ye and Krishna, Rahul and Menzies, Tim and Wang, Qing},
doi = {10.1109/ICSE.2019.00097},
isbn = {9781728108698},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Crowdtesting,automated close prediction,crowdtesting management,test completion},
pages = {912--923},
title = {{ISENSE: Completion-Aware Crowdtesting Management}},
volume = {2019-May},
year = {2019}
}
@article{Qiu2019,
abstract = {Sustained participation by contributors in opensource software is critical to the survival of open-source projects and can provide career advancement benefits to individual contributors. However, not all contributors reap the benefits of open-source participation fully, with prior work showing that women are particularly underrepresented and at higher risk of disengagement. While many barriers to participation in open-source have been documented in the literature, relatively little is known about how the social networks that open-source contributors form impact their chances of long-term engagement. In this paper we report on a mixed-methods empirical study of the role of social capital (i.e., the resources people can gain from their social connections) for sustained participation by women and men in open-source GitHub projects. After combining survival analysis on a large, longitudinal data set with insights derived from a user survey, we confirm that while social capital is beneficial for prolonged engagement for both genders, women are at disadvantage in teams lacking diversity in expertise.},
author = {Qiu, Huilian Sophie and Nolte, Alexander and Brown, Anita and Serebrenik, Alexander and Vasilescu, Bogdan},
doi = {10.1109/ICSE.2019.00078},
isbn = {9781728108698},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {gender,open source software,social capital},
pages = {688--699},
title = {{Going Farther Together: The Impact of Social Capital on Sustained Participation in Open Source}},
volume = {2019-May},
year = {2019}
}
@article{Wei2019,
abstract = {The heavily fragmented Android ecosystem has induced various compatibility issues in Android apps. The search space for such fragmentation-induced compatibility issues (FIC issues) is huge, comprising three dimensions: device models, Android OS versions, and Android APIs. FIC issues, especially those arising from device models, evolve quickly with the frequent release of new device models to the market. As a result, an automated technique is desired to maintain timely knowledge of such FIC issues, which are mostly undocumented. In this paper, we propose such a technique, PIVOT, that automatically learns API-device correlations of FIC issues from existing Android apps. PIVOT extracts and prioritizes API-device correlations from a given corpus of Android apps. We evaluated PIVOT with popular Android apps on Google Play. Evaluation results show that PIVOT can effectively prioritize valid API-device correlations for app corpora collected at different time. Leveraging the knowledge in the learned API-device correlations, we further conducted a case study and successfully uncovered ten previously-undetected FIC issues in open-source Android apps.},
author = {Wei, Lili and Liu, Yepang and Cheung, Shing Chi},
doi = {10.1109/ICSE.2019.00094},
isbn = {9781728108698},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Android fragmentation,compatibility,learning,static analysis},
pages = {878--888},
title = {{PIVOT: Learning API-Device Correlations to Facilitate Android Compatibility Issue Detection}},
volume = {2019-May},
year = {2019}
}
@article{Saini2019,
abstract = {Current research in clone detection suffers from poor ecosystems for evaluating precision of clone detection tools. Corpora of labeled clones are scarce and incomplete, making evaluation labor intensive and idiosyncratic, and limiting intertool comparison. Precision-assessment tools are simply lacking. We present a semiautomated approach to facilitate precision studies of clone detection tools. The approach merges automatic mechanisms of clone classification with manual validation of clone pairs. We demonstrate that the proposed automatic approach has a very high precision and it significantly reduces the number of clone pairs that need human validation during precision experiments. Moreover, we aggregate the individual effort of multiple teams into a single evolving dataset of labeled clone pairs, creating an important asset for software clone research.},
archivePrefix = {arXiv},
arxivId = {1812.05195},
author = {Saini, Vaibhav and Farmahinifarahani, Farima and Lu, Yadong and Yang, Di and Martins, Pedro and Sajnani, Hitesh and Baldi, Pierre and Lopes, Cristina V.},
doi = {10.1109/ICSE.2019.00023},
eprint = {1812.05195},
isbn = {9781728108698},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Clone Detection,Machine learning,Open source labeled datasets,Precision Evaluation},
pages = {49--59},
title = {{Towards Automating Precision Studies of Clone Detectors}},
volume = {2019-May},
year = {2019}
}
@article{Alur2013,
abstract = {The classical formulation of the program-synthesis problem is to find a program that meets a correctness specification given as a logical formula. Recent work on program synthesis and program optimization illustrates many potential benefits of allowing the user to supplement the logical specification with a syntactic template that constrains the space of allowed implementations. Our goal is to identify the core computational problem common to these proposals in a logical framework. The input to the syntax-guided synthesis problem (SyGuS) consists of a background theory, a semantic correctness specification for the desired program given by a logical formula, and a syntactic set of candidate implementations given by a grammar. The computational problem then is to find an implementation from the set of candidate expressions so that it satisfies the specification in the given theory. We describe three different instantiations of the counter-example-guided-inductive-synthesis (CEGIS) strategy for solving the synthesis problem, report on prototype implementations, and present experimental results on an initial set of benchmarks. {\textcopyright} 2013 FMCAD Inc.},
author = {Alur, Rajeev and Bodik, Rastislav and Juniwal, Garvit and Martin, Milo M.K. and Raghothaman, Mukund and Seshia, Sanjit A. and Singh, Rishabh and Solar-Lezama, Armando and Torlak, Emina and Udupa, Abhishek},
doi = {10.1109/fmcad.2013.6679385},
isbn = {9780983567837},
journal = {2013 Formal Methods in Computer-Aided Design, FMCAD 2013},
pages = {1--8},
publisher = {FMCAD Inc.},
title = {{Syntax-guided synthesis}},
year = {2013}
}
@article{Sommarive2010,
author = {Truong, Duy Tin},
number = {November},
title = {{Automated Parameter Configuration for an SMT Solver}},
year = {2010}
}
@article{Pradel2018,
abstract = {Natural language elements in source code, e.g., the names of variables and functions, convey useful information. However, most existing bug detection tools ignore this information and therefore miss some classes of bugs. The few existing name-based bug detection approaches reason about names on a syntactic level and rely on manually designed and tuned algorithms to detect bugs. This paper presents DeepBugs, a learning approach to name-based bug detection, which reasons about names based on a semantic representation and which automatically learns bug detectors instead of manually writing them. We formulate bug detection as a binary classification problem and train a classifier that distinguishes correct from incorrect code. To address the challenge that effectively learning a bug detector requires examples of both correct and incorrect code, we create likely incorrect code examples from an existing corpus of code through simple code transformations. A novel insight learned from our work is that learning from artificially seeded bugs yields bug detectors that are effective at finding bugs in real-world code. We implement our idea into a framework for learning-based and name-based bug detection. Three bug detectors built on top of the framework detect accidentally swapped function arguments, incorrect binary operators, and incorrect operands in binary operations. Applying the approach to a corpus of 150,000 JavaScript files yields bug detectors that have a high accuracy (between 89{\%} and 95{\%}), are very efficient (less than 20 milliseconds per analyzed file), and reveal 102 programming mistakes (with 68{\%} true positive rate) in real-world code.},
archivePrefix = {arXiv},
arxivId = {arXiv:1805.11683v1},
author = {Pradel, Michael and Sen, Koushik},
doi = {10.1145/3276517},
eprint = {arXiv:1805.11683v1},
journal = {Proceedings of the ACM on Programming Languages},
number = {OOPSLA},
pages = {1--25},
title = {{DeepBugs: a learning approach to name-based bug detection}},
volume = {2},
year = {2018}
}
@article{Margolies,
archivePrefix = {arXiv},
arxivId = {arXiv:1410.6935v1},
author = {Margolies, Robert and Gorlatova, Maria and Sarik, John and Kinget, Peter and Kymissis, Ioannis and Zussman, Gil},
eprint = {arXiv:1410.6935v1},
isbn = {9781450320788},
keywords = {embedded systems,interdisciplinary learning,internet of things,wireless networking,project-based learning},
number = {c},
title = {{No Title}},
volume = {1}
}
@article{Tufano2019,
abstract = {Millions of open-source projects with numerous bug fixes are available in code repositories. This proliferation of software development histories can be leveraged to learn how to fix common programming bugs. To explore such a potential, we perform an empirical study to assess the feasibility of using Neural Machine Translation techniques for learning bug-fixing patches for real defects. First, we mine millions of bug-fixes from the change histories of projects hosted on GitHub, in order to extract meaningful examples of such bug-fixes. Next, we abstract the buggy and corresponding fixed code, and use them to train an Encoder-Decoder model able to translate buggy code into its fixed version. In our empirical investigation we found that such a model is able to fix thousands of unique buggy methods in the wild. Overall, this model is capable of predicting fixed patches generated by developers in 9-50{\%} of the cases, depending on the number of candidate patches we allow it to generate. Also, the model is able to emulate a variety of different Abstract Syntax Tree operations and generate candidate patches in a split second.},
author = {Tufano, Michele and Watson, Cody and Bavota, Gabriele and Penta, Massimiliano Di and White, Martin and Poshyvanyk, Denys},
doi = {10.1145/3340544},
isbn = {9781450359375},
issn = {1049331X},
journal = {ACM Transactions on Software Engineering and Methodology},
keywords = {bug-fixes,neural machine translation},
number = {4},
pages = {1--29},
title = {{An Empirical Study on Learning Bug-Fixing Patches in the Wild via Neural Machine Translation}},
volume = {28},
year = {2019}
}
@article{Song2015,
abstract = {In June 2014, DARPA launched the Cyber Grand Challenge (CGC) to spur innovation in fully automated software vulnerability analysis and repair. The competitors' automated systems evaluated challenges in the CGC Qualifying Event, with the top seven including the University of Idaho's Center for Secure and Dependable Systems team moving on to the fully automated capture-the-flag competition, which will be held in August 2016.},
author = {Song, Jia and Alves-Foss, Jim},
doi = {10.1109/MSP.2016.14},
issn = {15584046},
journal = {IEEE Security and Privacy},
keywords = {DARPA,cyber competition,cybersecurity,defense,military,security,software},
number = {6},
pages = {76--81},
title = {{The DARPA Cyber Grand Challenge: A Competitor's Perspective, Part 2}},
volume = {14},
year = {2016}
}
@article{Stampoulis2018,
author = {Stampoulis, Antonis and Chlipala, Adam},
keywords = {higher-order logic programming, programming languages},
number = {September},
title = {{Prototyping a Functional Language using Higher-Order Logic Programming: A Functional Pearl on Learning the Ways of $\lambda$Prolog/Makam}},
volume = {2},
year = {2018}
}
@article{Breitner2018,
author = {Breitner, Joachim and Spector-Zabusky, Antal and Li, Yao and Rizkallah, Christine and Wiegley, John and Weirich, Stephanie},
keywords = {Coq, Haskell, verification},
number = {September},
title = {{Ready, Set, Verify! Applying hs-to-coq to Real-World Haskell Code (Experience Report)}},
volume = {2},
year = {2018}
}
@article{Wei,
author = {Wei, Jiayi and Chen, Jia},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {availability vulnerability,complexity testing,fuzzing,genetic programming,optimal program synthesis,performance bug},
title = {{Singularity: Pattern Fuzzing for Worst Case Complexity}},
year = {2018}
}
@article{Dutta2018,
abstract = {Probabilistic programming systems (PP systems) allow developers to model stochastic phenomena and perform efficient inference on the models. The number and adoption of probabilistic programming systems is growing significantly. However, there is no prior study of bugs in these systems and no methodology for systematically testing PP systems. Yet, testing PP systems is highly non-trivial, especially when they perform approximate inference. In this paper, we characterize 118 previously reported bugs in three open-source PP systems-Edward, Pyro and Stan-and propose ProbFuzz, an extensible system for testing PP systems. Prob-Fuzz allows a developer to specify templates of probabilistic models, from which it generates concrete probabilistic programs and data for testing. ProbFuzz uses language-specific translators to generate these concrete programs, which use the APIs of each PP system. ProbFuzz finds potential bugs by checking the output from running the generated programs against several oracles, including an accuracy checker. Using ProbFuzz, we found 67 previously unknown bugs in recent versions of these PP systems. Developers already accepted 51 bug fixes that we submitted to the three PP systems, and their underlying systems, PyTorch and TensorFlow. CCS CONCEPTS • Software and its engineering → Software testing;},
author = {Dutta, Saikat and Legunsen, Owolabi and Huang, Zixin and Misailovic, Sasa},
doi = {10.1145/3236024.3236057},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {Probabilistic programming languages,Software Testing},
pages = {574--586},
title = {{Testing probabilistic programming systems}},
year = {2018}
}
@article{Arcuri2011,
author = {Arcuri, Andrea and Briand, Lionel},
doi = {10.1145/1985793.1985795},
isbn = {9781450304450},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {bonferroni adjustment,confidence interval,effect size,non-parametric test,parametric test,statistical difference,survey,systematic review},
pages = {1--10},
title = {{A practical guide for using statistical tests to assess randomized algorithms in software engineering}},
year = {2011}
}
@article{Lemieux2018,
abstract = {Performance problems in software can arise unexpectedly when programs are provided with inputs that exhibit worst-case behavior. A large body of work has focused on diagnosing such problems via statistical profiling techniques. But how does one find these inputs in the first place? We present PerfFuzz, a method to automatically generate inputs that exercise pathological behavior across program locations, without any domain knowledge. Perf-Fuzz generates inputs via feedback-directed mutational fuzzing. Unlike previous approaches that attempt to maximize only a scalar characteristic such as the total execution path length, PerfFuzz uses multi-dimensional feedback and independently maximizes execution counts for all program locations. This enables PerfFuzz to (1) find a variety of inputs that exercise distinct hot spots in a program and (2) generate inputs with higher total execution path length than previous approaches by escaping local maxima. PerfFuzz is also effective at generating inputs that demonstrate algorithmic complexity vulnerabilities. We implement PerfFuzz on top of AFL, a popular coverage-guided fuzzing tool, and evaluate PerfFuzz on four real-world C programs typically used in the fuzzing literature. We find that PerfFuzz outperforms prior work by generating inputs that exercise the most-hit program branch 5× to 69× times more, and result in 1.9× to 24.7× longer total execution paths. CCS CONCEPTS • Software and its engineering → Software testing and de-bugging; Software performance;},
author = {Lemieux, Caroline and Padhye, Rohan and Sen, Koushik and Song, Dawn},
doi = {10.1145/3213846.3213874},
isbn = {9781450356992},
journal = {ISSTA 2018 - Proceedings of the 27th ACM SIGSOFT International Symposium on Software Testing and Analysis},
keywords = {Algorithmic complexity,Fuzz testing,Performance,Worst-case},
pages = {254--265},
title = {{PerfFuzz: Automatically generating pathological inputs}},
year = {2018}
}
@article{Long2016,
abstract = {We present Prophet, a novel patch generation system that works with a set of successful human patches obtained from open-source software repositories to learn a probabilistic, application-independent model of correct code. It generates a space of candidate patches, uses the model to rank the candidate patches in order of likely correctness, and validates the ranked patches against a suite of test cases to find correct patches. Experimental results show that, on a benchmark set of 69 real-world defects drawn from eight open-source projects, Prophet significantly outperforms the previous state-of-the-art patch generation system.},
author = {Long, Fan and Rinard, Martin},
doi = {10.1145/2837614.2837617},
isbn = {9781450335492},
issn = {15232867},
journal = {ACM SIGPLAN Notices},
keywords = {Code correctness model,Learning correct code,Program repair},
number = {1},
pages = {298--312},
title = {{Automatic patch generation by learning correct code}},
volume = {51},
year = {2016}
}
@article{Fan2019,
abstract = {Detecting memory leak at industrial scale is still not well addressed, in spite of the tremendous effort from both industry and academia in the past decades. Existing work suffers from an unresolved paradox-a highly precise analysis limits its scalability and an imprecise one seriously hurts its precision or recall. In this work, we present SMOKE, a staged approach to resolve this paradox. In the first stage, instead of using a uniform precise analysis for all paths, we use a scalable but imprecise analysis to compute a succinct set of candidate memory leak paths. In the second stage, we leverage a more precise analysis to verify the feasibility of those candidates. The first stage is scalable, due to the design of a new sparse program representation, the use-flow graph (UFG), that models the problem as a polynomial-time state analysis. The second stage analysis is both precise and efficient, due to the smaller number of candidates and the design of a dedicated constraint solver. Experimental results show that SMOKE can finish checking industrial-sized projects, up to 8MLoC, in forty minutes with an average false positive rate of 24.4{\%}. Besides, SMOKE is significantly faster than the state-of-the-art research techniques as well as the industrial tools, with the speedup ranging from 5.2X to 22.8X. In the twenty-nine mature and extensively checked benchmark projects, SMOKE has discovered thirty previously-unknown memory leaks which were confirmed by developers, and one even assigned a CVE ID.},
author = {Fan, Gang and Wu, Rongxin and Shi, Qingkai and Xiao, Xiao and Zhou, Jinguo and Zhang, Charles},
doi = {10.1109/icse.2019.00025},
pages = {72--82},
title = {{SMOKE: Scalable Path-Sensitive Memory Leak Detection for Millions of Lines of Code}},
volume = {2},
year = {2019}
}
@article{Motwani2019,
abstract = {Software specifications often use natural language to describe the desired behavior, but such specifications are difficult to verify automatically. We present Swami, an automated technique that extracts test oracles and generates executable tests from structured natural language specifications. Swami focuses on exceptional behavior and boundary conditions that often cause field failures but that developers often fail to manually write tests for. Evaluated on the official JavaScript specification (ECMA-262), 98.4{\%} of the tests Swami generated were precise to the specification. Using Swami to augment developer-written test suites improved coverage and identified 1 previously unknown defect and 15 missing JavaScript features in Rhino, 1 previously unknown defect in Node.js, and 18 semantic ambiguities in the ECMA-262 specification.},
author = {Motwani, Manish and Brun, Yuriy},
doi = {10.1109/icse.2019.00035},
pages = {188--199},
title = {{Automatically Generating Precise Oracles from Structured Natural Language Specifications}},
year = {2019}
}
@article{Heo2019,
author = {Heo, Kihong and Oh, Hakjoo and Yang, Hongseok},
doi = {10.1109/icse.2019.00027},
pages = {94--104},
title = {{Resource-Aware Program Analysis Via Online Abstraction Coarsening}},
year = {2019}
}
@article{Rolando2018,
abstract = {{\textcopyright} 2018 ACM. Background: Statistical concepts and techniques are often applied incorrectly, even in mature disciplines such as medicine or psychology. Surprisingly, there are very few works that study statistical problems in software engineering (SE). Aim: Assess the existence of statistical errors in SE experiments. Method: Compile the most common statistical errors in experimental disciplines. Survey experiments published in ICSE to assess whether errors occur in high quality SE publications. Results: The same errors as identified in others disciplines were found in ICSE experiments, where 30{\%} of the reviewed papers included several error types such as: a) missing statistical hypotheses, b) missing sample size calculation, c) failure to assess statistical test assumptions, and d) uncorrected multiple testing. This rather large error rate is greater for research papers where experiments are confined to the validation section. The origin of the errors can be traced back to: a) researchers not having sufficient statistical training, and, b) a profusion of exploratory research. Conclusions: This paper provides preliminary evidence that SE research suffers from the same statistical problems as other experimental disciplines. However, the SE community appears to be unaware of any shortcomings in its experiments, whereas other disciplines work hard to avoid these threats. Further research is necessary to find the underlying causes and set up corrective measures, but there are some potentially effective actions and are a priori easy to implement: a) improve the statistical training of SE researchers, and b) enforce quality assessment and reporting guidelines in SE publications.},
author = {Reyes Ch., Rolando P. and Dieste, Oscar and Fonseca C., Efra{\'{i}}n R. and Juristo, Natalia},
doi = {10.1145/3180155.3180161},
isbn = {9781450356381},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Literature review,Prevalence,Statistical errors,Survey},
pages = {1195--1206},
title = {{Statistical errors in software engineering experiments: A preliminary literature review}},
year = {2018}
}
@article{Hellendoorn2018,
abstract = {Dynamically typed languages such as JavaScript and Python are increasingly popular, yet static typing has not been totally eclipsed: Python now supports type annotations and languages like TypeScript offer a middle-ground for JavaScript: a strict superset of JavaScript, to which it transpiles, coupled with a type system that permits partially typed programs. However, static typing has a cost: adding annotations, reading the added syntax, and wrestling with the type system to fix type errors. Type inference can ease the transition to more statically typed code and unlock the benefits of richer compile-time information, but is limited in languages like JavaScript as it cannot soundly handle duck-typing or runtime evaluation via eval. We propose DeepTyper, a deep learning model that understands which types naturally occur in certain contexts and relations and can provide type suggestions, which can often be verified by the type checker, even if it could not infer the type initially. DeepTyper leverages an automatically aligned corpus of tokens and types to accurately predict thousands of variable and function type annotations. Furthermore, we demonstrate that context is key in accurately assigning these types and introduce a technique to reduce overfitting on local cues while highlighting the need for further improvements. Finally, we show that our model can interact with a compiler to provide more than 4,000 additional type annotations with over 95{\%} precision that could not be inferred without the aid of DeepTyper.},
author = {Hellendoorn, Vincent J. and Bird, Christian and Barr, Earl T. and Allamanis, Miltiadis},
doi = {10.1145/3236024.3236051},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {Deep Learning,Naturalness,Type Inference},
pages = {152--162},
title = {{Deep learning type inference}},
year = {2018}
}
@article{Foo2018,
abstract = {Software engineering practices have evolved to the point where a developer writing a new application today doesn't start from scratch, but reuses a number of open source libraries and components. These third-party libraries evolve independently of the applications in which they are used, and may not maintain stable interfaces as bugs and vulnerabilities in them are fixed. This in turn causes API incompatibilities in downstream applications which must be manually resolved. Oversight here may manifest in many ways, from test failures to crashes at runtime. To address this problem , we present a static analysis for automatically and efficiently checking if a library upgrade introduces an API incompatibility. Our analysis does not rely on reported version information from library developers, and instead computes the actual differences between methods in libraries across different versions. The analysis is scalable, enabling real-time diff queries involving arbitrary pairs of library versions. It supports a vulnerability remediation product which suggests library upgrades automatically and is lightweight enough to be part of a continuous integration/delivery (CI/CD) pipeline. To evaluate the effectiveness of our approach, we determine semantic versioning adherence of a corpus of open source libraries taken from Maven Central, PyPI, and RubyGems. We find that on average, 26{\%} of library versions are in violation of semantic versioning. We also analyze a collection of popular open source projects from GitHub to determine if we can automatically update libraries in them without causing API incompatibilities. Our results indicate that we can suggest upgrades automatically for 10{\%} of the libraries.},
author = {Foo, Darius and Chua, Hendy and Yeo, Jason and Ang, Ming Yi and Sharma, Asankhaya},
doi = {10.1145/3236024.3275535},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {api diffs,automated remediation,call graphs,library upgrades,semantic versioning},
pages = {791--796},
title = {{Efficient static checking of library updates}},
year = {2018}
}
@article{Hellendoorn2017,
abstract = {Current statistical language modeling techniques, including deep-learning based models, have proven to be quite effective for source code. We argue here that the special properties of source code can be exploited for further improvements. In this work, we enhance established language modeling approaches to handle the special challenges of modeling source code, such as: frequent changes, larger, changing vocabularies, deeply nested scopes, etc. We present a fast, nested language modeling toolkit specifically designed for software, with the ability to add {\&} remove text, and mix {\&} swap out many models. Specifically, we improve upon prior cache-modeling work and present a model with a much more expansive, multi-level notion of locality that we show to be well-suited for modeling software. We present results on varying corpora in comparison with traditional N-gram, as well as RNN, and LSTM deep-learning language models, and release all our source code for public use. Our evaluations suggest that carefully adapting N-gram models for source code can yield performance that surpasses even RNN and LSTM based deep-learning models.},
author = {Hellendoorn, Vincent J. and Devanbu, Premkumar},
doi = {10.1145/3106237.3106290},
isbn = {9781450351058},
journal = {Proceedings of the ACM SIGSOFT Symposium on the Foundations of Software Engineering},
keywords = {Language models,Naturalness,Software tools},
pages = {763--773},
title = {{Are deep neural networks the best choice for modeling source code?}},
volume = {Part F1301},
year = {2017}
}
@article{VanTonder2018,
abstract = {Static analysis tools have demonstrated effectiveness at finding bugs in real world code. Such tools are increasingly widely adopted to improve software quality in practice. Automated Program Repair (APR) has the potential to further cut down on the cost of improving software quality. However, there is a disconnect between these effective bug-finding tools and APR. Recent advances in APR rely on test cases, making them inapplicable to newly discovered bugs or bugs difficult to test for deterministically (like memory leaks). Additionally, the quality of patches generated to satisfy a test suite is a key challenge. We address these challenges by adapting advances in practical static analysis and verification techniques to enable a new technique that finds and then accurately fixes real bugs without test cases. We present a new automated program repair technique using Separation Logic. At a high-level, our technique reasons over semantic effects of existing program fragments to fix faults related to general pointer safety properties: resource leaks, memory leaks, and null dereferences. The procedure automatically translates identified fragments into source-level patches, and verifies patch correctness with respect to reported faults. In this work we conduct the largest study of automatically fixing undiscovered bugs in real-world code to date. We demonstrate our approach by correctly fixing 55 bugs, including 11 previously undiscovered bugs, in 11 real-world projects. CCS CONCEPTS • Software and its engineering → Error handling and recovery ; Maintaining software; Software defect analysis;},
author = {van Tonder, Rijnard and Le Goues, Claire},
doi = {10.1145/3180155.3180250},
isbn = {9781450356381},
keywords = {automated program repair,separation logic,static automated program repair},
pages = {151--162},
title = {{Static automated program repair for heap properties}},
year = {2018}
}
@article{Shi2018,
abstract = {{\textcopyright} 2018 Association for Computing Machinery. Test-suite reduction (TSR) speeds up regression testing by removing redundant tests from the test suite, thus running fewer tests in the future builds. To decide whether to use TSR or not, a developer needs some way to predict how well the reduced test suite will detect real faults in the future compared to the original test suite. Prior research evaluated the cost of TSR using only program versions with seeded faults, but such evaluations do not explicitly predict the effectiveness of the reduced test suite in future builds. We perform the first extensive study of TSR using real test failures in (failed) builds that occurred for real code changes. We analyze 1478 failed builds from 32 GitHub projects that run their tests on Travis. Each failed build can have multiple faults, so we propose a family of mappings from test failures to faults. We use these mappings to compute Failed-Build Detection Loss (FBDL), the percentage of failed builds where the reduced test suite misses to detect all the faults detected by the original test suite. We find that FBDL can be up to 52.2{\%}, which is higher than suggested by traditional TSR metrics. Moreover, traditional TSR metrics are not good predictors of FBDL, making it difficult for developers to decide whether to use reduced test suites.},
author = {Shi, August and Gyori, Alex and Mahmood, Suleman and Zhao, Peiyuan and Marinov, Darko},
doi = {10.1145/3213846.3213875},
isbn = {9781450356992},
journal = {ISSTA 2018 - Proceedings of the 27th ACM SIGSOFT International Symposium on Software Testing and Analysis},
keywords = {Continuous integration,Regression testing,Test-suite reduction},
number = {1},
pages = {84--94},
title = {{Evaluating test-suite reduction in real software evolution}},
year = {2018}
}
@article{Saha2018,
abstract = {We present Bugs.jar, a large-scale dataset for research in automated debugging, patching, and testing of Java programs. Bugs.jar is comprised of 1,158 bugs and patches, drawn from 8 large, popular open-source Java projects, spanning 8 diverse and prominent application categories. It is an order of magnitude larger than Defects4J, the only other dataset in its class. We discuss the methodology used for constructing Bugs.jar, the representation of the dataset, several use-cases, and an illustration of three of the use-cases through the application of 3 specific tools on Bugs.jar, namely our own tool, Elixir, and two third-party tools, Ekstazi and JaCoCo.},
author = {Saha, Ripon K. and Lyu, Yingjun and Lam, Wing and Yoshida, Hiroaki and Prasad, Mukul R.},
doi = {10.1145/3196398.3196473},
isbn = {9781450357166},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {Java programs,large-scale dataset,reproducible bugs},
pages = {10--13},
title = {{Bugs.jar: A large-scale, diverse dataset of real-world Java bugs}},
year = {2018}
}
@article{Gyimesi2019,
abstract = {JavaScript is a popular programming language that is also error-prone due to its asynchronous, dynamic, and loosely-typed nature. In recent years, numerous techniques have been proposed for analyzing and testing JavaScript applications. However, our survey of the literature in this area revealed that the proposed techniques are often evaluated on different datasets of programs and bugs. The lack of a commonly used benchmark limits the ability to perform fair and unbiased comparisons for assessing the efficacy of new techniques. To fill this gap, we propose BUGSJS, a benchmark of 453 real, manually validated JavaScript bugs from 10 popular JavaScript server-side programs, comprising 444k LOC in total. Each bug is accompanied by its bug report, the test cases that detect it, as well as the patch that fixes it. BUGSJS features a rich interface for accessing the faulty and fixed versions of the programs and executing the corresponding test cases, which facilitates conducting highly-reproducible empirical studies and comparisons of JavaScript analysis and testing tools.},
author = {Gyimesi, Peter and Vancsics, Bela and Stocco, Andrea and Mazinanian, Davood and Beszedes, Arpad and Ferenc, Rudolf and Mesbah, Ali},
doi = {10.1109/ICST.2019.00019},
isbn = {9781728117355},
journal = {Proceedings - 2019 IEEE 12th International Conference on Software Testing, Verification and Validation, ICST 2019},
keywords = {Benchmark,Bug database,BugsJS,JavaScript,Literature survey,Real bugs,Reproducibility},
pages = {90--101},
title = {{BugsJS: A Benchmark of JavaScript Bugs}},
year = {2019}
}
@article{Fan2018,
author = {Fan, Yuanrui and Xia, Xin and Lo, David and Hassan, Ahmed E.},
doi = {10.1109/TSE.2018.2864217},
issn = {19393520},
journal = {IEEE Transactions on Software Engineering},
keywords = {Bug Report,Collaboration,Computer bugs,Feature Generation,Feature extraction,Forestry,Machine Learning,Software,Support vector machines,Task analysis},
title = {{Chaff from the Wheat: Characterizing and Determining Valid Bug Reports}},
year = {2018}
}
@article{Licker2019,
abstract = {Automated build systems are routinely used by software engineers to minimize the number of objects that need to be recompiled after incremental changes to the source files of a project. In order to achieve efficient and correct builds, developers must provide the build tools with dependency information between the files and modules of a project, usually expressed in a macro language specific to each build tool. Most build systems offer good support for well-known languages and compilers, but as projects grow larger, engineers tend to include source files generated using custom tools. In order to guarantee correctness, the authors of these tools are responsible for enumerating all the files whose contents an output depends on. Unfortunately, this is a tedious process and not all dependencies are captured in practice, which leads to incorrect builds. We automatically uncover such missing dependencies through a novel method that we call build fuzzing. The correctness of build definitions is verified by modifying files in a project, triggering incremental builds and comparing the set of changed files to the set of expected changes. These sets are determined using a dependency graph inferred by tracing the system calls executed during a clean build. We evaluate our method by exhaustively testing build rules of open-source projects, uncovering issues leading to race conditions and faulty builds in 30 of them. We provide a discussion of the bugs we detect, identifying anti-patterns in the use of the macro languages. We fix some of the issues in projects where the features of build systems allow a clean solution.},
author = {Licker, Nandor and Rice, Andrew},
doi = {10.1109/icse.2019.00125},
pages = {1234--1244},
title = {{Detecting Incorrect Build Rules}},
year = {2019}
}
@article{Tomassi2019,
abstract = {Fault-detection, localization, and repair methods are vital to software quality; but it is difficult to evaluate their generality, applicability, and current effectiveness. Large, diverse, realistic datasets of durably-reproducible faults and fixes are vital to good experimental evaluation of approaches to software quality, but they are difficult and expensive to assemble and keep current. Modern continuous-integration (CI) approaches, like Travis-CI, which are widely used, fully configurable, and executed within custom-built containers, promise a path toward much larger defect datasets. If we can identify and archive failing and subsequent passing runs, the containers will provide a substantial assurance of durable future reproducibility of build and test. Several obstacles, however, must be overcome to make this a practical reality. We describe BugSwarm, a toolset that navigates these obstacles to enable the creation of a scalable, diverse, realistic, continuously growing set of durably reproducible failing and passing versions of real-world, open-source systems. The BugSwarm toolkit has already gathered 3,091 fail-pass pairs, in Java and Python, all packaged within fully reproducible containers. Furthermore, the toolkit can be run periodically to detect fail-pass activities, thus growing the dataset continually.},
author = {Tomassi, David A. and Dmeiri, Naji and Wang, Yichen and Bhowmick, Antara and Liu, Yen-Chuan and Devanbu, Premkumar T. and Vasilescu, Bogdan and Rubio-Gonzalez, Cindy},
doi = {10.1109/icse.2019.00048},
pages = {339--349},
title = {{BugSwarm: Mining and Continuously Growing a Dataset of Reproducible Failures and Fixes}},
year = {2019}
}
@article{Mazi2007,
abstract = {Paxos is a simple protocol that a group of machines in a distributed system can use to agree on a value proposed by a member of the group. If it terminates, the protocol reaches consensus even if the network was unreliable and multiple machines simultaneously tried to propose different values.},
author = {Mazi{\`e}res, David},
journal = {Other},
title = {{Paxos Made Practical}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.143.6093{\&}rep=rep1{\&}type=pdf},
year = {2007}
}
@article{Klees2018,
abstract = {Fuzz testing has enjoyed great success at discovering security critical bugs in real software. Recently, researchers have devoted significant effort to devising new fuzzing techniques, strategies, and algorithms. Such new ideas are primarily evaluated experimentally so an important question is: What experimental setup is needed to produce trustworthy results? We surveyed the recent research literature and assessed the experimental evaluations carried out by 32 fuzzing papers. We found problems in every evaluation we considered. We then performed our own extensive experimental evaluation using an existing fuzzer. Our results showed that the general problems we found in existing experimental evaluations can indeed translate to actual wrong or misleading assessments. We conclude with some guidelines that we hope will help improve experimental evaluations of fuzz testing algorithms, making reported results more robust.},
archivePrefix = {arXiv},
arxivId = {arXiv:1808.09700v2},
author = {Klees, George and Ruef, Andrew and Cooper, Benji and Wei, Shiyi and Hicks, Michael},
doi = {10.1145/3243734.3243804},
eprint = {arXiv:1808.09700v2},
isbn = {9781450356930},
issn = {15437221},
journal = {Proceedings of the ACM Conference on Computer and Communications Security},
keywords = {Evaluation,Fuzzing,Security},
pages = {2123--2138},
title = {{Evaluating fuzz testing}},
year = {2018}
}
@article{Basios2018,
abstract = {Data structure selection and tuning is laborious but can vastly improve an application's performance and memory footprint. Some data structures share a common interface and enjoy multiple implementations. We call them Darwinian Data Structures (DDS), since we can subject their implementations to survival of the fittest. We introduce ARTEMIS, a multi-objective, cloud-based search-based optimisation framework that automatically finds optimal, tuned DDS modulo a test suite, then changes an application to use that DDS. ARTEMIS achieves substantial performance improvements for every project in 5 Java projects from the DaCapo benchmark, 8 popular projects and 30 uniformly sampled projects from GitHub. For execution time, CPU usage, and memory consumption, ARTEMIS finds at least one solution that improves all measures for 86{\%} (37/43) of the projects. The median improvement across the best solutions is 4.8{\%}, 10.1{\%}, 5.1{\%} for runtime, memory and CPU usage. These aggregate results understate ARTEMIS's potential impact. Some of the benchmarks it improves are libraries or utility functions. Two examples are gson, a ubiquitous Java serialization framework, and xalan, Apache's XML transformation tool. ARTEMIS improves gson by 16.5{\%}, 1{\%} and 2.2{\%} for memory, runtime, and CPU; ARTEMIS improves xalan's memory consumption by 23.5{\%}. Every client of these projects will benefit from these performance improvements.},
archivePrefix = {arXiv},
arxivId = {arXiv:1706.03232v3},
author = {Basios, Michail and Li, Lingbo and Wu, Fan and Kanthan, Leslie and Barr, Earl T.},
doi = {10.1145/3236024.3236043},
eprint = {arXiv:1706.03232v3},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {Data Structure Optimisation,Genetic Improvement,Search-based Software Engineering,Software Analysis and Optimisation},
pages = {118--128},
title = {{Darwinian data structure selection}},
year = {2018}
}
@article{Henkel2018,
abstract = {With the rise of machine learning, there is a great deal of interest in treating programs as data to be fed to learning algorithms. However, programs do not start off in a form that is immediately amenable to most off-the-shelf learning techniques. Instead, it is necessary to transform the program to a suitable representation before a learning technique can be applied. In this paper, we use abstractions of traces obtained from symbolic execution of a program as a representation for learning word embeddings. We trained a variety of word embeddings under hundreds of parameterizations, and evaluated each learned embedding on a suite of different tasks. In our evaluation, we obtain 93{\%} top-1 accuracy on a benchmark consisting of over 19,000 API-usage analogies extracted from the Linux kernel. In addition, we show that embeddings learned from (mainly) semantic abstractions provide nearly triple the accuracy of those learned from (mainly) syntactic abstractions.},
archivePrefix = {arXiv},
arxivId = {arXiv:1803.06686v2},
author = {Henkel, Jordan and Lahiri, Shuvendu K. and Liblit, Ben and Reps, Thomas},
doi = {10.1145/3236024.3236085},
eprint = {arXiv:1803.06686v2},
isbn = {9781450355735},
journal = {ESEC/FSE 2018 - Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
keywords = {Analogical Reasoning,Linux,Program Understanding,Word Embeddings},
pages = {163--174},
title = {{Code vectors: Understanding programs through embedded abstracted symbolic traces}},
year = {2018}
}
@article{Arpaci2014,
author = {Arpaci-Dusseau, Remzi H. and Arpaci-Dusseau, Andrea C.},
title = {{Virtual Machine Monitors}},
url = {http://pages.cs.wisc.edu/{~}remzi/OSTEP/vmm-intro.pdf},
year = {2014}
}
@article{Fikes2010,
author = {Fikes, Andrew},
journal = {Talk at the Google Faculty Summit},
title = {{Google Storage architecture and challenges}},
url = {http://scholar.google.com/scholar?hl=en{\&}btnG=Search{\&}q=intitle:Storage+Architecture+and+Challenges{\#}0},
year = {2010}
}
@article{Liskov2010,
abstract = {The paper provides an historical perspective about two replication protocols, each of which was intended for practical deployment. The first is Viewstamped Replication, which was developed in the 1980's and allows a group of replicas to continue to provide service in spite of a certain number of crashes among them. The second is an extension of Viewstamped Replication that allows the group to survive Byzantine (arbitrary) failures. Both protocols allow users to execute general operations (thus they provide state machine replication); both were developed in the Programming Methodology group at MIT. {\textcopyright} 2010 Springer Berlin Heidelberg.},
author = {Liskov, Barbara},
doi = {10.1007/978-3-642-11294-2_7},
isbn = {3642112935},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {121--149},
title = {{From viewstamped replication to Byzantine fault tolerance}},
volume = {5959 LNCS},
year = {2010}
}
@article{Cummins2018,
abstract = {Random program generation-fuzzing-is an effective technique for discovering bugs in compilers but successful fuzzers require extensive development effort for every language supported by the compiler, and often leave parts of the language space untested. We introduce DeepSmith, a novel machine learning approach to accelerating compiler validation through the inference of gen-erative models for compiler inputs. Our approach infers a learned model of the structure of real world code based on a large corpus of open source code. Then, it uses the model to automatically generate tens of thousands of realistic programs. Finally, we apply established differential testing methodologies on them to expose bugs in compilers. We apply our approach to the OpenCL programming language, automatically exposing bugs with little effort on our side. In 1,000 hours of automated testing of commercial and open source compilers, we discover bugs in all of them, submitting 67 bug reports. Our test cases are on average two orders of magnitude smaller than the state-of-the-art, require 3.03× less time to generate and evaluate, and expose bugs which the state-of-the-art cannot. Our random program generator, comprising only 500 lines of code, took 12 hours to train for OpenCL versus the state-of-the-art taking 9 man months to port from a generator for C and 50,000 lines of code. With 18 lines of code we extended our program generator to a second language, uncovering crashes in Solidity compilers in 12 hours of automated testing. CCS CONCEPTS • Software and its engineering → Software testing and de-bugging;},
author = {Cummins, Chris and Petoumenos, Pavlos and Murray, Alastair and Leather, Hugh},
doi = {10.1145/3213846.3213848},
isbn = {9781450356992},
journal = {ISSTA 2018 - Proceedings of the 27th ACM SIGSOFT International Symposium on Software Testing and Analysis},
keywords = {Compiler Fuzzing,Deep Learning,Differential Testing},
pages = {95--105},
title = {{Compiler fuzzing through deep learning}},
year = {2018}
}
@article{Qiu2018,
abstract = {Numerous static analysis techniques have recently been proposed for identifying information flows in mobile applications. These techniques are compared to each other, usually on a set of syntactic benchmarks. Yet, configurations used for such comparisons are rarely described. Our experience shows that tools are often compared under different setup, rendering the comparisons irreproducible and largely inaccurate. In this paper, we provide a large, controlled, and independent comparison of the three most prominent static analysis tools: FlowDroid combined with IccTA, AmAndroid, and DroidSafe. We evaluate all tools using common configuration setup and on the same set of benchmark applications. We compare the results of our analysis to the results reported in previous studies, identify main reasons for inaccuracy in existing tools, and provide suggestions for future research. {\textcopyright} 2018 Association for Computing Machinery.},
author = {Qiu, Lina and Wang, Yingying and Rubin, Julia},
doi = {10.1145/3213846.3213873},
isbn = {9781450356992},
journal = {ISSTA 2018 - Proceedings of the 27th ACM SIGSOFT International Symposium on Software Testing and Analysis},
keywords = {Empirical studies,Information flow analysis,Mobile,Static analysis},
pages = {176--186},
title = {{Analyzing the analyzers: FlowDroid/IccTA, AmanDroid, and DroidSafe}},
year = {2018}
}
@article{Habib2018,
abstract = {Static bug detectors are becoming increasingly popular and are widely used by professional software developers. While most work on bug detectors focuses on whether they find bugs at all, and on how many false positives they report in addition to legitimate warnings, the inverse question is often neglected: How many of all real-world bugs do static bug detectors find? This paper addresses this question by studying the results of applying three widely used static bug detectors to an extended version of the Defects4J dataset that consists of 15 Java projects with 594 known bugs. To decide which of these bugs the tools detect, we use a novel methodology that combines an automatic analysis of warnings and bugs with a manual validation of each candidate of a detected bug. The results of the study show that: (i) static bug detectors find a non-negligible amount of all bugs, (ii) different tools are mostly complementary to each other, and (iii) current bug detectors miss the large majority of the studied bugs. A detailed analysis of bugs missed by the static detectors shows that some bugs could have been found by variants of the existing detectors, while others are domain-specific problems that do not match any existing bug pattern. These findings help potential users of such tools to assess their utility, motivate and outline directions for future work on static bug detection, and provide a basis for future comparisons of static bug detection with other bug finding techniques, such as manual and automated testing.},
author = {Habib, Andrew and Pradel, Michael},
doi = {10.1145/3238147.3238213},
isbn = {9781450359375},
journal = {ASE 2018 - Proceedings of the 33rd ACM/IEEE International Conference on Automated Software Engineering},
keywords = {Bug finding,Defects4J,Static analysis,Static bug checkers},
pages = {317--328},
title = {{How many of all bugs do we find? A study of static bug detectors}},
year = {2018}
}
@article{Lamport2001,
abstract = {The Paxos algorithm, when presented in plain English, is very simple.},
author = {Lamport, Leslie},
doi = {10.1145/568425.568433},
issn = {01635700},
journal = {ACM SIGACT News},
number = {4},
pages = {51--58},
title = {{Paxos Made Simple}},
volume = {32},
year = {2001}
}
@article{Luan2018,
abstract = {Programmers often write code that has similarity to existing code written somewhere. A tool that could help programmers to search such similar code would be immensely useful. Such a tool could help programmers to extend partially written code snippets to completely implement necessary functionality, help to discover extensions to the partial code which are commonly included by other programmers, help to cross-check against similar code written by other programmers, or help to add extra code which would fix common mistakes and errors. We propose Aroma, a tool and technique for code recommendation via structural code search. Aroma indexes a huge code corpus including thousands of open-source projects, takes a partial code snippet as input, searches the corpus for method bodies containing the partial code snippet, and clusters and intersects the results of the search to recommend a small set of succinct code snippets which both contain the query snippet and appear as part of several methods in the corpus. We evaluated Aroma on 2000 randomly selected queries created from the corpus, as well as 64 queries derived from code snippets obtained from Stack Overflow, a popular website for discussing code. We implemented Aroma for 4 different languages, and developed an IDE plugin for Aroma. Furthermore, we conducted a study where we asked 12 programmers to complete programming tasks using Aroma, and collected their feedback. Our results indicate that Aroma is capable of retrieving and recommending relevant code snippets efficiently.},
archivePrefix = {arXiv},
arxivId = {1812.01158},
author = {Luan, Sifei and Yang, Di and Barnaby, Celeste and Sen, Koushik and Chandra, Satish},
eprint = {1812.01158},
title = {{Aroma: Code Recommendation via Structural Code Search}},
url = {http://arxiv.org/abs/1812.01158},
year = {2018}
}
@article{Dean2008,
abstract = {MapReduce is a programming model and an associated implementation for processing and generating large data sets. Users specify a map function that processes a key/value pair to generate a set of intermediate key/value pairs, and a reduce function that merges all intermediate values associated with the same intermediate key. Many real world tasks are expressible in this model, as shown in the paper. Programs written in this functional style are automatically parallelized and executed on a large cluster of commodity machines. The run-time system takes care of the details of partitioning the input data, scheduling the program's execution across a set of machines, handling machine failures, and managing the required inter-machine communication. This allows programmers without any experience with parallel and distributed systems to easily utilize the resources of a large distributed system. Our implementation of MapReduce runs on a large cluster of commodity machines and is highly scalable: a typical MapReduce computation processes many terabytes of data on thousands of machines. Programmers find the system easy to use: hundreds of MapReduce programs have been implemented and upwards of one thousand MapReduce jobs are executed on Google's clusters every day.},
author = {Dean, Jeffrey and Ghemawat, Sanjay},
doi = {10.1145/1327452.1327492},
issn = {00010782},
journal = {Communications of the ACM},
number = {1},
pages = {107--113},
title = {{MapReduce: Simplified data processing on large clusters}},
volume = {51},
year = {2008}
}
@book{Hadzilacos,
author = {Bernstein, Philip A. and Hadzilacos, Vassos and Goodman, Nathan},
publisher = {Addison-Wesley},
title = {{Concurrency Control and Recovery in Database Systems}},
year = {1987}
}
@article{Rosenblum1991,
abstract = {This paper presents a new technique for disk storage management called a log-structured file system. A log-structured file system writes all modifications to disk sequentially in a log-like structure, thereby speeding up both file writing and crash recovery. The log is the only structure on disk; it contains indexing information so that files can be read back from the log efficiently. In order to maintain large free areas on disk for fast writing, we divide the log into segments and use a segment cleaner to compress the live information from heavily fragmented segments. We present a series of simulations that demonstrate the efficiency of a simple cleaning policy based on cost and benefit. We have implemented a prototype log-structured file system called Sprite LFS; it outperforms current Unix file systems by an order of magnitude for small-file writes while matching or exceeding Unix performance for reads and large writes. Even when the overhead for cleaning is included, Sprite LFS can use 70{\%} of the disk bandwidth for writing, whereas Unix file systems typically can use only 5-10{\%}.},
author = {Rosenblum, Mendel and Ousterhout, John K.},
doi = {10.1145/121132.121137},
isbn = {0897914473},
journal = {Proceedings of the 13th ACM Symposium on Operating Systems Principles, SOSP 1991},
keywords = {disk storage management},
number = {1},
pages = {1--15},
title = {{The design and implementation of a log-structured file system}},
volume = {10},
year = {1991}
}
@article{Meyer1988,
abstract = {UNIX is a multi-processing and multi-user timesharing operating system. It has become quite popular since its inception in 1969, running on machines of varying processing power from microprocessors to mainframes across different manufacturers' product lines. UNIX provides a great range of programs and services that have made the UNIX system environment popular among users. This environment contains the command interpreter shell, text processing packages, the source code control, a powerful mailing system and many more. The importance of UNIX for manufacturers lies in its philosophy of simplicity and consistency. Since UNIX is written almost totally in a high-level programming language it is very easy to port the system to all kinds of different machines. As a result, by the beginning of 1984, there were already about 100 000 UNIX or UNIX-like system installations around the whole world, and the number is still increasing.},
author = {Meyer, Veronika and Meyer, Walter},
doi = {10.1016/0010-4655(88)90115-4},
issn = {00104655},
journal = {Computer Physics Communications},
keywords = {command language,file system,operating system,PDP-11,time-sharing},
number = {1-2},
pages = {51--57},
title = {{The UNIX{\textregistered} timesharing operating system}},
volume = {50},
year = {1988}
}
@article{Barham2003,
abstract = {Numerous systems have been designed which use virtualization to subdivide the ample resources of a modern computer. Some require specialized hardware, or cannot support commodity operating systems. Some target 100{\%} binary compatibility at the expense of performance. Others sacrifice security or functionality for speed. Few offer resource isolation or performance guarantees; most provide only best-effort provisioning, risking denial of service. This paper presents Xen, an x86 virtual machine monitor which allows multiple commodity operating systems to share conventional hardware in a safe and resource managed fashion, but without sacrificing either performance or functionality. This is achieved by providing an idealized virtual machine abstraction to which operating systems such as Linux, BSD and Windows XP, can be ported with minimal effort. Our design is targeted at hosting up to 100 virtual machine instances simultaneously on a modern server. The virtualization approach taken by Xen is extremely efficient: we allow operating systems such as Linux and Windows XP to be hosted simultaneously for a negligible performance overhead — at most a few percent compared with the unvirtualized case. We considerably outperform competing commercial and freely available solutions in a range of microbenchmarks and system-wide tests.},
author = {Barham, Paul and Dragovic, Boris and Fraser, Keir and Hand, Steven and Harris, Tim and Ho, Alex and Neugebauer, Rolf and Pratt, Ian and Warfield, Andrew},
isbn = {1581137575},
journal = {19th ACM Symposium on Operating Systems Principles},
keywords = {hypervisors,paravirtualization,virtual machine monitors},
pages = {164--177},
title = {{Xen and the Art of Virtualization}},
year = {2003}
}
@article{Kamp2019,
abstract = {We propose an efficient protocol for decentralized training of deep neural networks from distributed data sources. The proposed protocol allows to handle different phases of model training equally well and to quickly adapt to concept drifts. This leads to a reduction of communication by an order of magnitude compared to periodically communicating state-of-the-art approaches. Moreover, we derive a communication bound that scales well with the hardness of the serialized learning problem. The reduction in communication comes at almost no cost, as the predictive performance remains virtually unchanged. Indeed, the proposed protocol retains loss bounds of periodically averaging schemes. An extensive empirical evaluation validates major improvement of the trade-off between model performance and communication which could be beneficial for numerous decentralized learning applications, such as autonomous driving, or voice recognition and image classification on mobile phones.},
author = {Kamp, Michael and Adilova, Linara and Sicking, Joachim and H{\"{u}}ger, Fabian and Schlicht, Peter and Wirtz, Tim and Wrobel, Stefan},
doi = {10.1007/978-3-030-10925-7_24},
isbn = {9783030109240},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {393--409},
title = {{Efficient decentralized deep learning by dynamic model averaging}},
volume = {11051 LNAI},
year = {2019}
}
@article{Weng2018,
abstract = {Deep learning technology has achieved the high-accuracy of state-of-the-art algorithms in a variety of AI tasks. Its popularity has drawn security researchers' attention to the topic of privacy-preserving deep learning, in which neither training data nor model is expected to be exposed. Recently, federated learning becomes promising for the development of deep learning where multi-parties upload local gradients and a server updates parameters with collected gradients, the privacy issues of which have been discussed widely. In this paper, we explore additional security issues in this case, not merely the privacy. First, we consider that the general assumption of honest-but-curious server is problematic, and the malicious server may break privacy. Second, the malicious server or participants may damage the correctness of training, such as incorrect gradient collecting or parameter updating. Third, we discover that federated learning lacks an effective incentive mechanism for distrustful participants due to privacy and financial considerations. To address the aforementioned issues, we introduce a value-driven incentive mechanism based on Blockchain. Adapted to this incentive setting, we migrate the malicious threats from server and participants, and guarantee the privacy and auditability. Thus, we propose to present DeepChain which gives mistrustful parties incentives to participate in privacy-preserving learning, share gradients and update parameters correctly, and eventually accomplish iterative learning with a win-win result. At last, we give an implementation prototype by integrating deep learning module with a Blockchain development platform (Corda V3.0). We evaluate it in terms of encryption performance and training accuracy, which demonstrates the feasibility of DeepChain.},
author = {Weng, Jia-Si and Weng, Jian and Li, Ming and Zhang, Yue and Luo, Weiqi},
journal = {IACR Cryptology ePrint Archive},
number = {8},
pages = {679},
title = {{DeepChain: Auditable and Privacy-Preserving Deep Learning with Blockchain-based Incentive}},
url = {https://eprint.iacr.org/2018/679.pdf},
volume = {2018},
year = {2018}
}
@article{Addair,
abstract = {Training a machine learning model with terabytes to petabytes of data using very deep neural networks doesn't scale well within a single machine. A significant amount of work in recent years has gone into distributing the training of such neural networks across a cluster of machines, by partitioning on both the data and the model itself. The most well-established form of distributed training uses a centralized parameter server to manage the shared state of neural network weights used across all partitions of the data, but this introduces a bottleneck and single-point of failure during training. In this paper, we explore a more experimental form of decentralized training that removes this bottleneck. Finally, we show that by taking advantage of sparse updates to the shared parameter matrix, decentralized training can be tuned to make tradeoffs between training speed and model accuracy.},
author = {Addair, Travis},
title = {{Decentralized and Distributed Machine Learning Model Training with Actors}},
url = {https://blog.skymind.ai/distributed-deep-learning-part-1-an}
}
@article{Pham2016,
abstract = {The problem of anomaly detection has been studied for a long time. In short, anomalies are abnormal or unlikely things. In financial networks, thieves and illegal activities are often anomalous in nature. Members of a network want to detect anomalies as soon as possible to prevent them from harming the network's community and integrity. Many Machine Learning techniques have been proposed to deal with this problem; some results appear to be quite promising but there is no obvious superior method. In this paper, we consider anomaly detection particular to the Bitcoin transaction network. Our goal is to detect which users and transactions are the most suspicious; in this case, anomalous behavior is a proxy for suspicious behavior. To this end, we use three unsupervised learning methods including k-means clustering, Mahalanobis distance, and Unsupervised Support Vector Machine (SVM) on two graphs generated by the Bitcoin transaction network: one graph has users as nodes, and the other has transactions as nodes.},
archivePrefix = {arXiv},
arxivId = {1611.03941},
author = {Pham, Thai and Lee, Steven},
eprint = {1611.03941},
title = {{Anomaly Detection in Bitcoin Network Using Unsupervised Learning Methods}},
url = {http://arxiv.org/abs/1611.03941},
year = {2016}
}
@article{Verma2015,
abstract = {Google's Borg system is a cluster manager that runs hundreds of thousands of jobs, from many thousands of different applications, across a number of clusters each with up to tens of thousands of machines. It achieves high utilization by combining admission control, efficient task-packing, over-commitment, and machine sharing with process-level performance isolation. It supports high-availability applications with runtime features that minimize fault-recovery time, and scheduling policies that reduce the probability of correlated failures. Borg simplifies life for its users by offering a declarative job specification language, name service integration, real-time job monitoring, and tools to analyze and simulate system behavior. We present a summary of the Borg system architecture and features, important design decisions, a quantitative analysis of some of its policy decisions, and a qualitative examination of lessons learned from a decade of operational experience with it.},
author = {Verma, Abhishek and Pedrosa, Luis and Korupolu, Madhukar and Oppenheimer, David and Tune, Eric and Wilkes, John},
doi = {10.1145/2741948.2741964},
isbn = {9781450332385},
journal = {Proceedings of the 10th European Conference on Computer Systems, EuroSys 2015},
title = {{Large-scale cluster management at Google with Borg}},
year = {2015}
}
@article{Jacobson1988,
abstract = {In October of '86, the Internet had the first of what became a series of 'congestion collapses'. During this period, the data throughput from LBL to UC Berkeley (sites separated by 400 yards and three IMP hops) dropped from 32 Kbps to 40 bps. Mike Karels and I were fascinated by this sudden factor-of-thousand drop in bandwidth and embarked on an investigation of why things had gotten so bad. We wondered, in particular, if the 4.3BSD (Berkeley UNIX) TCP was misbehaving or if it could be tuned to work better under abysmal network conditions. The answer to both of these questions was yes. Since that time, we have put seven new algorithms into the 4BSD TCP: (i) round-trip-time variance estimation; (ii) exponential retransmit timer backoff; (iii) slow-start; (iv) more aggressive receiver ack policy; (v) dynamic window sizing on congestion; (vi) Karn's clamped retransmit backoff; (vii) fast retransmit. Our measurements and the reports of beta testers suggest that the final product is fairly good at dealing with congested conditions on the Internet. This paper is a brief description of (i) - (v) and the rationale behind them. (vi) is an algorithm recently developed by Phil Karn of Bell Communications Research, described in [KP87]. (vii) is described in a soon-to-be-published RFC. Algorithms (i) - (v) spring from one observation: The flow on a TCP connection (or ISO TP-4 or Xerox NS SPP connection) should obey a 'conservation of packets' principle. And, if this principle were obeyed, congestion collapse would become the exception rather than the rule. Thus congestion control involves finding places that violate conservation and fixing them. By 'conservation of packets' I mean that for a connection 'in equilibrium', i.e., running stably with a full window of data in transit, the packet flow is what a physicist would call 'conservative': A new packet isn't put into the network until an old packet leaves. The physics of flow predicts that systems with this property should be robust in the face of congestion. Observation of the Internet suggests that it was not particularly robust. Why the discrepancy? There are only three ways for packet conservation to fail: (1) the connection doesn't get to equilibrium, or (2) a sender injects a new packet before an old packet has exited, or (3) the equilibrium can't be reached because of resource limits along the path. In the following sections, we treat each of these in turn.},
author = {Jacobson, Van},
doi = {10.1145/52324.52356},
isbn = {0897912799},
journal = {Symposium Proceedings on Communications Architectures and Protocols, SIGCOMM 1988},
number = {60},
pages = {314--329},
title = {{Congestion avoidance and control}},
year = {1988}
}
@article{Clark1988,
author = {Clark, David D},
pages = {1--10},
title = {{The Design Philosophy of the DARPA Internet Protocols}},
year = {1988}
}
@article{Basiri2019,
abstract = {Distributed systems often face transient errors and localized component degradation and failure. Verifying that the overall system remains healthy in the face of such failures is challenging. At Netflix, we have built a platform for automatically generating and executing chaos experiments, which check how well the production system can handle component failures and slowdowns. This paper describes the platform and our experiences operating it.},
archivePrefix = {arXiv},
arxivId = {arXiv:1905.04648v1},
author = {Basiri, Ali and Hochstein, Lorin and Jones, Nora and Tucker, Haley},
doi = {10.1109/icse-seip.2019.00012},
eprint = {arXiv:1905.04648v1},
pages = {31--40},
title = {{Automating Chaos Experiments in Production}},
year = {2019}
}
@article{Chang2006,
abstract = {Bigtable is a distributed storage system for managing structured data that is designed to scale to a very large size: petabytes of data across thousands of commodity servers. Many projects at Google store data in Bigtable, including web indexing, Google Earth, and Google Finance. These applications place very different demands on Bigtable, both in terms of data size (from URLs to web pages to satellite imagery) and latency requirements (from backend bulk processing to real-time data serving). Despite these varied demands, Bigtable has successfully provided a flexible, high-performance solution for all of these Google products. In this paper we describe the simple data model provided by Bigtable, which gives clients dynamic control over data layout and format, and we describe the design and implementation of Bigtable.},
author = {Chang, Fay and Dean, Jeffrey and Ghemawat, Sanjay and Hsieh, Wilson C. and Wallach, Deborah A. and Burrows, Mike and Chandra, Tushar and Fikes, Andrew and Gruber, Robert E.},
journal = {Proceedings of the 7th Symposium on Operating Systems Design and Implementation (OSDI '06), November 6-8, Seattle, WA, USA},
keywords = {Seminal work},
pages = {205--218},
title = {{Bigtable: A distributed storage system for structured data}},
year = {2006}
}
@article{Balakrishnan2009,
abstract = {Our goal is to explain how routing between different administrative domains works in the Internet. We discuss how Internet Service Providers (ISPs) exchange routing information, packets, and (above all) money between each other, and how the way in which they buy service from and sell service to each other and their customers influences routing. We discuss the salient features of the Border Gateway Protocol, Version 4 (BGP4, which we will refer to simply as BGP), the current interdomain routing protocol in the Internet. Finally, we discuss a few interesting failures and shortcomings of the routing system. These notes focus only on the essential elements of interdomain routing, often sacrificing detail for clarity and sweeping generality.},
author = {Balakrishnan, Hari},
journal = {Interdomain Internet Routing},
keywords = {BGP},
number = {January},
pages = {1--24},
title = {{Interdomain Internet Routing}},
url = {http://bnrg.eecs.berkeley.edu/{~}randy/Courses/CS268.F09/papers/03{\_}L4-routing.pdf},
year = {2009}
}
@article{Purohit,
abstract = {Generating a synthetic graph that is similar to a given real-world graph is a critical requirement for privacy preservation and benchmarking purposes. Various generative models attempt to generate static graphs similar to real-world graphs. However, generation of temporal graphs is still an open research area. We present a temporal-motif based approach to generate synthetic temporal graph datasets and show results from three real-world use cases. We show that our approach can generate high-fidelity synthetic graphs. We also show that this approach can generate multi-type heterogeneous graphs. We also present a parameterized version of our approach which can generate linear, sub-linear, and super-linear preferential attachment graphs.},
author = {Purohit, Sumit and Holder, Lawrence B and Chin, George},
keywords = {Graph Generative Model,Motifs Distribution,Temporal Graph},
title = {{Temporal Graph Generation Based on a Distribution of Temporal Motifs}},
url = {http://www.mlgworkshop.org/2018/papers/MLG2018{\_}paper{\_}42.pdf}
}
@article{Lin2019,
abstract = {Bitcoin is a cryptocurrency that features a distributed, decentralized and trustworthy mechanism, which has made Bitcoin a popular global transaction platform. The transaction efficiency among nations and the privacy benefiting from address anonymity of the Bitcoin network have attracted many activities such as payments, investments, gambling, and even money laundering in the past decade. Unfortunately, some criminal behaviors which took advantage of this platform were not identified. This has discouraged many governments to support cryptocurrency. Thus, the capability to identify criminal addresses becomes an important issue in the cryptocurrency network. In this paper, we propose new features in addition to those commonly used in the literature to build a classification model for detecting abnormality of Bitcoin network addresses. These features include various high orders of moments of transaction time (represented by block height) which summarizes the transaction history in an efficient way. The extracted features are trained by supervised machine learning methods on a labeling category data set. The experimental evaluation shows that these features have improved the performance of Bitcoin address classification significantly. We evaluate the results under eight classifiers and achieve the highest Micro-F1/Macro-F1 of 87{\%}/86{\%} with LightGBM.},
archivePrefix = {arXiv},
arxivId = {arXiv:1903.07994v1},
author = {Lin, Yu-Jing and Wu, Po-Wei and Hsu, Cheng-Han and Tu, I-Ping and Liao, Shih-wei},
doi = {10.1109/bloc.2019.8751410},
eprint = {arXiv:1903.07994v1},
isbn = {9781728113289},
pages = {302--310},
title = {{An Evaluation of Bitcoin Address Classification based on Transaction History Summarization}},
year = {2019}
}
@article{Kondor2014,
abstract = {A main focus in economics research is understanding the time series of prices of goods and assets. While statistical models using only the properties of the time series itself have been successful in many aspects, we expect to gain a better understanding of the phenomena involved if we can model the underlying system of interacting agents. In this article, we consider the history of Bitcoin, a novel digital currency system, for which the complete list of transactions is available for analysis. Using this dataset, we reconstruct the transaction network between users and analyze changes in the structure of the subgraph induced by the most active users. Our approach is based on the unsupervised identification of important features of the time variation of the network. Applying the widely used method of Principal Component Analysis to the matrix constructed from snapshots of the network at different times, we are able to show how structural changes in the network accompany significant changes in the exchange price of bitcoins.},
author = {Kondor, D{\'{a}}niel and Csabai, Istv{\'{a}}n and Sz{\"{u}}le, J{\'{a}}nos and P{\'{o}}sfai, M{\'{a}}rton and Vattay, G{\'{a}}bor},
doi = {10.1088/1367-2630/16/12/125003},
issn = {13672630},
journal = {New Journal of Physics},
keywords = {Bitcoin, transaction network,financial network,principal component analysis,temporal network},
publisher = {IOP Publishing},
title = {{Inferring the interplay between network structure and market effects in Bitcoin}},
volume = {16},
year = {2014}
}
@article{Babveyh2009,
abstract = {Introduction Bitcoin is a form of crypto currency introduced by Satoshi Nakamoto in 2009. Even though it only received interest from the software community at the beginning, today Bitcoin has a market cap of 5 billion dollars. Bitcoin offers a certain layer of anonymity to its users. Everyone can participate in the Bitcoin network without any registration process. Bitcoin transactions use a pair of elliptic curve cryptographic keys to redeem and spend the money. The money is held in the public key, which is also called address. Each public key has a corresponding secret key, known only to the owner of the account. The user can redeem the money in an address using his/her secret key [1]. The best practice is to use new keys for every new transaction to retain the user anonymity. If an address is used more than once, it would reveal some information about the users involved in the transaction. It could also reveal information about other users who were not part of this transaction, but simply had a transaction with one of the users involved in it. The most popular type of transaction in Bitcoin is pay to public hash. Each transaction has an input field and an output field. The input field specifies the addresses, which contain the funds. The output address specifies the destination address. The funds specified in the input are equal to the funds in the output. For example if Alice owns two addresses A1, A2 each holding 5 Bitcoin, and wants to deposit 7 Bitcoin to Bob with address B1, the transaction will have A1 and A2 as input. It will have B1 with 7 Bitcoins, and A3 with 3 Bitcoins as the output. Alice controls A3, which is also called change address. There are several heuristic methods to cluster addresses into users. As an example, one could argue that all the addresses in the input field belong to the same entity which is used in creating the dataset on the course website. It is easy to see that if an address is used more than once, it would reveal information about anyone that ever had a transaction with it. The best practice in Bitcoin is to use a new address for every new transaction [2]. This work is organized as follows. In the first section we review the prior work and we have obtained our data. Next, we will look at address reuse in the Bitcoin network. We show that a great portion of users reuse their addresses which could enable us to cluster the addresses and attribute them to single users. Next, we will categorize the nodes based on their role in the network as a customer or seller. Finally, we do a study of nodes and network performance.},
author = {Babveyh, Afshin and Ebrahimi, Sadegh},
pages = {1--8},
title = {{Predicting User Performance and Bitcoin Price Using Block Chain Transaction Network}},
year = {2009}
}
@article{Paranjape2017,
abstract = {Networks are a fundamental tool for modeling complex systems in a variety of domains including social and communication networks as well as biology and neuroscience. Small subgraph patterns in networks, called network motifs, are crucial to understanding the structure and function of these systems. However, the role of network motifs in temporal networks, which contain many timestamped links between the nodes, is not yet well understood. Here we develop a notion of a temporal network motif as an elementary unit of temporal networks and provide a general methodology for counting such motifs. We define temporal network motifs as induced subgraphs on sequences of temporal edges, design fast algorithms for counting temporal motifs, and prove their runtime complexity. Our fast algorithms achieve up to 56.5x speedup compared to a baseline method. Furthermore, we use our algorithms to count temporal motifs in a variety of networks. Results show that networks from different domains have significantly different motif counts, whereas networks from the same domain tend to have similar motif counts. We also find that different motifs occur at different time scales, which provides further insights into structure and function of temporal networks.},
author = {Paranjape, Ashwin and Benson, Austin R. and Leskovec, Jure},
doi = {10.1145/3018661.3018731},
isbn = {9781450346757},
journal = {WSDM 2017 - Proceedings of the 10th ACM International Conference on Web Search and Data Mining},
pages = {601--610},
title = {{Motifs in temporal networks}},
year = {2017}
}
@article{Norman,
author = {Norman, Archie},
pages = {1--54},
title = {{Classification of Bitcoin transactions based on supervised machine learning and transaction network metrics}}
}
@article{Fire2019,
abstract = {Trends change rapidly in today's world, prompting this key question: What is the mechanism behind the emergence of new trends? By representing real-world dynamic systems as complex networks, the emergence of new trends can be symbolized by vertices that “shine.” That is, at a specific time interval in a network's life, certain vertices become increasingly connected to other vertices. This process creates new high-degree vertices, i.e., network stars. Thus, to study trends, we must look at how networks evolve over time and determine how the stars behave. In our research, we constructed the largest publicly available network evolution dataset to date, which contains 38,000 real-world networks and 2.5 million graphs. Then, we performed the first precise wide-scale analysis of the evolution of networks with various scales. Three primary observations resulted: (a) links are most prevalent among vertices that join a network at a similar time; (b) the rate that new vertices join a network is a central factor in molding a network's topology; and (c) the emergence of network stars (high-degree vertices) is correlated with fast-growing networks. We applied our learnings to develop a flexible network-generation model based on large-scale, real-world data. This model gives a better understanding of how stars rise and fall within networks, and is applicable to dynamic systems both in nature and society.},
archivePrefix = {arXiv},
arxivId = {arXiv:1706.06690v3},
author = {Fire, Michael and Guestrin, Carlos},
doi = {10.1016/j.ipm.2019.05.002},
eprint = {arXiv:1706.06690v3},
issn = {03064573},
journal = {Information Processing and Management},
keywords = {Big data,Data science,Network datasets,Network dynamics,Network science},
title = {{The rise and fall of network stars: Analyzing 2.5 million graphs to reveal how high-degree vertices emerge over time}},
year = {2019}
}
@article{Kondor2014a,
abstract = {The possibility to analyze everyday monetary transactions is limited by the scarcity of available data, as this kind of information is usually considered highly sensitive. Present econophysics models are usually employed on presumed random networks of interacting agents, and only macroscopic properties (e.g. the resulting wealth distribution) are compared to real-world data. In this paper, we analyze BitCoin, which is a novel digital currency system, where the complete list of transactions is publicly available. Using this dataset, we reconstruct the network of transactions, and extract the time and amount of each payment. We analyze the structure of the transaction network by measuring network characteristics over time, such as the degree distribution, degree correlations and clustering. We find that linear preferential attachment drives the growth of the network. We also study the dynamics taking place on the transaction network, i.e. the flow of money. We measure temporal patterns and the wealth accumulation. Investigating the microscopic statistics of money movement, we find that sublinear preferential attachment governs the evolution of the wealth distribution. We report a scaling relation between the degree and wealth associated to individual nodes.},
archivePrefix = {arXiv},
arxivId = {arXiv:1308.3892v3},
author = {Kondor, D{\'{a}}niel and P{\'{o}}sfai, M{\'{a}}rton and Csabai, Istv{\'{a}}n and Vattay, G{\'{a}}bor},
doi = {10.1371/journal.pone.0086197},
eprint = {arXiv:1308.3892v3},
issn = {19326203},
journal = {PLoS ONE},
number = {2},
pages = {1--9},
title = {{Do the rich get richer? An empirical analysis of the Bitcoin transaction network}},
volume = {9},
year = {2014}
}
@article{Garcia2014,
abstract = {What is the role of social interactions in the creation of price bubbles? Answering this question requires obtaining collective behavioural traces generated by the activity of a large number of actors. Digital currencies offer a unique possibility to measure socio-economic signals from such digital traces. Here, we focus on Bitcoin, the most popular cryptocurrency. Bitcoin has experienced periods of rapid increase in exchange rates (price) followed by sharp decline; we hypothesize that these fluctuations are largely driven by the interplay between different social phenomena. We thus quantify four socio-economic signals about Bitcoin from large datasets: price on online exchanges, volume of word-of-mouth communication in online social media, volume of information search and user base growth. By using vector autoregression, we identify two positive feedback loops that lead to price bubbles in the absence of exogenous stimuli: one driven by word of mouth, and the other by new Bitcoin adopters. We also observe that spikes in information search, presumably linked to external events, precede drastic price declines. Understanding the interplay between the socio-economic signals we measured can lead to applications beyond cryptocurrencies to other phenomena that leave digital footprints, such as online social network usage.},
author = {Garcia, David and Tessone, Claudio J. and Mavrodiev, Pavlin and Perony, Nicolas},
doi = {10.1098/rsif.2014.0623},
issn = {17425662},
journal = {Journal of the Royal Society Interface},
keywords = {Bitcoin,Bubbles,Social interactions,Socio-economic signals},
number = {99},
title = {{The digital traces of bubbles: Feedback cycles between socio-economic signals in the Bitcoin economy}},
volume = {11},
year = {2014}
}
@article{Pham2016a,
abstract = {The problem of anomaly detection has been studied for a long time, and many Network Analysis techniques have been proposed as solutions. Although some results appear to be quite promising, no method is clearly superior to the rest. In this paper, we particularly consider anomaly detection in the Bitcoin transaction network. Our goal is to detect which users and transactions are the most suspicious; in this case, anomalous behavior is a proxy for suspicious behavior. To this end, we use the laws of power degree and densification and local outlier factor (LOF) method (which is preceded by the k-means clustering method) on two graphs generated by the Bitcoin transaction network: one graph has users as nodes, and the other has transactions as nodes. We remark that the methods used here can be applied to any type of setting with an inherent graph structure, including, but not limited to, computer networks, telecommunications networks, auction networks, security networks, social networks, Web networks, or any financial networks. We use the Bitcoin transaction network in this paper due to the availability, size, and attractiveness of the data set.},
archivePrefix = {arXiv},
arxivId = {1611.03942},
author = {Pham, Thai and Lee, Steven},
eprint = {1611.03942},
keywords = {anomaly detection,bitcoin,k-means,lof},
title = {{Anomaly Detection in the Bitcoin System - A Network Perspective}},
url = {http://arxiv.org/abs/1611.03942},
year = {2016}
}
@article{Hassani2018,
abstract = {Cryptocurrency has been a trending topic over the past decade, pooling tremendous technological power and attracting investments valued over trillions of dollars on a global scale. The cryptocurrency technology and its network have been endowed with many superior features due to its unique architecture, which also determined its worldwide efficiency, applicability and data intensive characteristics. This paper introduces and summarises the interactions between two significant concepts in the digitalized world, i.e., cryptocurrency and Big Data. Both subjects are at the forefront of technological research, and this paper focuses on their convergence and comprehensively reviews the very recent applications and developments after 2016. Accordingly, we aim to present a systematic review of the interactions between Big Data and cryptocurrency and serve as the one stop reference directory for researchers with regard to identifying research gaps and directing future explorations.},
author = {Hassani, Hossein and Huang, Xu and Silva, Emmanuel},
doi = {10.3390/bdcc2040034},
journal = {Big Data and Cognitive Computing},
keywords = {big data,bitcoin,blockchain,cryptocurrency,review},
number = {4},
pages = {34},
title = {{Big-Crypto: Big Data, Blockchain and Cryptocurrency}},
volume = {2},
year = {2018}
}
@article{Sullivan2018,
author = {Sullivan, Danielle and Tran, Tuan and Gu, Huaping},
title = {{Fraud Detection in Signed Bitcoin Trading Platform Networks}},
year = {2018}
}
@article{Leskovec2014,
author = {Leskovec, Jure and Rajaraman, Anand and Ullman, Jeffrey David},
doi = {10.1017/cbo9781139924801.011},
journal = {Mining of Massive Datasets},
pages = {325--383},
title = {{Mining Social-Network Graphs}},
year = {2014}
}
@article{Lischke2016,
abstract = {In this explorative study, we examine the economy and transaction network of the decentralized digital currency Bitcoin during the first four years of its existence. The objective is to develop insights into the evolution of the Bitcoin economy during this period. For this, we establish and analyze a novel integrated dataset that enriches data from the Bitcoin blockchain with off-network data such as business categories and geo-locations. Our analyses reveal the major Bitcoin businesses and markets. Our results also give insights on the business distribution by countries and how businesses evolve over time. We also show that there is a gambling network that features many very small transactions. Furthermore, regional differences in the adoption and business distribution could be found. In the network analysis, the small world phenomenon is investigated and confirmed for several subgraphs of the Bitcoin network.},
author = {Lischke, Matthias and Fabian, Benjamin},
doi = {10.3390/fi8010007},
issn = {19995903},
journal = {Future Internet},
keywords = {Bitcoin,Blockchain,Complex networks,Cryptocurrencies,Electronic payment,Graph analysis,Network analysis},
number = {1},
title = {{Analyzing the Bitcoin Network: The First Four Years}},
volume = {8},
year = {2016}
}
@article{Hirshman2013,
author = {Hirshman, Jason and Huang, Yifei and Macke, Stephen},
journal = {Technical Report, Stanford University},
title = {{Unsupervised Approaches to Detecting Anomalous Behavior in the Bitcoin Transaction Network}},
year = {2013}
}
@article{Acar,
author = {Acar, Umut A and Blelloch, Guy E and Blume, Matthias},
isbn = {1595933204},
keywords = {computational geometry,dependence graphs,dynamic,dynamic algorithms,memoization,self-adjusting computation},
title = {{An Experimental Analysis of Self-Adjusting Computation}}
}
@article{Haslhofer2016,
abstract = {Bitcoin is a rising digital currency and exemplifies the growing need for systematically gathering and analyzing public transaction data sets such as the blockchain. However, the blockchain in its raw form is just a large ledger listing transfers of currency units between alphanumeric character strings, without revealing contextually relevant real-world information. In this demo, we present GraphSense, which is a solution that applies a graph-centric perspective on digital currency transactions. It allows users to explore transactions and follow the money flow, facilitates analytics by semantically enriching the transaction graph, supports path and graph pattern search, and guides analysts to anomalous data points. To deal with the growing volume and velocity of transaction data, we implemented our solution on a horizontally scalable data processing and analytics infrastructure. Given the ongoing digital transformation in financial services and technologies, we believe that our approach contributes to development of analytics solutions for digital currency ecosystems, which is relevant in fields such as financial analytics, law enforcement, or scientific research.},
author = {Haslhofer, Bernhard and Karl, Roman and Filtz, Erwin},
issn = {16130073},
journal = {CEUR Workshop Proceedings},
keywords = {Anomaly detection,Bitcoin,Graph processing},
pages = {1--4},
title = {{O Bitcoin where art thou? Insight into large-scale transaction graphs}},
volume = {1695},
year = {2016}
}
@article{Chen2012,
abstract = {Application data often changes slowly or incrementally over time. Since incremental changes to input often result in only small changes in output, it is often feasible to respond to such changes asymptotically more efficiently than by re-running the whole computation. Traditionally, realizing such asymptotic efficiency improvements requires designing problem-specific algorithms known as dynamic or incremental algorithms, which are often significantly more complicated than conventional algorithms to design, analyze, implement, and use. A long-standing open problem is to develop techniques that automatically transform conventional programs so that they correctly and efficiently respond to incremental changes. In this paper, we describe a significant step towards solving the problem of automatic incrementalization: a programming language and a compiler that can, given a few type annotations describing what can change over time, compile a conventional program that assumes its data to be static (unchanging over time) to an incremental program. Based on recent advances in self-adjusting computation, including a theoretical proposal for translating purely functional programs to self-adjusting programs, we develop techniques for translating conventional Standard ML programs to self-adjusting programs. By extending the Standard ML language, we design a fully featured programming language with higher-order features, a module system, and a powerful type system, and implement a compiler for this language. The resulting programming language, LML, enables translating conventional programs decorated with simple type annotations into incremental programs that can respond to changes in their data correctly and efficiently. We evaluate the effectiveness of our approach by considering a range of benchmarks involving lists, vectors, and matrices, as well as a ray tracer. For these benchmarks, our compiler incrementalizes existing code with only trivial amounts of annotation. The resulting programs are often asymptotically more efficient, leading to orders of magnitude speedups in practice.},
author = {Chen, Yan and Dunfield, Joshua and Acar, Umut A.},
doi = {10.1145/2345156.2254100},
isbn = {9781450312059},
issn = {15232867},
journal = {ACM SIGPLAN Notices},
keywords = {Compiler optimization,Incrementalization,Performance,Self-adjusting computation,Type annotations},
number = {6},
pages = {299--310},
title = {{Type-directed automatic incrementalization}},
volume = {47},
year = {2012}
}
@article{Ahmad2009,
abstract = {We present DBToaster, a novel query compilation framework for producing high performance compiled query executors that incrementally and continuously answer standing aggregate queries using in-memory views. DBToaster targets applications that require efficient main-memory processing of standing queries (views) fed by high-volume data streams, recursively compiling view maintenance (VM) queries into simple C++ functions for evaluating database updates (deltas). While today's VM algorithms consider the impact of single deltas on view queries to produce maintenance queries, we recursively consider deltas of maintenance queries and compile to thoroughly transform queries into code. Recursive compilation successively elides certain scans and joins, and eliminates significant query plan interpreter overheads. In this demonstration, we walk through our compilation algorithm, and show the significant performance advantages of our compiled executors over other query processors. We are able to demonstrate 1-3 orders of magnitude improvements in processing times for a financial application and a data warehouse loading application, both implemented across a wide range of database systems, including PostgreSQL, HSQLDB, a commercial DBMS 'A', the Stanford STREAM engine, and a commercial stream processor 'B'.},
author = {Ahmad, Yanif and Koch, Christoph},
doi = {10.14778/1687553.1687592},
issn = {21508097},
journal = {Proceedings of the VLDB Endowment},
number = {2},
pages = {1566--1569},
title = {{DBToaster: A SQL compiler for high-performance delta processing in main-memory databases}},
volume = {2},
year = {2009}
}
@article{Dewar1979,
author = {Dewar, Robert B.K. and Grand, Arthur and Liu, Ssu Cheng and Schwartz, Jacob T. and Schonberg, Edmond},
doi = {10.1145/357062.357064},
issn = {15584593},
journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)},
keywords = {automatic data structure choice,high level languages,optimization,set-theoretic languages,stepwise refinement},
number = {1},
pages = {27--49},
title = {{Programming by Refinement, as Exemplified by the SETL Representation Sublanguage}},
volume = {1},
year = {1979}
}
@article{Ramsey1930,
author = {Ramsey, F. P.},
doi = {10.1112/plms/s2-30.1.264},
issn = {1460244X},
journal = {Proceedings of the London Mathematical Society},
number = {1},
pages = {264--286},
title = {{On a problem of formal logic}},
volume = {s2-30},
year = {1930}
}
@article{Thompson1984,
abstract = {To what extent should one trust a statement that a program is free of Trojan horses?  Perhaps it is more important to trust the people who wrote the software.},
author = {Thompson, Ken},
doi = {10.1145/358198.358210},
issn = {15577317},
journal = {Communications of the ACM},
number = {8},
pages = {761--763},
title = {{Reflections on Trusting Trust}},
volume = {27},
year = {1984}
}
@article{Koch2014,
abstract = {Applications ranging from algorithmic trading to scientific data analysis require realtime analytics based on views over databases that change at very high rates. Such views have to be kept fresh at low maintenance cost and latencies. At the same time, these views have to support classical SQL, rather than window semantics, to enable applications that combine current with aged or historical data. In this paper, we present viewlet transforms, a recursive finite differencing technique applied to queries. The viewlet transform materializes a query and a set of its higher-order deltas as views. These views support each other's incremental maintenance, leading to a reduced overall view maintenance cost. The viewlet transform of a query admits efficient evaluation, the elimination of certain expensive query operations, and aggressive parallelization. We develop viewlet transforms into a workable query execution technique, present a heuristic and cost-based optimization framework, and report on experiments with a prototype dynamic data management system that combines viewlet transforms with an optimizing compilation technique. The system supports tens of thousands of complete view refreshes a second for a wide range of queries.},
author = {Koch, Christoph and Ahmad, Yanif and Kennedy, Oliver and Nikolic, Milos and N{\"{o}}tzli, Andres and Lupei, Daniel and Shaikhha, Amir},
doi = {10.1007/s00778-013-0348-4},
issn = {0949877X},
journal = {VLDB Journal},
keywords = {Compilation,Database queries,Incremental view maintenance,Materialized views},
number = {2},
pages = {253--278},
title = {{DBToaster: Higher-order delta processing for dynamic, frequently fresh views}},
volume = {23},
year = {2014}
}
@article{Chen2018a,
abstract = {There is an increasing need to bring machine learning to a wide diversity of hardware devices. Current frameworks rely on vendor-specific operator libraries and optimize for a narrow range of server-class GPUs. Deploying workloads to new platforms -- such as mobile phones, embedded devices, and accelerators (e.g., FPGAs, ASICs) -- requires significant manual effort. We propose TVM, a compiler that exposes graph-level and operator-level optimizations to provide performance portability to deep learning workloads across diverse hardware back-ends. TVM solves optimization challenges specific to deep learning, such as high-level operator fusion, mapping to arbitrary hardware primitives, and memory latency hiding. It also automates optimization of low-level programs to hardware characteristics by employing a novel, learning-based cost modeling method for rapid exploration of code optimizations. Experimental results show that TVM delivers performance across hardware back-ends that are competitive with state-of-the-art, hand-tuned libraries for low-power CPU, mobile GPU, and server-class GPUs. We also demonstrate TVM's ability to target new accelerator back-ends, such as the FPGA-based generic deep learning accelerator. The system is open sourced and in production use inside several major companies.},
archivePrefix = {arXiv},
arxivId = {1802.04799},
author = {Chen, Tianqi and Moreau, Thierry and Jiang, Ziheng and Zheng, Lianmin and Yan, Eddie and Cowan, Meghan and Shen, Haichen and Wang, Leyuan and Hu, Yuwei and Ceze, Luis and Guestrin, Carlos and Krishnamurthy, Arvind},
eprint = {1802.04799},
title = {{TVM: An Automated End-to-End Optimizing Compiler for Deep Learning}},
url = {http://arxiv.org/abs/1802.04799},
year = {2018}
}
@article{Author,
author = {Author, Anonymous},
keywords = {data structures,incremental computation,pro-},
title = {{Incrementalization with Data Structures}}
}
@article{Loncaric2016,
author = {Loncaric, Calvin and Torlak, Emina and Ernst, Michael D.},
isbn = {9781450342612},
journal = {PLDI '16: Proceedings of the 37th ACM SIGPLAN Conference on Programming Language Design and Implementation},
keywords = {data structure synthesis},
title = {{Fast Synthesis of Fast Collections}},
year = {2016}
}
@article{Loncaric2016a,
author = {Loncaric, Calvin and Torlak, Emina and Ernst, Michael D.},
doi = {10.1145/2908080.2908122},
isbn = {9781450342612},
journal = {Proceedings of the ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI)},
keywords = {Data structure synthesis},
pages = {355--368},
title = {{Fast synthesis of fast collections}},
volume = {13-17-June},
year = {2016}
}
@article{Liu2016,
abstract = {Object queries are significantly easier to write, understand, and maintain than efficient low-level programs. However, a query may involve any number and combination of objects and sets, which can be arbitrarily nested and aliased. The objects and sets involved, starting from the given demand---the given parameter values of interest---can change arbitrarily. How to generate efficient implementations automatically, and furthermore to provide complexity guarantees? This paper describes such an automatic method. The method allows the queries to be written completely declaratively. It transforms demand into relations, based on the same basic idea for transforming objects and sets into relations in a prior work. Most importantly, it defines and incrementally maintains invariants for not only the query results, but also all auxiliary values about the objects and sets involved, starting from the demand. Implementation and experiments with problems from a variety of application areas, including distributed algorithms, confirm the analyzed complexities, trade-offs, and significant improvements over prior works.},
author = {Liu, Yanhong A. and Brandvein, Jon and Stoller, Scott D. and Lin, Bo},
doi = {10.1145/2967973.2968610},
isbn = {9781450341486},
journal = {Proceedings of the 18th International Symposium on Principles and Practice of Declarative Programming, PPDP 2016},
keywords = {Complexity guarantees,Demand-driven incremental computation,Object queries,Program transformation},
pages = {228--241},
title = {{Demand-driven incremental object queries}},
year = {2016}
}
@article{Raychev2019,
abstract = {We present a new approach for predicting program properties from massive codebases (aka "Big Code"). Our approach first learns a probabilistic model from existing data and then uses this model to predict properties of new, unseen programs. The key idea of our work is to transform the input program into a representation which allows us to phrase the problem of inferring program properties as structured prediction in machine learning. This formulation enables us to leverage powerful probabilistic graphical models such as conditional random fields (CRFs) in order to perform joint prediction of program properties. As an example of our approach, we built a scalable prediction engine called JSNice for solving two kinds of problems in the context of JavaScript: predicting (syntactic) names of identifiers and predicting (semantic) type annotations of variables. Experimentally, JSNice predicts correct names for 63{\%} of name identifiers and its type annotation predictions are correct in 81{\%} of the cases. In the first week since its release, JSNice was used by more than 30,000 developers and in only few months has become a popular tool in the JavaScript developer community. By formulating the problem of inferring program properties as structured prediction and showing how to perform both learning and inference in this context, our work opens up new possibilities for attacking a wide range of difficult problems in the context of "Big Code" including invariant generation, decompilation, synthesis and others.},
author = {Raychev, Veselin and Vechev, Martin and Krause, Andreas},
doi = {10.1145/3306204},
isbn = {9781450333009},
issn = {15577317},
journal = {Communications of the ACM},
number = {3},
pages = {99--107},
title = {{Predicting program properties from 'big code'}},
volume = {62},
year = {2019}
}
@article{Decker2018,
abstract = {Modern machine learning frameworks have one commonality: the primary interface, for better or worse, is Python. Python is widely appreciated for its low barrier of entry due to its high-level built-ins and use of dynamic typing. However, these same features are also often attributed to causing the significant performance gap between the front-end in which users are asked to develop, and the highly-optimized back-end kernels which are ultimately called (generally written in a lower-level language like C). This has led to frameworks like TensorFlow requiring programs which consist almost entirely of API calls, with the appearance of only coincidentally being implemented in Python, the language. All recent ML frameworks have recognized this gap between usability and performance as a problem and aim to bridge the gap in generally one of two ways. In the case of tools like PyTorch's JIT compiler, executed tensor operations can be recorded via tracing based on operator overloading. In the case of tools like PyTorch's Torch Script, Python functions can be marked for translation entirely to a low-level language. However, both tracing and wholesale translation in this fashion have significant downsides in the respective inability to capture data-dependent control flow and the missed opportunities for optimization via execution while a low-level IR is built up. In this paper, we demonstrate the ability to overcome these shortcomings by performing a relatively simple source-to-source transformation, that allows for operator overloading techniques to be extended to language built-ins, including control flow operators, function definitions, etc. We utilize a preexisting PLT Redex implementation of Python's core grammar in order to provide assurances that our transformations are semantics preserving with regard to standard Python. We then instantiate our overloading approach to generate code, which enables a form of multi-stage programming in Python. We capture the required transformations in a proof-of-concept, back-end agnostic, system dubbed Snek, and demonstrate their use in a production system released as part of TensorFlow, called AutoGraph. Finally, we provide an empirical evaluation of these systems and show performance benefits even with existing systems like TensorFlow, Torch Script, and Lantern as back-ends.},
author = {Decker, James M and Moldovan, Dan and Wei, Guannan and Bhardwaj, Vritant and Essertel, Gregory and Wang, Fei and Wiltschko, Alexander B and Rompf, Tiark},
number = {November},
pages = {1--14},
title = {{The 800 Pound Python in the Machine Learning Room}},
volume = {1},
year = {2018}
}
@article{Li2015,
abstract = {Graph-structured data appears frequently in domains including chemistry, natural language semantics, social networks, and knowledge bases. In this work, we study feature learning techniques for graph-structured inputs. Our starting point is previous work on Graph Neural Networks (Scarselli et al., 2009), which we modify to use gated recurrent units and modern optimization techniques and then extend to output sequences. The result is a flexible and broadly useful class of neural network models that has favorable inductive biases relative to purely sequence-based models (e.g., LSTMs) when the problem is graph-structured. We demonstrate the capabilities on some simple AI (bAbI) and graph algorithm learning tasks. We then show it achieves state-of-the-art performance on a problem from program verification, in which subgraphs need to be matched to abstract data structures.},
archivePrefix = {arXiv},
arxivId = {1511.05493},
author = {Li, Yujia and Tarlow, Daniel and Brockschmidt, Marc and Zemel, Richard},
eprint = {1511.05493},
number = {1},
pages = {1--20},
title = {{Gated Graph Sequence Neural Networks}},
url = {http://arxiv.org/abs/1511.05493},
year = {2015}
}
@article{Frostig2018,
abstract = {We describe JAX, a domain-specific tracing JIT compiler for generating high-performance accelerator code from pure Python and Numpy machine learning programs. JAX uses the XLA compiler infrastructure to generate optimized code for the program subrou-tines that are most favorable for acceleration, and these optimized subroutines can be called and orchestrated by arbitrary Python. Because the system is fully compatible with Autograd, it allows forward-and reverse-mode automatic differentiation of Python functions to arbitrary order. Because JAX supports structured control flow, it can generate code for sophisticated machine learning algorithms while maintaining high performance. We show that by combining JAX with Autograd and Numpy we get an easily pro-grammable and highly performant ML system that targets CPUs, GPUs, and TPUs, capable of scaling to multi-core Cloud TPUs.},
author = {Frostig, Roy and Johnson, Matthew James and Leary, Chris},
journal = {SysML 2018},
title = {{Compiling machine learning programs via high-level tracing}},
url = {http://www.sysml.cc/doc/146.pdf},
year = {2018}
}
@article{Si2018,
abstract = {A fundamental problem in program verification concerns inferring loop invariants. The problem is undecidable and even practical instances are challenging. Inspired by how human experts construct loop invariants, we propose a reasoning framework Code2Inv that constructs the solution by multi-step decision making and querying an external program graph memory block. By training with reinforcement learning, Code2Inv captures rich program features and avoids the need for ground truth solutions as supervision. Compared to previous learning tasks in domains with graph-structured data, it addresses unique challenges, such as a binary objective function and an extremely sparse reward that is given by an automated theorem prover only after the complete loop invariant is proposed. We evaluate Code2Inv on a suite of 133 benchmark problems and compare it to three state-of-the-art systems. It solves 106 problems compared to 73 by a stochastic search-based system, 77 by a heuristic search-based system, and 100 by a decision tree learning-based system. Moreover, the strategy learned can be generalized to new programs: compared to solving new instances from scratch, the pre-trained agent is more sample efficient in finding solutions.},
author = {Si, Xujie and Dai, Hanjun and Raghothaman, Mukund and Naik, Mayur and Song, Le},
issn = {10495258},
journal = {Advances in Neural Information Processing Systems},
pages = {7751--7762},
title = {{Learning loop invariants for program verification}},
volume = {2018-December},
year = {2018}
}
@article{Hammer2014,
author = {Hammer, Matthew A and Phang, Khoo Yit and Hicks, Michael and Foster, Jeffrey S},
pages = {1--17},
title = {{Adapton: Composable, Demand-Driven Incremental Computation (Extended Version)}},
year = {2014}
}
@article{Wang2018,
abstract = {Despite the recent successes of deep neural networks in various fields such as image and speech recognition, natural language processing, and reinforcement learning, we still face big challenges in bringing the power of numeric optimization to symbolic reasoning. Researchers have proposed different avenues such as neural machine translation for proof synthesis, vectorization of symbols and expressions for representing symbolic patterns, and coupling of neural back-ends for dimensionality reduction with symbolic front-ends for decision making. However, these initial explorations are still only point solutions, and bear other shortcomings such as lack of correctness guarantees. In this paper, we present our approach of casting symbolic reasoning as games, and directly harnessing the power of deep reinforcement learning in the style of Alpha(Go) Zero on symbolic problems. Using the Boolean Satisfiability (SAT) problem as showcase, we demonstrate the feasibility of our method, and the advantages of modularity, efficiency, and correctness guarantees.},
archivePrefix = {arXiv},
arxivId = {1802.05340},
author = {Wang, Fei and Rompf, Tiark},
eprint = {1802.05340},
pages = {1--4},
title = {{From Gameplay to Symbolic Reasoning: Learning SAT Solver Heuristics in the Style of Alpha(Go) Zero}},
url = {http://arxiv.org/abs/1802.05340},
year = {2018}
}
@article{Roesch2018,
abstract = {Machine learning powers diverse services in industry including search, translation, recommendation systems, and security. The scale and importance of these models require that they be efficient, expressive, and portable across an array of heterogeneous hardware devices. These constraints are often at odds; in order to better accommodate them we propose a new high-level intermediate representation (IR) called Relay. Relay is being designed as a purely-functional, statically-typed language with the goal of balancing efficient compilation, expressiveness, and portability. We discuss the goals of Relay and highlight its important design constraints. Our prototype is part of the open source NNVM compiler framework, which powers Amazon's deep learning framework MxNet.},
author = {Roesch, Jared and Lyubomirsky, Steven and Weber, Logan and Pollock, Josh and Kirisame, Marisa and Chen, Tianqi and Tatlock, Zachary},
doi = {10.1145/3211346.3211348},
isbn = {9781450358347},
journal = {MAPL 2018 - Proceedings of the 2nd ACM SIGPLAN International Workshop on Machine Learning and Programming Languages, co-located with PLDI 2018},
keywords = {Compilers,Differentiable programming,Intermediate representation,Machine learning},
pages = {58--68},
title = {{Relay: A new IR for machine learning frameworks}},
year = {2018}
}
@article{Chen2018b,
abstract = {We introduce a learning-based framework to optimize tensor programs for deep learning workloads. Efficient implementations of tensor operators, such as matrix multiplication and high dimensional convolution, are key enablers of effective deep learning systems. However, existing systems rely on manually optimized libraries such as cuDNN where only a narrow range of server class GPUs are well-supported. The reliance on hardware-specific operator libraries limits the applicability of high-level graph optimizations and incurs significant engineering costs when deploying to new hardware targets. We use learning to remove this engineering burden. We learn domain-specific statistical cost models to guide the search of tensor operator implementations over billions of possible program variants. We further accelerate the search by effective model transfer across workloads. Experimental results show that our framework delivers performance competitive with state-of-the-art hand-tuned libraries for low-power CPU, mobile GPU, and server-class GPU.},
archivePrefix = {arXiv},
arxivId = {arXiv:1805.08166v2},
author = {Chen, Tianqi and Zheng, Lianmin and Yan, Eddie and Jiang, Ziheng and Moreau, Thierry and Ceze, Luis and Guestrin, Carlos and Krishnamurthy, Arvind},
eprint = {arXiv:1805.08166v2},
issn = {10495258},
journal = {Advances in Neural Information Processing Systems},
pages = {3389--3400},
title = {{Learning to optimize tensor programs}},
volume = {2018-December},
year = {2018}
}
@article{Bui2018,
abstract = {Translating a program written in one programming language to another can be useful for software development tasks that need functionality implementations in different languages. Although past studies have considered this problem, they may be either specific to the language grammars, or specific to certain kinds of code elements (e.g., tokens, phrases, API uses). This paper proposes a new approach to automatically learn cross-language representations for various kinds of structural code elements that may be used for program translation. Our key idea is two folded: First, we normalize and enrich code token streams with additional structural and semantic information, and train cross-language vector representations for the tokens (a.k.a. shared embeddings based on word2vec, a neural-network-based technique for producing word embeddings; Second, hierarchically from bottom up, we construct shared embeddings for code elements of higher levels of granularity (e.g., expressions, statements, methods) from the embeddings for their constituents, and then build mappings among code elements across languages based on similarities among embeddings. Our preliminary evaluations on about 40,000 Java and C{\#} source files from 9 software projects show that our approach can automatically learn shared embeddings for various code elements in different languages and identify their cross-language mappings with reasonable Mean Average Precision scores. When compared with an existing tool for mapping library API methods, our approach identifies many more mappings accurately. The mapping results and code can be accessed at https://github.com/bdqnghi/hierarchical-programming-language-mapping. We believe that our idea for learning cross-language vector representations with code structural information can be a useful step towards automated program translation.},
archivePrefix = {arXiv},
arxivId = {arXiv:1803.04715v1},
author = {Bui, Nghi D.Q. and Jiang, Lingxiao},
doi = {10.1145/3183399.3183427},
eprint = {arXiv:1803.04715v1},
isbn = {9781450356626},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
keywords = {language mapping,program translation,software maintenance,syntactic structure,word2vec},
number = {2},
pages = {33--36},
title = {{Hierarchical learning of cross-language mappings through distributed vector representations for code}},
year = {2018}
}
@article{Ernst2007,
author = {Pacheco, Carlos and Ernst, Michael D},
isbn = {9781595937865},
pages = {6--7},
title = {{Randoop: Feedback-Directed Random Testing for Java}},
volume = {5},
year = {2007}
}
@article{EllDocuMENTe2014,
abstract = {Many researchers have proposed programming languages that support incremental computation (IC), which allows programs to be efficiently re-executed after a small change to the input. However, existing implementations of such languages have two important drawbacks. First, recomputation is oblivious to specific demands on the program output; that is, if a program input changes, all dependencies will be recomputed, even if an observer no longer requires certain outputs. Second, programs are made incremental as a unit, with little or no support for reusing results outside of their original context, e.g., when reordered. To address these problems, we present $\lambda^{cdd}_{ic}$, a core calculus that applies a demand-driven semantics to incremental computation, tracking changes in a hierarchical fashion in a novel demanded computation graph. $\lambda^{cdd}_{ic}$ also formalizes an explicit separation between inner, incremental computations and outer observers. This combination ensures $\lambda^{cdd}_{ic}$ programs only recompute computations as demanded by observers, and allows inner computations to be reused more liberally. We present ADAPTON, an OCaml library implementing $\lambda^{cdd}_{ic}$. We evaluated ADAPTON on a range of benchmarks, and found that it provides reliable speedups, and in many cases dramatically outperforms state-of-the-art IC approaches.},
author = {Hammer, Matthew A and Phang, Khoo Yit and Hicks, Michael and Foster, Jeffrey S},
doi = {10.1145/2594291.2594324},
isbn = {9781450327848},
keywords = {D33 [Programming Languages],F32 [Logics and Meanings of Programs],Formal Definitions and Theory,Language Constructs and Features,call-by-push-value (CBPV),self-adjusting computation,thunks},
title = {{Adapton: Composable, Demand-Driven Incremental Computation}},
year = {2014}
}
@article{Rasthofer2014,
author = {Rasthofer, Siegfried and Arzt, Steven and Bodden, Eric},
isbn = {1891562355},
number = {February},
pages = {23--26},
title = {{A Machine-learning Approach for Classifying and Categorizing Android Sources and Sinks}},
year = {2014}
}
@article{Arzt,
author = {Arzt, Steven and Bodden, Eric},
isbn = {9781450339001},
keywords = {framework model,library,model,static analysis,summary},
title = {{StubDroid: Automatic Inference of Precise Data-flow Summaries for the Android Framework}}
}
@article{Padhye,
author = {Padhye, Rohan},
isbn = {9781450322010},
keywords = {call graph,context-sensitive analysis,interprocedural analysis,points-to analysis},
title = {{Interprocedural Data Flow Analysis in Soot using Value Contexts}}
}
@article{Fratantonio,
author = {Fratantonio, Yanick and Bianchi, Antonio and Robertson, William and Kirda, Engin and Kruegel, Christopher and Vigna, Giovanni},
title = {{TriggerScope: Towards Detecting Logic Bombs in Android Applications}}
}
@article{Pacheco2005,
author = {Pacheco, Carlos and Ernst, Michael D},
pages = {504--527},
title = {{Eclat: Automatic Generation and Classification of Test Inputs}},
year = {2005}
}
@article{Saltzer1991,
author = {Saltzer, J H and Reed, D P and Clark, D D},
isbn = {0890063370},
pages = {509--512},
title = {{End-to-End Arguments in System Design}},
year = {1991}
}
@article{Pacheco2017,
author = {Pacheco, Carlos and Lahiri, Shuvendu and Ernst, Michael D and Ball, Thomas},
title = {{Retrospective: Random Test Generation}},
year = {2017}
}
@article{Pacheco,
author = {Pacheco, Carlos and Lahiri, Shuvendu K and Ernst, Michael D and Ball, Thomas},
title = {{Feedback-directed Random Test Generation}}
}
@article{Ferrante1987,
author = {Ferrante, Jeanne and Ottenstein, Karl J and Warren, Joe D},
number = {3},
pages = {319--349},
title = {{The Program Dependence Graph and Its Use in Optimization}},
volume = {9},
year = {1987}
}
@article{Singh,
author = {Singh, Ranjeet and King, Andy},
title = {{Partial Evaluation for Java Malware Detection}}
}
@article{Maiorca,
author = {Maiorca, Davide and Ariu, Davide and Corona, Igino and Aresu, Marco and Giacinto, Giorgio},
title = {{Stealth Attacks: An Extended Insight into the Obfuscation Effects on Android Malware}}
}
@article{Aonzo2018,
author = {Aonzo, Simone and Merlo, Alessio and Tavella, Giulio and Fratantonio, Yanick},
isbn = {9781450356930},
keywords = {instant apps,mobile security,password managers,phishing},
title = {{Phishing Attacks on Modern Android}},
year = {2018}
}
@article{Dufour,
author = {Dufour, Bruno and Bodden, Eric and Hendren, Laurie and Lam, Patrick},
title = {{Analyzing Java Programs with Soot}}
}
@article{Bichsel2016,
author = {Bichsel, Benjamin and Vechev, Martin},
isbn = {9781450341394},
pages = {343--355},
title = {{Statistical Deobfuscation of Android Applications}},
year = {2016}
}
@article{Nielsen,
author = {Nielsen, Janus Dam},
pages = {1--47},
title = {{A Survivor's Guide to Java Program Analysis with Soot}}
}
@article{Analysis2017,
number = {February},
title = {{Static Analysis of Mobile Programs}},
year = {2017}
}
@article{Fan2019a,
author = {Fan, Ming and Luo, Xiapu and Liu, Jun and Wang, Meng and Nong, Chunyin and Zheng, Qinghua and Liu, Ting},
doi = {10.1109/ICSE.2019.00085},
keywords = {android malware,familial analysis,graph embedding,unsupervised learning},
title = {{Graph Embedding based Familial Analysis of Android Malware using Unsupervised Learning}},
year = {2019}
}
@article{Android-anwendungen2017,
title = {{Static Data Flow Analysis for Android Applications}},
year = {2017}
}
@article{Rasthofer2016,
author = {Rasthofer, Siegfried and Arzt, Steven and Miltenberger, Marc},
isbn = {189156241X},
number = {February},
pages = {21--24},
title = {{Harvesting Runtime Values in Android Applications That Feature Anti-Analysis Techniques}},
year = {2016}
}
@article{Shin2009,
author = {Hu, Xin and Chiueh, Tzi-cker and Shin, Kang G},
isbn = {9781605583525},
keywords = {graph similarity,malware indexing,multi-resolution indexing},
title = {{Large-Scale Malware Indexing Using Function-Call Graphs}},
year = {2009}
}
@article{Hammad,
author = {Hammad, Mahmoud and Garcia, Joshua and Malek, Sam},
isbn = {9781450356381},
title = {{A Large-Scale Empirical Study on the Effects of Code Obfuscations on Android Apps and Anti-Malware Products}}
}
@article{Barros,
author = {Barros, Paulo and Vines, Paul and Ernst, Michael D},
title = {{Static Analysis of Implicit Control Flow: Resolving Java Reflection and Android Intents}}
}
@article{Mariconti2017,
archivePrefix = {arXiv},
arxivId = {arXiv:1612.04433v3},
author = {Mariconti, Enrico and Onwuzurike, Lucky and Andriotis, Panagiotis and Cristofaro, Emiliano De and Ross, Gordon and Stringhini, Gianluca},
eprint = {arXiv:1612.04433v3},
number = {Ndss},
title = {{MaMaDroid: Detecting Android Malware by Building Markov Chains of Behavioral Models}},
year = {2017}
}
@article{Zhou,
author = {Zhou, Yajin and Jiang, Xuxian},
keywords = {android malware,smartphone security},
number = {4},
title = {{Dissecting Android Malware: Characterization and Evolution}}
}
@article{Yang,
author = {Yang, Chao and Xu, Zhaoyan and Gu, Guofei and Yegneswaran, Vinod and Porras, Phillip},
keywords = {android malware analysis and detection,mobile security},
title = {{DroidMiner: Automated Mining and Characterization of Fine-grained Malicious Behaviors in Android Applications}}
}
@article{Arp,
author = {Arp, Daniel and Spreitzenbarth, Michael and H{\"{u}}bner, Malte and Gascon, Hugo and Rieck, Konrad},
isbn = {1891562355},
title = {{DREBIN: Effective and Explainable Detection of Android Malware in Your Pocket}}
}
@article{Poeplau2014,
author = {Poeplau, Sebastian and Fratantonio, Yanick and Bianchi, Antonio and Kruegel, Christopher and Vigna, Giovanni},
isbn = {1891562355},
number = {February},
pages = {23--26},
title = {{Execute This! Analyzing Unsafe and Malicious Dynamic Code Loading in Android Applications}},
year = {2014}
}
@article{Zhang,
author = {Zhang, Mu and Duan, Yue and Yin, Heng and Zhao, Zhiruo},
isbn = {9781450329576},
keywords = {android,anomaly detection,graph similarity,malware classification,semantics-aware,signature detection},
title = {{Semantics-Aware Android Malware Classification Using Weighted Contextual API Dependency Graphs}}
}
@article{Fan2018a,
author = {Fan, Ming and Liu, Jun and Luo, Xiapu and Chen, Kai and Tian, Zhenzhou and Zheng, Qinghua and Liu, Ting},
doi = {10.1109/TIFS.2018.2806891},
journal = {IEEE Transactions on Information Forensics and Security},
number = {8},
pages = {1890--1905},
publisher = {IEEE},
title = {{Android Malware Familial Classification and Representative Sample Selection via Frequent Subgraph Analysis}},
volume = {13},
year = {2018}
}
@article{Wang,
author = {Wang, Pei and Wang, Li and Wang, Shuai and Chen, Zhaofeng},
isbn = {9781450356381},
keywords = {empirical study,mobile app,obfuscation,reverse engineering},
title = {{Software Protection on the Go: A Large-Scale Empirical Study on Mobile App Obfuscation}}
}
@article{Arzta,
author = {Arzt, Steven and Rasthofer, Siegfried and Fritz, Christian and Bodden, Eric and Bartel, Alexandre and Klein, Jacques and Traon, Yves Le and Octeau, Damien and Mcdaniel, Patrick},
isbn = {9781450327848},
title = {{FlowDroid: Precise Context, Flow, Field, Object-sensitive and Lifecycle-aware Taint Analysis for Android Apps}}
}
@article{Zhu2016,
author = {Zhu, Ziyun and Dumitras, Tudor},
isbn = {9781450341394},
title = {{FeatureSmith : Automatically Engineering Features for Malware Detection by Mining the Security Literature}},
year = {2016}
}
@article{Bastani2017a,
author = {Bastani, Osbert and Martins, Ruben},
number = {March},
title = {{Automated Synthesis of Semantic Malware Signatures using Maximum Satisfiability}},
year = {2017}
}
@article{Aiken2014,
author = {Feng, Yu and Anand, Saswat and Dillig, Isil and Aiken, Alex},
isbn = {9781450330565},
keywords = {android,inter-component call graph,taint analysis},
title = {{Apposcopy: Semantics-Based Detection of Android Malware through Static Analysis}},
year = {2014}
}
@article{Tripp,
author = {Tripp, Omer},
title = {{A Bayesian Approach to Privacy Enforcement in Smartphones}}
}
@article{It,
author = {It, M},
pages = {6--8},
title = {{Thrust II: Behavioral Deobfuscation}}
}
@article{Gascon,
author = {Gascon, Hugo and Yamaguchi, Fabian and Rieck, Konrad and Arp, Daniel},
isbn = {9781450324885},
keywords = {graph kernels,machine learning,malware detection},
title = {{Structural Detection of Android Malware using Embedded Call Graphs}}
}
@article{Bodden2012,
author = {Bodden, Eric},
keywords = {flow-sensitive,inter-procedural static analysis},
title = {{Inter-procedural Data-flow Analysis with IFDS/IDE and Soot}},
year = {2012}
}
@article{Monperrus2012,
author = {Bartel, Alexandre and Klein, Jacques and {Le Traon}, Yves and Monperrus, Martin},
isbn = {9781450314909},
keywords = {android,dalvik bytecode,jimple,soot,static},
title = {{Dexpler: Converting Android Dalvik Bytecode to Jimple for Static Analysis with Soot}},
year = {2012}
}
@article{Balachandran2018,
author = {Balachandran, Vivek and Tan, Darell J J and Thing, Vrizlynn L L},
doi = {10.1016/j.cose.2016.05.003},
issn = {0167-4048},
journal = {Computers {\&} Security},
pages = {72--93},
publisher = {Elsevier Ltd},
title = {{Control flow obfuscation for Android applications}},
url = {http://dx.doi.org/10.1016/j.cose.2016.05.003},
volume = {61},
year = {2016}
}
@article{Meyerovich2013,
author = {Meyerovich, Leo A and Torok, Matthew E and Atkinson, Eric and Bod{\'{i}}k, Rastislav},
isbn = {9781450319225},
keywords = {attribute grammars,css,functional specification,layout,scheduling,sketching},
pages = {187--196},
title = {{Parallel Schedule Synthesis for Attribute Grammars}},
year = {2013}
}
@article{Si,
author = {Si, Xujie and Lee, Woosuk and Zhang, Richard},
isbn = {9781450355735},
keywords = {syntax-guided synthesis,datalog,active learning,template augmentation,program analysis},
pages = {515--527},
title = {{Syntax-Guided Synthesis of Datalog Programs}}
}
@book{Synthesis,
author = {Gulwani, Sumit and Polozov, Oleksandr and Singh, Rishabh},
isbn = {9781680832921},
title = {{Program Synthesis}}
}
@article{Mador-haim,
author = {Mador-haim, Sela and Martin, Milo M K and Deshmukh, Jyotirmoy V},
isbn = {9781450320146},
keywords = {cache coherence protocols,distributed protocol synthesis,program synthesis,programming by example},
pages = {1--10},
title = {{TRANSIT: Specifying Protocols with Concolic Snippets}}
}
@article{Alur,
author = {Alur, Rajeev and Bodik, Rastislav and Dallal, Eric and Fisman, Dana},
keywords = {constraint solving,counterexamples,machine,program synthesis},
title = {{Syntax-Guided Synthesis}}
}
@article{Schulman2016,
archivePrefix = {arXiv},
arxivId = {arXiv:1506.02438v6},
author = {Schulman, John and Moritz, Philipp and Levine, Sergey and Jordan, Michael I and Abbeel, Pieter},
eprint = {arXiv:1506.02438v6},
pages = {1--14},
title = {{High-Dimensional Continuous Control Using Generalized Advantage Estimation}},
year = {2016}
}
@article{Yang2015a,
author = {Yang, Wei and Xiao, Xusheng and Andow, Benjamin and Li, Sihan and Xie, Tao and Enck, William},
doi = {10.1109/ICSE.2015.50},
isbn = {9781479919345},
journal = {2015 IEEE/ACM 37th IEEE International Conference on Software Engineering},
pages = {303--313},
publisher = {IEEE},
title = {{AppContext: Differentiating Malicious and Benign Mobile App Behaviors Using Context}},
volume = {1},
year = {2015}
}
@article{Spath2019,
author = {Sp{\"{a}}th, Johannes and Iem, Fraunhofer and Ali, Karim},
number = {January},
title = {{using Synchronized Pushdown Systems}},
volume = {3},
year = {2019}
}
@article{Kellogg,
author = {Kellogg, Martin and Maus, Everett},
title = {{Synthesizing Static Analyses from Examples}}
}
@article{Reps,
author = {Reps, Thomas and Horwitz, Susan and Sagiv, Mooly},
title = {{Precise Interprocedural Dataflow Analysis via Graph Reachability}}
}
@article{Pauck,
archivePrefix = {arXiv},
arxivId = {arXiv:1804.02903v1},
author = {Pauck, Felix and Bodden, Eric},
eprint = {arXiv:1804.02903v1},
keywords = {android taint analysis,benchmarks,empirical studies,re-,tools},
title = {{Do Android Taint Analysis Tools Keep Their Promises?}}
}
@article{Koeplinger,
author = {Koeplinger, David and Feldman, Matthew and Prabhakar, Raghu and Zhang, Yaqi and Kozyrakis, Christos and Olukotun, Kunle},
isbn = {9781450356985},
keywords = {architectures,cgras,compilers,domain-specific languages,fpgas,hardware accelerators,high-level synthesis,reconfigurable},
title = {{Spatial: A Language and Compiler for Application Accelerators}}
}
@article{Beckett2018,
archivePrefix = {arXiv},
arxivId = {arXiv:1806.08744v1},
author = {Beckett, Ryan and Walker, David},
eprint = {arXiv:1806.08744v1},
title = {{Control Plane Compression (Extended Version of the SIGCOMM 2018 Paper)}},
year = {2018}
}
@article{Paper,
author = {Gulwani, Sumit},
keywords = {belief propagation,deductive synthesis,genetic programming,inductive synthesis,machine learning,probabilistic inference,programming by demonstration,programming by examples,sat solving,smt solving},
title = {{Dimensions in Program Synthesis}}
}
@article{Sachdev,
author = {Sachdev, Saksham and Li, Hongyu and Luan, Sifei and Kim, Seohyun and Sen, Koushik and Chandra, Satish},
isbn = {9781450358347},
keywords = {code search,embedding,tf-idf,word embedding},
title = {{Retrieval on Source Code: A Neural Code Search}}
}
@article{Hawkins,
author = {Hawkins, Peter and Aiken, Alex and Fisher, Kathleen and Rinard, Martin},
isbn = {9781450306638},
keywords = {composite data structures,synthesis},
title = {{Data Representation Synthesis}}
}
@article{Neubig2018,
author = {Neubig, Graham},
title = {{Towards Open-domain Generation of Programs from Natural Language}},
year = {2018}
}
@article{Ernst,
author = {Ernst, Michael D},
keywords = {natural language processing,program analysis,software development},
number = {4},
pages = {1--4},
title = {{Natural language is a programming language: Applying natural language processing to software development}}
}
@article{Raychev2014,
author = {Raychev, Veselin and Vechev, Martin and Yahav, Eran},
isbn = {9781450327848},
title = {{Code Completion with Statistical Language Models}},
year = {2014}
}
@article{Xu2017,
archivePrefix = {arXiv},
arxivId = {arXiv:1711.04436v1},
author = {Xu, Xiaojun and Liu, Chang and Song, Dawn},
eprint = {arXiv:1711.04436v1},
pages = {1--13},
title = {{SQLNet: Generating Structured Queries from Natural Language Without Reinforcement Learning}},
year = {2017}
}
@article{Zhong1995,
archivePrefix = {arXiv},
arxivId = {arXiv:1709.00103v7},
author = {Zhong, Victor and Xiong, Caiming and Socher, Richard},
eprint = {arXiv:1709.00103v7},
pages = {1--12},
title = {{Seq2SQL: Generating Structured Queries from Natural Language using Reinforcement Learning}},
year = {2017}
}
@article{Wua,
archivePrefix = {arXiv},
arxivId = {arXiv:1809.01357v2},
author = {Wu, Mike and Mosse, Milan and Goodman, Noah and Piech, Chris},
eprint = {arXiv:1809.01357v2},
title = {{Zero Shot Learning for Code Education: Rubric Sampling with Deep Learning Inference}}
}
@article{Vasic2019,
author = {Vasic, Marko and Kanade, Aditya and Maniatis, Petros and Bieber, David and Singh, Rishabh},
pages = {1--12},
title = {{Neural Program Repair by Jointly Learning to Localize and Repair}},
year = {2019}
}
@article{Goffi,
author = {Goffi, Alberto and Kuznetsov, Konstantin and Gorla, Alessandra and Ernst, Michael D},
isbn = {9781450356992},
keywords = {automatic test case generation,natural language processing,software testing,specification inference,test oracle generation},
pages = {242--253},
title = {{Translating Code Comments to Procedure Specifications}}
}
@article{Iyer,
archivePrefix = {arXiv},
arxivId = {arXiv:1808.09588v1},
author = {Iyer, Srinivasan and Konstas, Ioannis and Cheung, Alvin and Zettlemoyer, Luke},
eprint = {arXiv:1808.09588v1},
title = {{Mapping Language to Code in Programmatic Context}}
}
@article{Gu,
author = {Gu, Xiaodong and Zhang, Hongyu and Kim, Sunghun},
isbn = {9781450356381},
keywords = {code search,deep learning,joint embedding},
title = {{Deep Code Search}}
}
@article{Balog2017,
author = {Balog, Matej and Gaunt, Alexander L and Brockschmidt, Marc and Nowozin, Sebastian and Tarlow, Daniel},
title = {{DeepCoder: Learning to Write Programs}},
year = {2017}
}
@article{Dong2016,
author = {Dong, Li and Lapata, Mirella},
pages = {33--43},
title = {{Language to Logical Form with Neural Attention}},
year = {2016}
}
@article{Lee2018,
author = {Lee, Mina and Kim, Sonia},
title = {{Neural Contextual Code Search}},
year = {2018}
}
@article{Li2015a,
author = {Li, Jian and Wang, Yue and Lyu, Michael R and King, Irwin},
keywords = {Machine Learning: Deep Learning,Multidisciplinary Topics and Applications: Knowled,Natural Language Processing: NLP Applications and},
pages = {4159--4165},
title = {{Code Completion with Neural Attention and Pointer Networks}},
year = {2018}
}
@article{Kenton2017,
archivePrefix = {arXiv},
arxivId = {arXiv:1810.04805v1},
author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
eprint = {arXiv:1810.04805v1},
title = {{BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding}},
year = {2018}
}
@article{Vinyals,
author = {Vinyals, Oriol and Koo, Terry and Hinton, Geoffrey},
pages = {1--9},
title = {{Grammar as a Foreign Language}}
}
@article{Allamanis2013,
author = {Allamanis, Miltiadis and Sutton, Charles},
doi = {10.1109/MSR.2013.6624004},
isbn = {9781467329361},
journal = {2013 10th Working Conference on Mining Software Repositories (MSR)},
pages = {53--56},
publisher = {IEEE},
title = {{Why, When, and What: Analyzing Stack Overflow Questions by Topic, Type, and Code}},
year = {2013}
}
@article{Polosukhin2018,
archivePrefix = {arXiv},
arxivId = {arXiv:1802.04335v1},
author = {Polosukhin, Illia and Skidanov, Alex},
eprint = {arXiv:1802.04335v1},
pages = {1--11},
title = {{Neural Program Search: Solving Programming Tasks from Description and Examples}},
year = {2018}
}
@article{Murali2018,
archivePrefix = {arXiv},
arxivId = {arXiv:1703.05698v5},
author = {Murali, Vijayaraghavan and Qi, Letao and Chaudhuri, Swarat and Jermaine, Chris},
eprint = {arXiv:1703.05698v5},
number = {2017},
pages = {1--17},
title = {{Neural Sketch Learning for Conditional Program Generation}},
year = {2018}
}
@article{Bielik,
author = {Bielik, Pavol and Raychev, Veselin and Vechev, Martin},
keywords = {and phrases probabilistic tools,open-source software,probabilistic inference and learning,program analysis},
pages = {1--10},
title = {{Programming with “Big Code”: Lessons, Techniques and Applications}}
}
@article{Allamanis,
archivePrefix = {arXiv},
arxivId = {arXiv:1709.06182v2},
author = {Allamanis, Miltiadis and Barr, Earl T and Devanbu, Premkumar and Sutton, Charles},
eprint = {arXiv:1709.06182v2},
number = {1414172},
pages = {1--36},
title = {{A Survey of Machine Learning for Big Code and Naturalness}}
}
@article{Schkufza2013,
author = {Schkufza, Eric and Aiken, Alex},
isbn = {9781450318709},
keywords = {binary,markov chain monte carlo,mcmc,smt,stochastic search,superoptimization,x86-64},
title = {{Stochastic Superoptimization}},
year = {2013}
}
@article{Kanvar2015,
archivePrefix = {arXiv},
arxivId = {arXiv:1403.4910v5},
author = {Kanvar, Vini and Khedker, Uday P},
eprint = {arXiv:1403.4910v5},
title = {{Heap Abstractions for Static Analysis}},
year = {2015}
}
@article{Gottschlich,
archivePrefix = {arXiv},
arxivId = {arXiv:1803.07244v2},
author = {Gottschlich, Justin and Solar-lezama, Armando and Carbin, Michael and Rinard, Martin and Barzilay, Regina and Tenenbaum, Joshua B and Mattson, Tim},
eprint = {arXiv:1803.07244v2},
keywords = {intention,invention,machine programming,program synthesis,software development,software maintenance},
title = {{The Three Pillars of Machine Programming}}
}
@article{Vilk2018,
author = {Vilk, John and Berger, Emery D},
title = {{BLeak: Automatically Debugging Memory Leaks in Web Applications}},
year = {2018}
}
@article{Beschastnikh2016,
author = {Beschastnikh, Ivan},
number = {april},
pages = {1--20},
title = {{Debugging Distributed Systems}},
year = {2016}
}
@article{Harman,
author = {Harman, Mark and O'Hearn, Peter},
pages = {1--23},
title = {{From Start-ups to Scale-ups: Opportunities and Open Problems for Static and Dynamic Program Analysis}}
}
@article{Huang,
author = {Huang, Waylon},
title = {{Evaluating the Effectiveness of Components of Guided Random Testing}}
}
@article{Gua,
archivePrefix = {arXiv},
arxivId = {arXiv:1605.08535v3},
author = {Gu, Xiaodong and Zhang, Hongyu and Zhang, Dongmei and Kim, Sunghun},
eprint = {arXiv:1605.08535v3},
isbn = {9781450321389},
keywords = {api,api usage,code search,deep learning,rnn},
title = {{Deep API Learning}}
}
@article{Devlin2017,
archivePrefix = {arXiv},
arxivId = {arXiv:1710.04157v1},
author = {Devlin, Jacob and Hausknecht, Matthew},
eprint = {arXiv:1710.04157v1},
number = {Nips},
title = {{Neural Program Meta-Induction}},
year = {2017}
}
@article{Zavershynskyi2018,
archivePrefix = {arXiv},
arxivId = {arXiv:1807.03168v1},
author = {Zavershynskyi, Maksym and Skidanov, Alex and Polosukhin, Illia},
eprint = {arXiv:1807.03168v1},
title = {{NAPS: Natural Program Synthesis Dataset}},
year = {2018}
}
@article{Simmons-edler,
archivePrefix = {arXiv},
arxivId = {arXiv:1806.02932v1},
author = {Simmons-Edler, Riley and Miltner, Anders and Seung, Sebastian},
eprint = {arXiv:1806.02932v1},
title = {{Program Synthesis Through Reinforcement Learning Guided Tree Search}}
}
@article{Martins,
author = {Martins, Ruben and Bastani, Osbert},
isbn = {9781450356985},
keywords = {conflict-driven learning,program synthesis},
title = {{Program Synthesis using Conflict-Driven Learning}}
}
@article{Khademi2017,
author = {Allamanis, Miltiadis and Brockschmidt, Marc and Khademi, Mahmoud},
pages = {1--16},
title = {{Learning to Represent Programs with Graphs}},
year = {2017}
}
@article{Bodik2015,
author = {Bodik, Rastislav},
isbn = {9781450336697},
title = {{Program Synthesis: Opportunities for the Next Decade}},
year = {2015}
}
@article{Nelson,
author = {Joshi, Rajeev and Nelson, Greg and Randall, Keith},
isbn = {1581134630},
keywords = {optimizing compiler,superoptimizer},
title = {{Denali: A Goal-directed Superoptimizer}}
}
@article{Noble2016,
author = {Noble, James and Black, Andrew P and Bruce, Kim B and Homer, Michael and Miller, Mark S},
isbn = {9781450340762},
keywords = {abstraction,equality,identity,object-orientation},
pages = {224--237},
title = {{The Left Hand of Equals}},
year = {2016}
}
@article{Wrenn2018,
author = {Wrenn, John and Fisler, Kathi},
isbn = {9781450356282},
title = {{Who Tests the Testers? Avoiding the Perils of Automated Testing}},
year = {2018}
}
@article{Detlefs2005,
author = {Detlefs, David and Nelson, Greg and Saxe, James B},
number = {3},
pages = {365--473},
title = {{Simplify: A Theorem Prover for Program Checking}},
volume = {52},
year = {2005}
}
@article{Becker2017,
archivePrefix = {arXiv},
arxivId = {arXiv:1709.05703v1},
author = {Becker, Kory and Gottschlich, Justin},
eprint = {arXiv:1709.05703v1},
keywords = {artificial intelligence,code generation and optimization,evolutionary computation,genetic algorithm,machine learning,program synthesis,programming languages},
title = {{AI Programmer: Autonomously Creating Software Programs Using Genetic Algorithms}},
year = {2017}
}
@article{Henderson2012,
author = {Henderson, Keith and Gallagher, Brian and Eliassi-Rad, Tina},
isbn = {9781450314626},
keywords = {graph mining,network classification,sense-making,similarity search,structural role discovery},
title = {{RolX: Structural Role Extraction {\&} Mining in Large Graphs}},
year = {2012}
}
@article{Chaudhuri2017,
author = {Chaudhuri, Swarat},
title = {{Deep Learning for Program Synthesis}},
year = {2017}
}
@article{Alon,
archivePrefix = {arXiv},
arxivId = {arXiv:1803.09473v5},
author = {Alon, Uri and Zilberstein, Meital and Levy, Omer and Yahav, Eran},
eprint = {arXiv:1803.09473v5},
title = {{code2vec: Learning Distributed Representations of Code}}
}
@article{Murray2018,
abstract = {—Given recent high-profile successes in formal ver-ification of security-related properties (e.g., for seL4), and the rising popularity of applying formal methods to cryptographic libraries and security protocols like TLS, we revisit the meaning of security-related proofs about software. We re-examine old issues, and identify new questions that have escaped scrutiny in the formal methods literature. We consider what value proofs about software systems deliver to end-users (e.g., in terms of net assurance benefits), and at what cost in terms of side effects (such as changes made to software to facilitate the proofs, and assumption-related deployment restrictions imposed on software if these proofs are to remain valid in operation). We consider in detail, for the first time to our knowledge, possible relationships between proofs and side effects. To make our discussion concrete, we draw on tangible examples, experience, and the literature.},
author = {Murray, Toby and {Van Oorschot}, Paul},
doi = {10.1109/SecDev.2018.00009},
isbn = {9781538676622},
journal = {Proceedings - 2018 IEEE Cybersecurity Development Conference, SecDev 2018},
keywords = {Computer security,Formal verification,Software engineering},
number = {June},
pages = {1--10},
title = {{BP: Formal proofs, the fine print and side effects}},
year = {2018}
}
@article{Chajed2018,
abstract = {Writing concurrent systems software is error-prone, because multiple processes or threads can interleave in many ways, and it is easy to forget about a subtle corner case. This paper introduces CSPEC, a framework for formal verification of concurrent software, which ensures that no corner cases are missed. The key challenge is to reduce the number of interleavings that developers must consider. CSPEC uses mover types to reorder commuta-tive operations so that usually it's enough to reason about only sequential executions rather than all possible inter-leavings. CSPEC also makes proofs easier by making them modular using layers, and by providing a library of reusable proof patterns. To evaluate CSPEC, we implemented and proved the correctness of CMAIL, a simple concurrent Maildir-like mail server that speaks SMTP and POP3. The results demonstrate that CSPEC's movers and patterns allow reasoning about sophisticated concurrency styles in CMAIL.},
author = {Chajed, Tej and Kaashoek, Frans and Lampson, Butler and Zeldovich, Nickolai},
journal = {OSDI},
pages = {306--322},
title = {{Verifying concurrent software using movers in CSPEC}},
url = {https://www.usenix.org/conference/osdi18/presentation/chajed},
year = {2018}
}
@article{Mokhov2018,
author = {Mokhov, Andrey and Mitchell, Neil and Jones, Simon Peyton},
keywords = {build systems, functional programming, algorithms},
number = {September},
title = {{Build Systems {\`{a}} la Carte}},
volume = {2},
year = {2018}
}
@article{Polikarpova2018,
archivePrefix = {arXiv},
arxivId = {arXiv:1807.07022v2},
author = {Polikarpova, Nadia and Sergey, Ilya},
eprint = {arXiv:1807.07022v2},
number = {1},
title = {{Structuring the Synthesis of Heap-Manipulating Programs}},
volume = {1},
year = {2018}
}
@article{HUET1997,
abstract = {Almost every programmer has faced the problem of representing a tree together with a subtree that is the focus of attention, where that focus may move left, right, up or down the tree. The Zipper is Huet's nifty name for a nifty data structure which fulfills this need. I wish I had known of it when I faced this task, because the solution I came up with was not quite so efficient or elegant as the Zipper.},
author = {Huet, G{\'{e}}rard},
doi = {10.1017/S0956796897002864},
issn = {09567968},
journal = {Journal of Functional Programming},
number = {5},
pages = {549--554},
title = {{The Zipper}},
url = {http://www.journals.cambridge.org/abstract{\_}S0956796897002864},
volume = {7},
year = {1997}
}
@article{Felleisen1989,
abstract = {The assignment statement is a ubiquitous building block of programming languages. In functionally oriented programming languages, the assignment is the facility for modeling and expressing state changes. Given that functional languages are directly associated with the equational $\lambda$-calculus-theory, it is natural to wonder whether this syntactic proof system is extensible to imperative variants of functional languages including state variables and side-effects. In this paper, we show that such an extension exists, and that it satisfies variants of the conventional consistency and standardization theorems. With a series of examples, we also demonstrate the system's capabilities for reasoning about imperative-functional programs and illustrate some of its advantages over alternative models. {\textcopyright} 1989.},
author = {Felleisen, Matthias and Friedman, Daniel P.},
doi = {10.1016/0304-3975(89)90069-8},
issn = {03043975},
journal = {Theoretical Computer Science},
number = {3},
pages = {243--287},
title = {{A syntactic theory of sequential state}},
volume = {69},
year = {1989}
}
@article{Krebbers2014,
author = {Krebbers, Robbert and Wiedijk, Freek},
doi = {10.1145/2676724.2693571},
isbn = {9781450332965},
keywords = {coq,interactive theorem proving,iso c11 standard,operational semantics,static analysis},
pages = {1--12},
title = {{A typed C11 semantics for interactive theorem proving}},
year = {2014}
}
@article{Chlipala2013,
author = {Chlipala, Adam},
doi = {10.1145/2500365.2500592},
isbn = {9781450323260},
journal = {{\ldots} international conference on Functional programming},
keywords = {functional programming,generative metaprogramming,interactive proof assistants,low-level programming languages},
pages = {391--402},
title = {{The Bedrock structured programming system: Combining generative metaprogramming and Hoare logic in an extensible program verifier}},
url = {http://dl.acm.org/citation.cfm?id=2500592},
year = {2013}
}
@article{Nordio2009,
abstract = {Object-oriented languages provide advantages such as reuse and modularity, but they also raise new challenges for program verification. Program logics have been developed for languages such as C{\#} and Java. However, these logics do not cover the specifics of the Eiffel language. This paper presents a program logic for Eiffel that handles exceptions, once routines, and multiple inheritance. The logic is proven sound and complete w.r.t. an operational semantics. Lessons on language design learned from the experience are discussed. ? 2009 Springer Berlin Heidelberg.},
author = {Nordio, Martin and Calcagno, Cristiano and M{\"{u}}ller, Peter and Meyer, Bertrand},
doi = {10.1007/978-3-642-02571-6_12},
isbn = {9783642025709},
issn = {18651348},
journal = {Lecture Notes in Business Information Processing},
keywords = {Eiffel,Operational semantics,Program proofs,Software verification},
pages = {195--214},
title = {{A sound and complete program logic for eiffel}},
volume = {33 LNBIP},
year = {2009}
}
@article{Feng2009,
abstract = {Hardware interrupts are widely used in the world's critical software systems to support preemptive threads, device drivers, operating system kernels, and hypervisors. Handling interrupts properly is an essential component of low-level system programming. Unfortunately, interrupts are also extremely hard to reason about: they dramatically alter the program control flow and complicate the invariants in low-level concurrent code (e.g., implementation of synchronization primitives). Existing formal verification techniques—including Hoare logic, typed assembly language, concurrent separation logic, and the assume-guarantee method—have consistently ignored the issues of interrupts; this severely limits the applicability and power of today's program verification systems. In this paper we present a novel Hoare-logic-like framework for certifying low-level system programs involving both hardware interrupts and preemptive threads. We show that enabling and disabling interrupts can be formalized precisely using simple ownership-transfer semantics, and the same technique also extends to the concurrent setting. By carefully reasoning about the interaction among interrupt handlers, context switching, and synchronization libraries, we are able to—for the first time—successfully certify a preemptive thread implementation and a large number of common synchronization primitives. Our work provides a foundation for reasoning about interrupt-based kernel programs and makes an important advance toward building fully certified operating system kernels and hypervisors.},
author = {Feng, Xinyu and Shao, Zhong and Guo, Yu and Dong, Yuan},
doi = {10.1007/s10817-009-9118-9},
isbn = {978-1-59593-860-2},
issn = {01687433},
journal = {Journal of Automated Reasoning},
keywords = {Hardware interrupts,Modularity,Operating system verification,Preemptive threads,Separation logic,Synchronization primitives,Thread libraries},
number = {2-4},
pages = {301--347},
title = {{Certifying low-level programs with hardware interrupts and preemptive threads}},
volume = {42},
year = {2009}
}
@article{Appel2011,
abstract = {The software toolchain includes static analyzers to check assertions$\backslash$nabout programs; optimizing compilers to translate programs to machine$\backslash$nlanguage; operating systems and libraries to supply context for$\backslash$nprograms. Our Verified Software Too/chain verifies with machine-checked$\backslash$nproofs that the assertions claimed at the top of the toolchain really$\backslash$nhold in the machine-language program, running in the operating-system$\backslash$ncontext, on a weakly-consistent-shared-memory machine.$\backslash$nOur verification approach is modular, in that proofs about operating$\backslash$nsystems or concurrency libraries are oblivious of the programming$\backslash$nlanguage or machine language, proofs about compilers are oblivious of$\backslash$nthe program logic used to verify static analyzers, and so on. The$\backslash$napproach is scalable, in that each component is verified in the semantic$\backslash$nidiom most natural for that component.$\backslash$nFinally, the verification is foundational: the trusted base for proofs$\backslash$nof observable properties of the machine-language program includes only$\backslash$nthe operational semantics of the machine language, not the source$\backslash$nlanguage, the compiler, the program logic, or any other part of the$\backslash$ntoolchain even when these proofs are carried out by source-level static$\backslash$nanalyzers.$\backslash$nIn this paper I explain some semantic techniques for building a verified$\backslash$ntoolchain.},
author = {Appel, Andrew W.},
doi = {10.1007/978-3-642-19718-5_1},
isbn = {9783642197178},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
number = {March},
pages = {1--17},
title = {{Verified software toolchain (Invited talk)}},
volume = {6602 LNCS},
year = {2011}
}
@article{Liang2016a,
author = {Liang, Hongjin and Feng, Xinyu and Shao, Zhong},
title = {{Compositional Verification of Termination-Preserving Refinement of Concurrent Programs ( Technical Report )}},
year = {2016}
}
@article{Leroy2009,
abstract = {This article describes the development and formal verification (proof of semantic preservation) of a compiler back-end from Cminor (a simple imperative intermediate language) to PowerPC assembly code, using the Coq proof assistant both for programming the compiler and for proving its soundness. Such a verified compiler is useful in the context of formal methods applied to the certification of critical software: the verification of the compiler guarantees that the safety properties proved on the source code hold for the executable compiled code as well.},
archivePrefix = {arXiv},
arxivId = {0902.2137},
author = {Leroy, Xavier},
doi = {10.1007/s10817-009-9155-4},
eprint = {0902.2137},
issn = {01687433},
journal = {Journal of Automated Reasoning},
keywords = {Compiler transformations and optimizations,Compiler verification,Formal methods,Program proof,Semantic preservation,The Coq theorem prover},
number = {4},
pages = {363--446},
title = {{A formally verified compiler back-end}},
volume = {43},
year = {2009}
}
@article{Liang2014,
author = {Liang, H and Feng, X and Shao, Z},
doi = {10.1145/2603088.2603123},
isbn = {9781450328869},
journal = {Proceedings of the Joint Meeting of the 23rd EACSL Annual Conference on Computer Science Logic, CSL 2014 and the 29th Annual ACM/IEEE Symposium on Logic in Computer Science, LICS 2014},
keywords = {concurrency,refinement,rely-guarantee reasoning,simulation,termination preservation},
title = {{Compositional verification of termination-preserving refinement of concurrent programs}},
url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84905990125{\&}partnerID=40{\&}md5=019721a74ee1260c1b637bffecedb181},
year = {2014}
}
@article{Cohen2009,
abstract = {Abstract. VCC is an industrial-strength verification environment for low-level concurrent system code written in C. VCC takes a program (annotated with function contracts, state assertions, and type invariants) and attempts to prove the correctness of these annotations. It includes tools for monitoring proof attempts and constructing partial counterex- ample executions for failed proofs. This paper motivates VCC, describes our verification methodology, describes the architecture of VCC, and reports on our experience using VCC to verify the Microsoft Hyper-V hypervisor.5},
author = {Cohen, Ernie and Dahlweid, Markus and Hillebrand, Mark and Leinenbach, Dirk and Moskal, Micha{\l} and Santen, Thomas and Schulte, Wolfram and Tobies, Stephan},
doi = {10.1007/978-3-642-03359-9_2},
isbn = {364203358X},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {23--42},
title = {{VCC: A practical system for verifying concurrent C}},
volume = {5674 LNCS},
year = {2009}
}
@article{Chaki2004,
abstract = {There has been considerable progress in the domain of software verification$\backslash$nover the last few years. This advancement has been driven, to a large$\backslash$nextent, by the emergence of powerful yet automated abstraction techniques$\backslash$nsuch as predicate abstraction. However, the state-space explosion$\backslash$nproblem in model checking remains the chief obstacle to the practical$\backslash$nverification of real-world distributed systems. Even in the case$\backslash$nof purely sequential programs, a crucial requirement to make predicate$\backslash$nabstraction effective is to use as few predicates as possible. This$\backslash$nis because, in the worst case, the state-space of the abstraction$\backslash$ngenerated (and consequently the time and memory complexity of the$\backslash$nabstraction process) is exponential in the number of predicates involved.$\backslash$nIn addition, for concurrent programs, the number of reachable states$\backslash$ncould grow exponentially with the number of components.},
author = {Chaki, S. and Clarke, E. and Groce, A. and Ouaknine, J. and Strichman, O. and Yorav, K.},
doi = {10.1023/B:FORM.0000040026.56959.91},
issn = {09259856},
journal = {Formal Methods in System Design},
keywords = {Abstraction refinement,Concurrency,Predicate abstraction,Process algebra,Software verification},
number = {2-3},
pages = {129--166},
title = {{Efficient verification of sequential and concurrent C Programs}},
volume = {25},
year = {2004}
}
@book{Symposium2015,
author = {Symposium, Asian and Hutchison, David},
isbn = {9783319265285},
title = {{Languages}},
year = {2015}
}
@article{Siskind2016,
author = {Siskind, Jeffrey Mark},
number = {April},
title = {{Efficient Implementation of a Higher-Order Language with Built-In AD}},
year = {2016}
}
@article{Felleisen1992,
abstract = {The syntactic theories of control and state are conservative extensions of the $\lambda$$\upsilon$-calculus for equational reasoning about imperative programming facilities in higher-order languages. Unlike the simple $\lambda$$\upsilon$-calculus, the extended theories are mixtures of equivalence relations and compatible congruence relations on the term language, which significantly complicates the reasoning process. In this paper we develop fully compatible equational theories of the same imperative higher-order programming languages. The new theories subsume the original calculi of control and state and satisfy the usual Church-Rosser and Standardization Theorems. With the new calculi, equational reasoning about imperative programs becomes as simple as reasoning about functional programs. {\textcopyright} 1992.},
author = {Felleisen, Matthias and Hieb, Robert},
doi = {10.1016/0304-3975(92)90014-7},
issn = {03043975},
journal = {Theoretical Computer Science},
number = {2},
pages = {235--271},
title = {{The revised report on the syntactic theories of sequential control and state}},
volume = {103},
year = {1992}
}
@article{,
abstract = {Fault-tolerant distributed algorithms play an important role in many critical/high-availability applications. These algorithms are notori-ously difficult to implement correctly, due to asynchronous com-munication and the occurrence of faults, such as the network drop-ping messages or computers crashing. We introduce PSYNC, a domain specific language based on the Heard-Of model, which views asynchronous faulty systems as syn-chronous ones with an adversarial environment that simulates asyn-chrony and faults by dropping messages. We define a runtime sys-tem for PSYNC that efficiently executes on asynchronous networks. We formalize the relation between the runtime system and PSYNC in terms of observational refinement. The high-level lockstep ab-straction introduced by PSYNC simplifies the design and imple-mentation of fault-tolerant distributed algorithms and enables auto-mated formal verification. We have implemented an embedding of PSYNC in the SCALA programming language with a runtime system for asynchronous networks. We show the applicability of PSYNC by implementing several important fault-tolerant distributed algorithms and we com-pare the implementation of consensus algorithms in PSYNC against implementations in other languages in terms of code size, runtime efficiency, and verification.},
archivePrefix = {arXiv},
arxivId = {arXiv:1603.09436},
doi = {10.1145/nnnnnnn.nnnnnnn},
eprint = {arXiv:1603.09436},
isbn = {9781450335492},
issn = {15232867},
keywords = {and they must coordinate,automated verification,consensus,fault-tolerant distributed algorithms,only a limited view,over the entire system,partially synchrony,round model,to achieve global goals},
number = {1},
pages = {1--16},
title = {{A Logical Relation for Monadic Encapsulation of State Proving contextual equivalences in the presence of runST}},
url = {http://www.di.ens.fr/{~}cezarad/popl16.pdf},
volume = {1},
year = {2016}
}
@article{Vaynberg2012,
abstract = {A virtual memory manager (VMM) is a part of an operating system that provides the rest of the kernel with an abstract model of memory. Although small in size, it involves complicated and interdependent invariants that make monolithic verification of the VMM and the kernel running on top of it difficult. In this paper, we make the observation that a VMM is constructed in layers: physical page allocation, page table drivers, address space API, etc., each layer providing an abstraction that the next layer utilizes. We use this layering to simplify the verification of individual modules of VMM and then to link them together by composing a series of small refinements. The compositional verification also supports function calls from less abstract layers into more abstract ones, allowing us to simplify the verification of initialization functions as well. To facilitate such compositional verification, we develop a framework that assists in creation of verification systems for each layer and refinements between the layers. Using this framework, we have produced a certification of BabyVMM, a small VMM designed for simplified hardware. The same proof also shows that a certified kernel using BabyVMM's virtual memory abstraction can be refined following a similar sequence of refinements, and can then be safely linked with BabyVMM. Both the verification framework and the entire certification of BabyVMM have been mechanized in the Coq Proof Assistant.},
author = {Vaynberg, Alexander and Shao, Zhong},
doi = {10.1007/978-3-642-35308-6_13},
isbn = {9783642353079},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
number = {Vmm},
pages = {143--159},
title = {{Compositional verification of a baby virtual memory manager}},
volume = {7679 LNCS},
year = {2012}
}
@article{Klein2004,
author = {Klein, Gerwin and Tuch, Harvey},
journal = {TPHOLs Emerging Trends},
title = {{Towards Verified Virtual Memory in L4}},
year = {2004}
}
@article{Tuch2004,
author = {Tuch, Harvey and Klein, Gerwin},
pages = {73--97},
title = {{Verifying the L4 virtual memory subsystem}},
url = {http://w.doclsf.de/papers/os-verify-04.pdf{\#}page=79{\%}5Cnhttp://ftjpbcx.doclsf.de/papers/os-verify-04.pdf{\%}23page=105},
year = {2004}
}
@article{Meijer,
author = {Meijer, Erik},
pages = {1--27},
title = {{Functional Programming with Bananas, Lenses, Envelopes and Barbed Wire}}
}
@article{Appel2006,
abstract = {Separation logic is a Hoare logic for programs that alter pointer data structures. One can do machine-checked separation-logic proofs of interesting programs by a semantic embedding of separation logic in a higher-order logic such as Coq or Isabelle/HOL. However, since separation is a linear logic—actually, a mixture of linear and nonlinear logic—the usual methods that Coq or Isabelle use to manipulate hypotheses don't work well. On the other hand, one does not want to duplicate in linear logic the entire libraries of lemmas and tactics that are an important strength of the Coq and Isabelle systems. Here I demonstrate a set of tactics for moving cleanly between classical natural deduction and linear implication.},
author = {Appel, A.W.},
journal = {Unpublished draft, http://www. cs. princeton. edu/appel/papers/septacs. pdf},
title = {{Tactics for separation logic}},
url = {http://scholar.google.com/scholar?hl=en{\&}btnG=Search{\&}q=intitle:Tactics+for+Separation+Logic{\#}0},
year = {2006}
}
@article{Chlipala2011,
abstract = {Several recent projects have shown the feasibility of verifying low- level systems software. Verifications based on automated theorem- proving have omitted reasoning about first-class code pointers, which is critical for tasks like certifying implementations of threads and processes. Conversely, verifications that deal with first-class code pointers have featured long, complex, manual proofs. In this paper, we introduce the Bedrock framework, which supports mostly-automated proofs about programs with the full range of features needed to implement, e.g., language runtime systems.$\backslash$nThe heart of our approach is in mostly-automated discharge of verification conditions inspired by separation logic. Our take on separation logic is computational, in the sense that function speci- fications are usually written in terms of reference implementations in a purely functional language. Logical quantifiers are the most challenging feature for most automated verifiers; by relying on functional programs (written in the expressive language of the Coq proof assistant), we are able to avoid quantifiers almost entirely. This leads to some dramatic improvements compared to both past work in classical verification, which we compare against with im- plementations of data structures like binary search trees and hash tables; and past work in verified programming with code pointers, which we compare against with examples like function memoiza- tion and a cooperative threading library.},
author = {Chlipala, Adam},
doi = {10.1145/2345156.1993526},
isbn = {978-1-4503-0663-8},
issn = {1450306632},
journal = {ACM SIGPLAN Notices},
keywords = {functional programming,interactive proof assistants,low-level,programming languages,separation logic},
number = {6},
pages = {234--245},
title = {{Mostly-automated verification of low-level programs in computational separation logic}},
url = {http://dl.acm.org/citation.cfm?id=1993526},
volume = {46},
year = {2011}
}
@article{Nanevski2014,
author = {Nanevski, Aleksandar and Ley-Wild, Ruy and Sergey, Ilya and Delbianco, Germ{\'{a}}n Andr{\'{e}}s},
doi = {10.1007/978-3-642-54833-8_16},
isbn = {9783642548321},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {290--310},
title = {{Communicating state transition systems for fine-grained concurrent resources}},
volume = {8410 LNCS},
year = {2014}
}
@article{DaRochaPinto2014,
abstract = {To avoid data races, concurrent operations should either be at distinct times or on distinct data. Atomicity is the abstraction that an operation takes effect at a single, discrete instant in time, with linearisability being a well-known correctness condition which asserts that concurrent operations appear to behave atomically. Disjointness is the abstraction that operations act on distinct data resource, with concurrent separation logics enabling reasoning about threads that appear to operate independently on disjoint resources.We present TaDA, a program logic that combines the benefits of abstract atomicity and abstract disjointness. Our key contribution is the introduction of atomic triples, which offer an expressive approach to specifying program modules. By building up examples, we show that TaDA supports elegant modular reasoning in a way that was not previously possible.},
author = {{Da Rocha Pinto}, Pedro and Dinsdale-Young, Thomas and Gardner, Philippa},
doi = {10.1007/978-3-662-44202-9_9},
isbn = {9783662442012},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {207--231},
title = {{TaDA: A logic for time and data abstraction}},
volume = {8586 LNCS},
year = {2014}
}
@article{Svendsen2014,
author = {Svendsen, Kasper and Birkedal, Lars},
doi = {10.1007/978-3-642-54833-8_9},
isbn = {9783642548321},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {149--168},
title = {{Impredicative concurrent abstract predicates}},
volume = {8410 LNCS},
year = {2014}
}
@article{Timany2016,
author = {Timany, Amin and Krebbers, Robbert and Birkedal, Lars},
pages = {1--2},
title = {{Logical Relations in Iris}},
year = {2016}
}
@misc{Ma1992,
abstract = {The concept of relations over sets is generalized to relations over an arbitrary category, and used to investigate the abstraction (or logical-relations) theorem, the identity extension lemma, and parametric polymorphism, for Cartesian-closed-category models of the simply typed lambda calculus and PL-category models of the polymorphic typed lambda calculus. Treatments of Kripke relations and of complete relations on domains are included.},
author = {Ma, QingMing and Reynolds, John C},
booktitle = {Mathematical Foundations of Programming Semantics},
doi = {10.1007/3-540-55511-0_1},
isbn = {9780444867292},
number = {7597},
pages = {1--40},
title = {{Types, Abstraction, and Parametric Polymorphism}},
url = {http://www.cs.cmu.edu/afs/cs.cmu.edu/user/qma/www/papers/mfps.pdf},
volume = {598},
year = {1992}
}
@article{Carter2016,
author = {Carter, Adam S and Hundhausen, Christopher D},
isbn = {9781450344494},
keywords = {achievement,activity streams,educational data mining,learning analytics,performance and,predictive models of student,social},
pages = {201--209},
title = {{With a Little Help From My Friends: An Empirical Study of the Interplay of Students' Social Activities, Programming Activities, and Course Success}},
year = {2016}
}
@article{Radermacher,
author = {Radermacher, Alex and Walia, Gursimran and Knudson, Dean},
isbn = {9781450327688},
keywords = {computer science education,computer science pedagogy,required skills,software developer},
pages = {291--300},
title = {{Investigating the Skill Gap between Graduating Students and Industry Expectations}}
}
@article{Wu2014,
author = {Wu, Huiting and Wang, Yi},
keywords = {- ability,practice teaching,reform,training goal},
number = {Ictcs},
pages = {154--157},
title = {{Exploration and Research of Practical Teaching System Based on Ability Training}},
year = {2014}
}
@article{Radermacher2013,
author = {Radermacher, Alex},
isbn = {9781450318686},
keywords = {computer science education,computer science pedagogy,knowledge deficiency,required skills,software developer},
pages = {525--530},
title = {{Gaps Between Industry Expectations and the Abilities of Graduates}},
year = {2013}
}
@article{Alur2013a,
abstract = {—The classical formulation of the program-synthesis problem is to find a program that meets a correctness specifica-tion given as a logical formula. Recent work on program synthesis and program optimization illustrates many potential benefits of allowing the user to supplement the logical specification with a syntactic template that constrains the space of allowed implementations. Our goal is to identify the core computational problem common to these proposals in a logical framework. The input to the syntax-guided synthesis problem (SyGuS) consists of a background theory, a semantic correctness specification for the desired program given by a logical formula, and a syntactic set of candidate implementations given by a grammar. The computational problem then is to find an implementation from the set of candidate expressions so that it satisfies the specification in the given theory. We describe three different instantiations of the counter-example-guided-inductive-synthesis (CEGIS) strategy for solving the synthesis problem, report on prototype implementations, and present experimental results on an initial set of benchmarks.},
author = {Alur, Rajeev and Bodik, Rastislav and Juniwal, Garvit and Martin, Milo M. K. and Raghothaman, Mukund and Seshia, Sanjit A. and Singh, Rishabh and Solar-Lezama, Armando and Torlak, Emina and Udupa, Abhishek},
doi = {10.1109/FMCAD.2013.6679385},
isbn = {978-0-9835678-3-7},
journal = {2013 Formal Methods in Computer-Aided Design},
pages = {1--8},
title = {{Syntax-guided synthesis}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6679385},
year = {2013}
}
@article{Anderson2015,
author = {Anderson, Paul V and Heckman, Sarah and Vouk, Mladen and Wright, David and Burge, Janet E and Gannod, Gerald C},
doi = {10.1109/ICSE.2015.178},
isbn = {9781479919345},
keywords = {communication across the curriculum, software engi},
title = {{CS/SE Instructors Can Improve Student Writing without Reducing Class Time Devoted to Technical Content: Experimental Results}},
year = {2015}
}
@article{Park2016,
author = {Park, Thomas H and Kim, Meen Chul and Chhabra, Sukrit and Lee, Brian and Forte, Andrea},
isbn = {9781450342315},
keywords = {assessment,computational thinking,program,web development},
pages = {302--307},
title = {{Reading Hierarchies in Code: Assessment of a Basic Computational Skill}},
year = {2016}
}
@article{Mao2016,
author = {Mao, Yanyan and Feng, Yanli and Cheng, Dapeng and Xie, Qingsong},
isbn = {9781509022182},
keywords = {Education Reform and Innovation},
number = {Iccse},
pages = {907--910},
title = {{Computer Curriculum System Reform Based On System Ability Training}},
year = {2016}
}
@article{Krutz1998,
author = {Krutz, Daniel E and Malachowsky, Samuel A and Reichlmayr, Thomas},
isbn = {9781450326056},
keywords = {software,software engineering education,software testing},
pages = {49--54},
title = {{Using a Real World Project in a Software Testing Course}},
year = {2014}
}
@article{Morrison2015,
author = {Morrison, Briana B and Margulieux, Lauren E},
isbn = {9781450336307},
keywords = {cognitive load,contextual transfer,subgoal labels},
pages = {21--29},
title = {{Subgoals, Context, and Worked Examples in Learning Computing Problem Solving}},
year = {2015}
}
@article{Manolios,
author = {Manolios, Panagiotis and Pais, Jorge and Papavasileiou, Vasilis},
title = {{The Inez Mathematical Programming Modulo Theories Framework}}
}
@article{Manoliosa,
author = {Manolios, Panagiotis and Papavasileiou, Vasilis},
title = {{ILP Modulo Theories}}
}
@article{Jain,
author = {Jain, Mitesh and Manolios, Panagiotis},
title = {{Skipping Refinement}}
}
@article{Learn2016,
number = {3},
title = {{Learning Computer Science: Dimensions of Variation Within What Chinese Students Learn}},
volume = {16},
year = {2016}
}
@article{Giantamidis,
archivePrefix = {arXiv},
arxivId = {arXiv:1605.07805v2},
author = {Giantamidis, Georgios and Tripakis, Stavros},
eprint = {arXiv:1605.07805v2},
pages = {1--19},
title = {{Learning Moore Machines from Input-Output Traces}}
}
@article{Manoliosb,
author = {Manolios, Panagiotis and Subramanian, Gayatri and Vroon, Daron},
isbn = {9781595937346},
keywords = {component-based software development,integrated modular avionics,pseudo-boolean,system assembly problem},
pages = {61--71},
title = {{Automating Component-Based System Assembly}}
}
@article{Chamarthi,
author = {Chamarthi, Harsh Raju and Dillinger, Peter and Manolios, Panagiotis and Vroon, Daron},
title = {{The ACL2 Sedan Theorem Proving System}}
}
@article{Solar-Lezama2006,
abstract = {Sketching is a software synthesis approach where the programmer develops a partial implementation - a sketch - and a separate specification of the desired functionality. The synthesizer then completes the sketch to behave like the specification. The correctness of the synthesized implementation is guaranteed by the compiler, which allows, among other benefits, rapid development of highly tuned implementations without the fear of introducing bugs.We develop SKETCH, a language for finite programs with linguistic support for sketching. Finite programs include many highperformance kernels, including cryptocodes. In contrast to prior synthesizers, which had to be equipped with domain-specific rules, SKETCH completes sketches by means of a combinatorial search based on generalized boolean satisfiability. Consequently, our combinatorial synthesizer is complete for the class of finite programs: it is guaranteed to complete any sketch in theory, and in practice has scaled to realistic programming problems.Freed from domain rules, we can now write sketches as simpleto-understand partial programs, which are regular programs in which difficult code fragments are replaced with holes to be filled by the synthesizer. Holes may stand for index expressions, lookup tables, or bitmasks, but the programmer can easily define new kinds of holes using a single versatile synthesis operator.We have used SKETCH to synthesize an efficient implementation of the AES cipher standard. The synthesizer produces the most complex part of the implementation and runs in about an hour.},
author = {Solar-Lezama, Armando and Tancau, Liviu and Bodik, Rastislav and Seshia, Sanjit and Saraswat, Vijay},
doi = {10.1145/1168919.1168907},
isbn = {1595934510},
issn = {01635964},
journal = {ACM SIGARCH Computer Architecture News},
keywords = {design,languages,performance},
number = {5},
pages = {404},
title = {{Combinatorial sketching for finite programs}},
volume = {34},
year = {2006}
}
@article{Xu2007,
author = {Xu, Zhiwei and Li, Guojie},
doi = {10.1145/2001269.2001298},
title = {{Computing for the Masses}},
year = {2007}
}
@article{Lamport1988,
author = {Abadi, Mart{\'{i}}n and Lamport, Leslie},
title = {{The Existence of Refinement Mappings}},
year = {1988}
}
@article{Soh2011,
author = {Soh, Leen-kiat and Shell, Duane F and Ingraham, Elizabeth and Ramsay, Stephen and Moore, Brian},
doi = {10.1145/2699391},
pages = {7--9},
title = {{Viewpoint: Learning Through Computational Creativity}},
year = {2011}
}
@article{Soh2012,
author = {Soh, Leen-kiat and Shell, Duane F and Ingraham, Elizabeth and Ramsay, Stephen and Moore, Brian},
doi = {10.1145/2699391},
pages = {33--35},
note = {In Chinese},
title = {{Viewpoint: Learning Through Computational Creativity}},
year = {2012}
}
@misc{Lamport1983,
abstract = {Temporal logic is a formal system for specifying and reasoning about concurrent programs.  It provides a uniform framework for describing a system at any level of abstraction, thereby supporting hierarchical specification and verification.},
author = {Lamport, Leslie},
booktitle = {Information Processing 83: Proceedings of the IFIP 9th World Congress},
isbn = {0444867295},
pages = {657--668},
title = {{What Good is Temporal Logic?}},
year = {1983}
}
@article{Kafai,
author = {Kafai, Yasmin B},
doi = {10.1145/2955114},
title = {{From Computational Thinking to Computational Participation in K–12 Education}}
}
@article{Lo2010,
author = {Lo, Virginia M},
isbn = {9781605588858},
keywords = {China,computer science education,information technology,Peking University},
pages = {396--400},
title = {{Undergraduate Computer Science Education in China}},
year = {2010}
}
@article{Chen2004,
author = {Chen, David Yunchao},
keywords = {administration,China,higher education,mass expansion of higher education},
number = {1},
pages = {23--33},
title = {{China's Mass Higher Education: Problem, Analysis, and Solutions}},
volume = {5},
year = {2004}
}
@article{Wing2006,
author = {Wing, Jeannette M},
number = {3},
pages = {33--35},
title = {{Computational Thinking}},
volume = {49},
year = {2006}
}
@article{Education2014,
number = {3},
pages = {49--55},
title = {{Undergraduate IT Education in China}},
volume = {5},
year = {2014}
}
@article{Denning,
author = {Denning, Peter J},
doi = {10.1145/2535915},
pages = {29--31},
title = {{The Profession of IT: Design Thinking}}
}
@article{Hu2011,
author = {Hu, Chenglie},
isbn = {9781450306973},
keywords = {computation,computational thinking,thinking model},
pages = {223--227},
title = {{Computational Thinking – What It Might Mean and What We Might Do About It}},
year = {2011}
}
@article{Otero2015,
author = {Otero, Rafael Rom{\'{a}}n and Aravind, Alex A},
isbn = {9781450329668},
pages = {430--435},
title = {{MiniOS: An Instructional Platform for Teaching Operating Systems Projects}},
year = {2015}
}
@article{Tafliovich2015,
author = {Tafliovich, Anya and Petersen, Andrew and Campbell, Jennifer},
isbn = {9781450329668},
keywords = {evaluation,motivating students,perspective,student teamwork,undergraduate software development project},
pages = {494--499},
title = {{On the Evaluation of Student Team Software Development Projects}},
year = {2015}
}
@article{Sys2014,
author = {Sys{\l}o, Maciej M},
isbn = {9781450334402},
pages = {2014},
title = {{From Algorithmic to Computational Thinking: On the Way for Computing for all Students}},
year = {2014}
}
@article{Coffey,
author = {Coffey, John W},
pages = {39--45},
title = {{RELATIONSHIP BETWEEN DESIGN AND PROGRAMMING SKILLS IN AN ADVANCED COMPUTER PROGRAMMING}}
}
@article{Lamagna,
author = {Lamagna, Edmund A},
pages = {45--52},
title = {{Algorithmic thinking unplugged}}
}
@article{Harper,
author = {Harper, Robert and Plotkin, Gordon},
title = {{A Framework for Defining Logics}}
}
@article{Preoteasa,
author = {Preoteasa, Viorel and Tripakis, Stavros},
isbn = {9781450330527},
title = {{Refinement Calculus of Reactive Systems}}
}
@article{Dickson2012,
author = {Dickson, Paul E},
isbn = {9781450310987},
keywords = {android,appinventor,apps,cabana,iphone,mobile devices,smartphone,xcode},
pages = {529--534},
title = {{Cabana : A Cross-platform Mobile Development System}},
year = {2012}
}
@article{Schuurman,
author = {Schuurman, Derek C},
isbn = {9781450318686},
keywords = {computer organization,cpu simulation,education},
pages = {335--339},
title = {{Step-by-Step Design and Simulation of a Simple CPU Architecture}}
}
@article{Wiedijk,
author = {Wiedijk, Freek},
pages = {1--14},
title = {{Comparing mathematical provers}}
}
@article{Chong2016,
abstract = {Report on the NSF Workshop on Formal Methods for Security, held 19-20 November 2015.},
archivePrefix = {arXiv},
arxivId = {1608.00678},
author = {Chong, Stephen and Guttman, Joshua and Datta, Anupam and Myers, Andrew and Pierce, Benjamin and Schaumont, Patrick and Sherwood, Tim and Zeldovich, Nickolai},
eprint = {1608.00678},
month = {aug},
title = {{Report on the NSF Workshop on Formal Methods for Security}},
url = {http://arxiv.org/abs/1608.00678},
year = {2016}
}
@inproceedings{Vasudevan2013,
author = {Vasudevan, Amit and Chaki, S. and Jia, Limin and McCune, J. and Newsome, James and Datta, A.},
booktitle = {2013 IEEE Symposium on Security and Privacy},
doi = {10.1109/SP.2013.36},
isbn = {978-0-7695-4977-4},
keywords = {2-,applications,dynamic root of trust,extensible modular hypervisor framework,hypapps,hypervisor-based,nested},
month = {may},
pages = {430--444},
publisher = {IEEE},
title = {{Design, Implementation and Verification of an eXtensible and Modular Hypervisor Framework}},
url = {http://ieeexplore.ieee.org/document/6547125/},
year = {2013}
}
@article{Dahlin2011,
author = {Dahlin, Mike and Johnson, Ryan and Krug, Robert Bellarmine and McCoyd, Michael and Young, William},
doi = {10.4204/EPTCS.70.3},
issn = {2075-2180},
journal = {Electronic Proceedings in Theoretical Computer Science},
month = {oct},
pages = {28--45},
title = {{Toward the Verification of a Simple Hypervisor}},
url = {http://arxiv.org/abs/1110.4672v1},
volume = {70},
year = {2011}
}
@misc{,
title = {{F-Bounded Polymorphism for Object-Oriented Programming}}
}
@article{Ozeri,
author = {Ozeri, Or and Padon, Oded and Rinetzky, Noam and Sagiv, Mooly},
title = {{Conjunctive Abstract Interpretation using Paramodulation}}
}
@article{Reynolds,
author = {Reynolds, Andrew and Iosif, Radu and Serban, Cristina},
pages = {1--18},
title = {{Reasoning in the Bernays-Sch{\"{o}}nfinkel Fragment of Separation Logic}}
}
@article{Delaware2015,
abstract = {We present Fiat, a library for the Coq proof assistant supporting refinement of declarative specifications into efficient functional programs with a high degree of automation. Each refinement process leaves a proof trail, checkable by the normal Coq kernel, justifying its soundness. We focus on the synthesis of abstract data types that package methods with private data. We demonstrate the utility of our framework by applying it to the synthesis of query structures -- abstract data types with SQL-like query and insert operations. Fiat includes a library for writing specifications of query structures in SQL-inspired notation, expressing operations over relations (tables) in terms of mathematical sets. This library includes a set of tactics for automating the refinement of these specifications into efficient, correct-by-construction OCaml code. Using these tactics, a programmer can generate such an implementation completely automatically by only specifying the equivalent of SQL indexes, data structures capturing useful views of the abstract data. We conclude by speculating on the new programming modularity possibilities enabled by an automated refinement system with proved-correct rules.},
author = {Delaware, Benjamin and Pit-Claudel, Cl{\'{e}}ment and Gross, Jason and Chlipala, Adam},
doi = {10.1145/2775051.2677006},
isbn = {9781450333009},
issn = {0362-1340},
journal = {SIGPLAN Not.},
keywords = {deductive synthesis,mechanized derivation of abstract data types},
number = {1},
pages = {689--700},
title = {{Fiat: Deductive Synthesis of Abstract Data Types in a Proof Assistant}},
url = {http://doi.acm.org/10.1145/2775051.2677006},
volume = {50},
year = {2015}
}
@article{Silva,
author = {D'Silva, Vijay and Sousa, Marcelo},
pages = {1--18},
title = {{Complete Abstractions and Subclassical Modal Logics}}
}
@article{Bloem,
author = {Bloem, Roderick and Chockler, Hana and Ebrahimi, Masoud and Strichman, Ofer},
title = {{Synthesizing Non-Vacuous Systems}}
}
@article{Cohen,
author = {Cohen, Ernie},
title = {{Verified Concurrent Code : Tricks of the Trade}}
}
@article{Seladji,
author = {Seladji, Yassamine},
title = {{Finding Relevant Templates via the Principal Component Analysis}}
}
@article{Mukherjee,
author = {Mukherjee, Suvam and Kumar, Arun and D'Souza, Deepak},
title = {{Detecting All High-Level Dataraces in an RTOS Kernel}}
}
@article{Vizel,
author = {Vizel, Yakir and Gurfinkel, Arie and Shoham, Sharon and Malik, Sharad},
title = {{IC3 - Flipping the E in ICE}}
}
@article{Silvaa,
author = {D'Silva, Vijay and Kroening, Daniel and Sousa, Marcelo},
pages = {1--18},
title = {{Independence Abstractions and Models of Concurrency}}
}
@article{Ahmed,
author = {Ahmed, Zara and Benque, David and Berezin, Sergey and Dahl, Anna Caroline E and Fisher, Jasmin and Hall, Benjamin A and Ishtiaq, Samin and Nanavati, Jay and Riechert, Maik and Skoblov, Nikita},
title = {{Bringing LTL Model Checking to Biologists}}
}
@article{Monat,
author = {Monat, Rapha{\"{e}}l and Min{\'{e}}, Antoine},
keywords = {abstract interpretation,concurrent programs,invariant generation,numeric,program verification,rely-guarantee methods,thread-modular analyses},
title = {{Precise Thread-Modular Abstract Interpretation of Concurrent Programs using Relational Interference Abstractions}}
}
@article{Sharma,
author = {Sharma, Tushar and Reps, Thomas},
pages = {1--19},
title = {{Sound Bit-Precise Numerical Domains}}
}
@article{Chakraborty,
author = {Chakraborty, Supratik and Gupta, Ashutosh and Jain, Rahul},
pages = {1--19},
title = {{Matching Multiplications in Bit-Vector Formulas}}
}
@article{Henning,
author = {G{\"{u}}nther, Henning and Laarman, Alfons and Sokolova, Ana and Weissenbacher, Georg},
title = {{Dynamic Reductions for Model Checking Concurrent Software}},
volume = {23}
}
@article{Cuoq,
author = {Cuoq, Pascal},
keywords = {c,static analysis,strict aliasing,type-based alias analysis},
title = {{Detecting Strict Aliasing Violations in the Wild}}
}
@article{Ferrara,
author = {Ferrara, Pietro and Tripp, Omer and Liu, Peng and Koskinen, Eric},
pages = {1--20},
title = {{Using Abstract Interpretation to Correct Synchronization Faults}}
}
@article{Abal,
author = {Abal, Iago and Brabrand, Claus and W{\k{a}}sowski, Andrzej},
keywords = {bug finding,c,double lock,linux,model checking,type and effects},
title = {{Effective Bug Finding in C Programs with Shape and Effect Abstractions}}
}
@article{Bride,
author = {Bride, Hadrien and Kouchnarenko, Olga and Peureux, Fabien},
title = {{Reduction of Workflow Nets for Generalised Soundness Verification}}
}
@article{Muscholl,
author = {Muscholl, Anca and Seidl, Helmut and Walukiewicz, Igor},
title = {{Reachability for dynamic parametric processes}}
}
@article{Hru,
author = {Hru{\v{s}}ka, Martin and Rogalewicz, Adam},
title = {{Counterexample Validation and Interpolation-Based Refinement for Forest Automata}}
}
@article{Jiang,
author = {Jiang, Jiahong and Chen, Liqian and Wu, Xueguang and Wang, Ji},
keywords = {abstract domains,abstract interpretation,block en-,smt},
pages = {1--19},
title = {{Block-wise abstract interpretation by combining abstract domains with SMT}}
}
@article{Wanga,
author = {Wang, Wei and Barrett, Clark and Wies, Thomas},
title = {{Partitioned Memory Models for Program Analysis}}
}
@article{Blazy,
author = {Blazy, Sandrine and B{\"{u}}hler, David and Yakobowski, Boris},
title = {{Structuring Abstract Interpreters through State and Value Abstractions}}
}
@article{Botbol,
author = {Botbol, Vincent and Chailloux, Emmanuel and Gall, Tristan Le},
title = {{Static Analysis of Communicating Processes using Symbolic Transducers}}
}
@article{Konnov,
author = {Konnov, Igor and Widder, Josef and Spegni, Francesco and Spalazzi, Luca},
title = {{Accuracy of Message Counting Abstraction in Fault-Tolerant Distributed Algorithms}}
}
@article{Frumkin,
author = {Frumkin, Asya and Feldman, Yotam M Y},
title = {{Property Directed Reachability for Proving Absence of Concurrent Modification Errors}}
}
@article{Programming2013,
number = {July},
title = {{The GHC Runtime System}},
year = {2013}
}
@article{Møller2016,
author = {M{\o}ller, Anders},
isbn = {9781450339001},
pages = {1--12},
title = {{Feedback-Directed Instrumentation for Deployed JavaScript Applications}},
year = {2016}
}
@article{Chandra,
author = {Chandra, Satish and Gordon, Colin S and Jeannin, Jean-Baptiste and Schlesinger, Cole and Sridharan, Manu and Tip, Frank and Choi, Youngil},
isbn = {9781450344449},
keywords = {object-oriented type systems,type inference},
pages = {410--429},
title = {{Type Inference for Static Compilation of JavaScript}}
}
@article{Andreasen2016,
archivePrefix = {arXiv},
arxivId = {arXiv:1605.01362v1},
author = {Andreasen, Esben and Gordon, Colin S and Chandra, Satish},
eprint = {arXiv:1605.01362v1},
keywords = {retrofitted type systems,trace typing,type system design},
title = {{Trace Typing : An Approach for Evaluating Retrofitted Type Systems ( Extended Version )}},
year = {2016}
}
@article{Axelsson2012,
author = {Axelsson, Emil},
isbn = {9781450310543},
keywords = {embedded domain-specific languages,generic programming,the expression problem},
title = {{A Generic Abstract Syntax Model for Embedded Languages}},
year = {2012}
}
@article{Turon2013,
author = {Turon, Aaron and Thamsborg, Jacob and Ahmed, Amal and Birkedal, Lars and Dreyer, Derek},
isbn = {9781450318327},
keywords = {data abstraction,fine-grained concurrency,linearizability,local state,logical relations,refinement,separation logic},
pages = {1--14},
title = {{Logical Relations for Fine-Grained Concurrency}},
year = {2013}
}
@article{Sridharan,
author = {Sridharan, Manu and Dolby, Julian and Chandra, Satish and Sch{\"{a}}fer, Max and Tip, Frank},
keywords = {call graph construction,javascript,points-to analysis},
pages = {1--25},
title = {{Correlation Tracking for Points-To Analysis of JavaScript}}
}
@article{Cox,
author = {Cox, Arlen and Chang, Bor-yuh Evan and Li, Huisong and Rival, Xavier},
title = {{Abstract Domains and Solvers for Sets Reasoning}}
}
@article{Beyer2008,
abstract = {We present and evaluate a framework and tool for combining multiple program analyses which allows the dynamic (on-line) adjustment of the precision of each analysis depending on the accumulated results. For example, the explicit tracking of the values of a variable may be switched off in favor of a predicate abstraction when and where the number of different variable values that have been encountered has exceeded a specified threshold. The method is evaluated on verifying the SSH client/server software and shows significant gains compared with predicate abstraction-based model checking.},
author = {Beyer, Dirk and Henzinger, Thomas A. and Th{\'{e}}oduloz, Gr{\'{e}}gory},
doi = {10.1109/ASE.2008.13},
isbn = {9781424421886},
issn = {1938-4300},
journal = {ASE 2008 - 23rd IEEE/ACM International Conference on Automated Software Engineering, Proceedings},
pages = {29--38},
title = {{Program analysis with dynamic precision adjustment}},
year = {2008}
}
@article{Tiwari2007,
abstract = {This paper presents the foundations for using automated deduction technology in static program analysis. The central principle is the use of logical lattices – a class of lattices defined on logical formulas in a logical theory – in an abstract interpretation framework. Abstract interpretation over logical lattices, called logical interpretation, raises new challenges for theorem proving. We present an overview of some of the existing results in the field of logical interpretation and outline some requirements for building expressive and scalable logical interpreters.},
author = {Tiwari, Ashish and Gulwani, Sumit},
doi = {10.1007/978-3-540-73595-3},
isbn = {978-3-540-73594-6},
issn = {0302-9743},
journal = {Automated Deduction},
pages = {147--166},
title = {{Logical Interpretation: Static Program Analysis Using Theorem Proving}},
url = {http://www.springerlink.com/content/d3n28r6x748v00p7/},
volume = {4603},
year = {2007}
}
@article{Madhavapeddy2014,
abstract = {What if all the software layers in a virtual appliance were compiled within the same safe, high-level language framework?},
author = {Madhavapeddy, Anil and Scott, David J.},
doi = {10.1145/2541883.2541895},
issn = {00010782},
journal = {Communications of the ACM},
month = {jan},
number = {1},
pages = {61--69},
title = {{Unikernels}},
url = {http://doi.acm.org/10.1145/2541883.2541895},
volume = {57},
year = {2014}
}
@book{Krebbers2015,
author = {Krebbers, Robbert Jan},
isbn = {9789462599031},
title = {{The C standard formalized in Coq}},
year = {2015}
}
@article{Klein2009,
abstract = {Complete formal verification is the only known way to guarantee that a system is free of programming errors. We present our experience in performing the for- mal, machine-checked verification of the seL4 mi- crokernel from an abstract specification down to its C implementation. We assume correctness of com- piler, assembly code, and hardware, and we used a unique design approach that fuses formal and oper- ating systems techniques. To our knowledge, this is the first formal proof of functional correctness of a complete, general-purpose operating-system kernel. Functional correctness means here that the implemen- tation always strictly follows our high-level abstract specification of kernel behaviour. This encompasses traditional design and implementation safety proper- ties such as the kernel will never crash, and it will never perform an unsafe operation. It also proves much more: we can predict precisely how the kernel will behave in every possible situation. seL4, a third-generation microkernel of L4 prove- nance, comprises 8,700 lines of C code and 600 lines of assembler. Its performance is comparable to other high-performance L4 kernels. 1},
author = {Klein, Gerwin and Elphinstone, Kevin and Heiser, Gernot and Andronick, June and Cock, David and Derrin, Philip and Elkaduwe, Dhammika and Engelhardt, Kai and Kolanski, Rafal and Norrish, Michael and Sewell, Thomas and Tuch, Harvey and Winwood, Simon},
doi = {10.1145/1629575.1629596},
isbn = {9781605587523},
issn = {00010782},
journal = {Proceedings of the ACM SIGOPS 22nd Symposium on Operating System Principles},
keywords = {hol,isabelle,l4,microkernel,sel4},
pages = {207--220},
title = {{seL4: Formal verification of an OS kernel}},
url = {http://dl.acm.org/citation.cfm?id=1629596},
year = {2009}
}
@article{Wang2014,
author = {Wang, Xi and Lazar, David and Zeldovich, Nickolai and Chlipala, Adam and Tatlock, Zachary},
isbn = {9781931971164},
title = {{Jitk: A Trustworthy In-Kernel Interpreter Infrastructure}},
year = {2014}
}
@article{Maus2011,
author = {Maus, Stefan},
number = {September},
title = {{Verification of Hypervisor Subroutines written in Assembler}},
year = {2011}
}
@article{Xu,
author = {Xu, Fengwei and Fu, Ming and Feng, Xinyu},
title = {{A Practical Verification Framework for Preemptive OS Kernels (Technical Report)}}
}
@article{Degenbaev2012,
author = {Degenbaev, Ulan},
title = {{Formal specification of the x86 instruction set architecture}},
year = {2012}
}
@misc{,
title = {{Formal Nova interface specification}}
}
@article{Leinenbach2009,
abstract = {VCC is an industrial-strength verification suite for the formal verification of concurrent, low-level C code. It is being developed by Microsoft Research, Redmond, and the European Microsoft Innovation Center, Aachen. The development is driven by two applications from the Verisoft XT project: the Microsoft Hyper-V Hypervisor and SYSGO's PikeOS micro kernel. This paper gives a brief overview on the Hypervisor with a special focus on verification related challenges this kind of low-level software poses. It discusses how the design of VCC addresses these challenges, and highlights some specific issues of the Hypervisor verification and how they can be solved with VCC.},
author = {Leinenbach, Dirk and Santen, Thomas},
doi = {10.1007/978-3-642-05089-3_51},
isbn = {3642050883},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {806--809},
title = {{Verifying the microsoft hyper-V hypervisor with VCC}},
volume = {5850 LNCS},
year = {2009}
}
@article{Freitas2011,
abstract = {This paper reports on the Xenon project's use of formal methods. Xenon is a higher-assurance secure hypervisor based on re-engineering the Xen open-source hypervisor. The Xenon project used formal specifications both for assurance and as guides for security re-engineering. We formally modelled the fundamental definition of security, the hypercall interface behaviour, and the internal modular design. We used three formalisms: CSP , Z, and Circus for this work. Circus is a combination of Standard Z, CSP with its semantics given in Hoare and He's unifying theories of programming. Circus is suited for both event-based and state-based modelling. Here, we report our experiences to date with using these formalisms for assurance.},
author = {Freitas, Leo and McDermott, John},
doi = {10.1007/s10009-011-0195-9},
isbn = {1000901101959},
issn = {14332779},
journal = {International Journal on Software Tools for Technology Transfer},
keywords = {Circus,Formal modelling,Separation kernel,Theorem proving,Virtualisation,Xenon},
number = {5},
pages = {463--489},
title = {{Formal methods for security in the Xenon hypervisor}},
volume = {13},
year = {2011}
}
@book{Hutchison,
author = {Hutchison, David and Mitchell, John C},
isbn = {9783642214363},
title = {{Lecture Notes in Computer Science}}
}
@article{Li,
author = {Li, Bojie and Tan, Kun and Chen, Enhong},
isbn = {9781450341936},
keywords = {compiler,network function virtualization,reconfigurable},
title = {{ClickNP: Highly Flexible and High Performance Network Processing with Reconfigurable Hardware}}
}
@book{Wester,
author = {Wester, Rinse},
isbn = {9789036538879},
title = {{A transformation-based approach to hardware design using higher-order functions}}
}
@incollection{Xu2016,
abstract = {Although testing is the most widely used technique to control the quality of software systems, it is a topic that, until relatively recently, has received scant attention from the computer research community. Although some pioneering work was already done a considerable time ago [Cho78,GG83,How78,Mye79], the testing of software systems has never become a mainstream activity of scientific research. The reasons that are given to explain this situation usually include arguments to the effect that testing as a technique is inferior to verification — testing can show only the presence of errors, not their absence — and that we should therefore concentrate on developing theory and tools for the latter. It has also been frequently said that testing is by its very nature a non-formal activity, where formal methods and related tools are at best of little use. The first argument is incorrect in the sense that it gives an incomplete picture of the situation. Testing is inferior to verification if the verification model can be assumed to be correct and if its complexity can be handled correctly by the person and or tool involved in the verification task. If these conditions are not fulfilled, which is frequently the case, then testing is often the only available technique to increase the confidence in the correctness of a system. In this talk we will show that the second argument is flawed as well. It is based on the identification of testing with robustness testing, where it is precisely the objective to find out how the system behaves under unspecified circumstances. This excludes the important activity of conformance testing, which tries to test the extent to which system behaviour conforms to its specification. It is precisely in this area where formal methods and tools can help to derive tests systematically from specifications, which is a great improvement over laborious, error-prone and costly manual test derivation. In our talk we show how the process algebraic testing theory due to De Nicola and Hennessy [DNH84,DeN87], originally conceived out of semantic considerations, may be used to obtain principles for test derivation. We will give an overview of the evolution of these ideas over the past ten years or so, starting with the conformance testing theory of simple synchronously communicating reactive systems [Bri88,Lan90] and leading to realistic systems that involve sophisticated asynchronous message passing mechanisms [Tre96,HT97]. Written accounts can be found in [BHT97,He98]. We discuss how such ideas have been used to obtain modern test derivation tools, such as TVEDA and TGV [Pha94, CGPT96,FJJV96], and the tool set that is currently being developed in the C{\^{o}}te-de-Resyste project [STW96]. The advantage of a test theory that is based on well-established process algebraic theory is that in principle there exists a clear link between testing and verification, which allows the areas to share ideas and algorithms [FJJV96,VT98]. Time allowing, we look at some of the methodological differences and commonalities between model checking techniques and testing, one of the differences being that of state space coverage, and an important commonality that of test property selection. In recent years the research into the use of formal methods and tools for testing reactive systems has seen a considerable growth. An overview of different approaches and school of thought can be found in [BPS98], reporting on the first ever Dagstuhl seminar devoted to testing. 
The formal treatment of conformance testing based on process algebra and/or concurrency theory is certainly not the only viable approach. An important school of thought is the FSM-testing theory grown out of the seminal work of Chow [Cho78], of which a good overview is given in [LY96]. Another interesting formal approach to testing is based on abstract data type theory [Gau95,BGM91].},
archivePrefix = {arXiv},
arxivId = {1301.4779},
author = {Xu, Fengwei and Fu, Ming and Feng, Xinyu and Zhang, Xiaoran and Zhang, Hui and Li, Zhaohui},
booktitle = {Computer Aided Verification},
doi = {10.1007/978-3-319-41540-6_4},
eprint = {1301.4779},
isbn = {9783540272311},
issn = {0018-9235},
number = {July},
pages = {59--79},
pmid = {4520227},
title = {{A Practical Verification Framework for Preemptive OS Kernels}},
url = {http://link.springer.com/10.1007/978-3-319-41540-6{\_}4},
volume = {575},
year = {2016}
}
@article{Hall,
author = {Du, Wenliang and Atallah, Mikhail J.},
keywords = {privacy,secure multi-party computation},
title = {{Secure Multi-Party Computation Problems and Their Applications: A Review and Open Problems}}
}
@article{Tassarotti,
author = {Tassarotti, Joseph and Jung, Ralf and Harper, Robert},
title = {{A Higher-Order Logic for Concurrent Termination-Preserving Refinement}}
}
@article{Aydemir,
author = {Aydemir, Brian and Chargu{\'{e}}raud, Arthur and Pierce, Benjamin C and Pollack, Randy and Weirich, Stephanie},
isbn = {9781595936899},
keywords = {binding,coq,locally nameless},
title = {{Engineering Formal Metatheory}}
}
@article{,
author = {Germane, Kimball and Might, Matthew},
doi = {10.1017/S0956796814000227},
journal = {Journal of Functional Programming},
number = {4},
pages = {423--433},
title = {{Deletion: The curse of the red-black tree}},
volume = {24},
year = {2014}
}
@article{Koeplingera,
author = {Koeplinger, David and Delimitrou, Christina and Kozyrakis, Christos},
title = {{Automatic Generation of Efficient Accelerators for Reconfigurable Hardware}}
}
@article{Motara2011,
archivePrefix = {arXiv},
arxivId = {arXiv:1201.5728v1},
author = {Motara, Yusuf Moosa},
eprint = {arXiv:1201.5728v1},
pages = {1--14},
title = {{Functional Programming and Security}},
year = {2011}
}
@article{Yang2015b,
abstract = {As a solution to the problem of information leaks, I propose a policy-agnostic pro-gramming paradigm that enforces security and privacy policies by construction. I present the implementation of this paradigm in a new language, Jeeves, that auto-matically enforces information flow policies describing how sensitive values may flow through computations. In Jeeves, the programmer specifies expressive information flow policies separately from other functionality and relies on the language runtime to customize program behavior based on the policies. Jeeves allows programmers to implement information flow policies once instead of as repeated checks and filters across the program. To provide strong guarantees about Jeeves programs, I present a formalization of the dynamic semantics of Jeeves, define non-interference and policy compliance properties, and provide proofs that Jeeves enforces these properties. To demonstrate the practical feasibility of policy-agnostic programming, I present Jacqueline, a web framework built on Jeeves that enforces policies in database-backed web applications. I provide a formalization of Jacqueline as an extension of Jeeves to include relational operators and proofs that this preserves the policy compliance guarantees. Jacqueline enforces information flow policies end-to-end and runs using an unmodified Python interpreter and SQL database. I show, through several case studies, that Jacqueline reduces the amount of policy code required while incurring limited overheads.},
author = {Yang, Jean},
title = {{Preventing Information Leaks with Policy-Agnostic Programming}},
year = {2015}
}
@article{Fournet2011,
abstract = {Type systems are effective tools for verifying the security of crypto-graphic programs. They provide automation, modularity and scala-bility, and have been applied to large security protocols. However, they traditionally rely on abstract assumptions on the underlying cryptographic primitives, expressed in symbolic models. Cryptog-raphers usually reason on security assumptions using lower level, computational models that precisely account for the complexity and success probability of attacks. These models are more real-istic, but they are harder to formalize and automate. We present the first modular automated program verification me-thod based on standard cryptographic assumptions. We show how to verify ideal functionalities and protocols written in ML by typing them against new cryptographic interfaces using F7, a refinement type checker coupled with an SMT-solver. We develop a proba-bilistic core calculus for F7 and formalize its type safety in COQ. We build typed module and interfaces for MACs, signatures, and encryptions, and establish their authenticity and secrecy proper-ties. We relate their ideal functionalities and concrete implemen-tations, using game-based program transformations behind typed interfaces. We illustrate our method on a series of protocol imple-mentations.},
author = {Fournet, C{\'{e}}dric},
doi = {10.1145/2046707.2046746},
isbn = {9781450309486},
issn = {15437221},
keywords = {cryptography,refinement types,security protocols},
pages = {341--350},
title = {{Modular Code-Based Cryptographic Verification}},
year = {2011}
}
@article{Dreyer2005,
abstract = {CMU-CS-05-131, Understanding and Evolving the ML Module System. Derek Dreyer. May 2005. Ph.D. Thesis. In this dissertation I contribute to the understanding and evolution of the ML module system by: (1) developing a unifying account of the ML module system in which},
author = {Dreyer, D},
journal = {Reports-Archive.Adm.Cs.Cmu.Edu},
number = {May},
title = {{Understanding and Evolving the ML Module System}},
url = {http://reports-archive.adm.cs.cmu.edu/anon/anon/usr/ftp/2005/abstracts/05-131.html},
year = {2005}
}
@article{Swamy2013,
abstract = {Modern programming languages, ranging from Haskell and ML, to JavaScript, C{\{}{\#}{\}} and Java, all make extensive use of higher-order state. This paper advocates a new verification methodology for higher-order stateful programs, based on a new monad of predicate transformers called the Dijkstra monad. Using the Dijkstra monad has a number of benefits. First, the monad naturally yields a weakest pre-condition calculus. Second, the computed specifications are structurally simpler in several ways, e.g., single-state post-conditions are sufficient (rather than the more complex two-state post-conditions). Finally, the monad can easily be varied to handle features like exceptions and heap invariants, while retaining the same type inference algorithm. We implement the Dijkstra monad and its type inference algorithm for the F* programming language. Our most extensive case study evaluates the Dijkstra monad and its F* implementation by using it to verify JavaScript programs. Specifically, we describe a tool chain that translates programs in a subset of JavaScript decorated with assertions and loop invariants to F*. Once in F*, our type inference algorithm computes verification conditions and automatically discharges their proofs using an SMT solver. We use our tools to prove that a core model of the JavaScript runtime in F* respects various invariants and that a suite of JavaScript source programs are free of runtime errors.},
author = {Swamy, Nikhil and Weinberger, Joel and Schlesinger, Cole and Chen, Juan and Livshits, Benjamin},
doi = {10.1145/2491956.2491978},
isbn = {9781450320146},
issn = {03621340},
journal = {ACM SIGPLAN Conference on Programming Language Design and Implementation},
keywords = {hoare monad,predicate transformer,refinement types},
pages = {387},
title = {{Verifying higher-order programs with the dijkstra monad}},
url = {http://research.microsoft.com/apps/pubs/default.aspx?id=189686},
year = {2013}
}
@article{Swamy2014,
abstract = {JavaScript's flexible semantics makes writing correct code hard and writing secure code extremely difficult. To address the former prob- lem, various forms of gradual typing have been proposed, such as Closure and TypeScript. However, supporting all common pro- gramming idioms is not easy; for example, TypeScript deliberately gives up type soundness for programming convenience. In this pa- per, we propose a gradual type system and implementation techniques that provide important safety and security guarantees. We present TS?, a gradual type system and source-to-source compiler for JavaScript. In contrast to prior gradual type systems, TS? features full runtime reflection over three kinds of types: (1) simple types for higher-order functions, recursive datatypes and dictionary-based extensible records; (2) the type any, for dynami- cally type-safe TS? expressions; and (3) the type un, for untrusted, potentially malicious JavaScript contexts in which TS? is embed- ded. After type-checking, the compiler instruments the program with various checks to ensure the type safety of TS? despite its interactions with arbitrary JavaScript contexts, which are free to use eval, stack walks, prototype customizations, and other offen- sive features. The proof of our main theorem employs a form of type-preserving compilation, wherein we prove all the runtime in- variants of the translation of TS? to JavaScript by showing that translated programs are well-typed in JS?, a previously proposed dependently typed language for proving functional correctness of JavaScript programs. We describe a prototype compiler, a secure runtime, and sample applications for TS?. Our examples illustrate how web security pat- terns that developers currently program in JavaScript (with much difficulty and still with dubious results) can instead be programmed naturally in TS?, retaining a flavor of idiomatic JavaScript, while providing strong safety guarantees by virtue of typing.},
author = {Swamy, Nikhil and Fournet, C{\'{e}}dric and Rastogi, Aseem and Bhargavan, Karthikeyan and Chen, Juan and Strub, Pierre-Yves and Bierman, Gavin},
doi = {10.1145/2535838.2535889},
isbn = {9781450325448},
issn = {15232867},
journal = {Proceedings of the 41st ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL '14},
keywords = {compilers,language-based security,type systems},
pages = {425--437},
title = {{Gradual typing embedded securely in JavaScript}},
url = {http://dl.acm.org/citation.cfm?doid=2535838.2535889},
year = {2014}
}
@article{Strub2012,
author = {Strub, Pierre-yves and Swamy, Nikhil and Fournet, Cedric and Chen, Juan},
doi = {10.1145/2103621.2103723},
isbn = {9781450310833},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {certification,dependent types,refinement types},
month = {jan},
number = {1},
pages = {571},
title = {{Self-certification}},
url = {http://dl.acm.org/citation.cfm?doid=2103621.2103723},
volume = {47},
year = {2012}
}
@article{Chugh2013,
author = {Chugh, Ravi},
title = {{Nested Refinement Types for JavaScript}},
year = {2013}
}
@article{Fournet2013,
abstract = {Many tools allow programmers to develop applications in high- level languages and deploy them in web browsers via compilation to JavaScript. While practical and widely used, these compilers are ad hoc: no guarantee is provided on their correctness for whole programs, nor their security for programs executed within arbitrary JavaScript contexts. This paper presents a compiler with such guarantees. We compile an ML-like language with higher-order func- tions and references to JavaScript, while preserving all source program properties. Relying on type-based invariants and applicative bisimilarity, we show full abstraction: two programs are equivalent in all source contexts if and only if their wrapped translations are equivalent in all JavaScript contexts. We evaluate our compiler on sample programs, including a series of secure libraries.},
author = {Fournet, C{\'{e}}dric and Swamy, Nikhil and Chen, Juan and Dagand, Pierre-Evariste and Strub, Pierre-Yves and Livshits, Benjamin},
doi = {10.1145/2429069.2429114},
isbn = {9781450318327},
issn = {0362-1340},
journal = {Proceedings of the 40th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages - POPL '13},
keywords = {full abstraction,program equivalence,refinement types},
pages = {371},
title = {{Fully abstract compilation to JavaScript}},
url = {http://dl.acm.org/citation.cfm?doid=2429069.2429114},
year = {2013}
}
@article{Wangb,
author = {Wang, Peng and Parno, Bryan},
title = {{Extracting from F* to C: a progress report}}
}
@article{Liu,
author = {Liu, Chang and Harris, Austin and Maas, Martin and Hicks, Michael and Tiwari, Mohit and Shi, Elaine},
isbn = {9781450328357},
title = {{GhostRider: A Hardware-Software System for Memory Trace Oblivious Computation}}
}
@article{Wadler1998,
abstract = {John Hughes has made pretty printers one of the prime demonstrations of using combinators to develop a library, and algebra to implement it. This note presents a new design for pretty printers which improves on Hughes's classic design. The new design is based on a single concatenation operator which is associative and has a left and right unit. Hughes's design requires two separate operators for concatenation, where horizontal concatenation has a right unit but no left unit, and vertical concatenation has neither unit.},
author = {Wadler, Philip},
journal = {Journal of Functional Programming},
pages = {223--244},
title = {{A prettier printer}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.635{\&}rep=rep1{\&}type=pdf},
year = {1998}
}
@article{Landin1966,
abstract = {A family of unimplemented computing languages is described that is intended to span differences of application area by a unified framework. This framework dictates the rules about the uses of user-coined names, and the conventions about characterizing functional relationships. Within this framework the design of a specific language splits into two independent parts. One is the choice of written appearances of programs (or more generally, their physical representation). The other is the choice of the abstract entities (such as numbers, character-strings, list of them, functional relations among them) that can be referred to in the language. The system is biased towards “expressions” rather than “statements.” It includes a nonprocedural (purely functional) subsystem that aims to expand the class of users' needs that can be met by a single print-instruction, without sacrificing the important properties that make conventional right-hand-side expressions easy to construct and understand.},
author = {Landin, P. J.},
doi = {10.1145/365230.365257},
issn = {0001-0782},
journal = {Commun. ACM},
number = {3},
pages = {157--166},
title = {{The Next 700 Programming Languages}},
url = {http://doi.acm.org/10.1145/365230.365257},
volume = {9},
year = {1966}
}
@article{Cadar2011,
abstract = {We present results for the "Impact Project Focus Area" on the topic of symbolic execution as used in software testing. Symbolic execution is a program analysis technique introduced in the 70s that has received renewed interest in recent years, due to algorithmic advances and increased availability of computational power and constraint solving technology. We review classical symbolic execution and some modern extensions such as generalized symbolic execution and dynamic test generation. We also give a preliminary assessment of the use in academia, research labs, and industry.},
author = {Cadar, Cristian and Godefroid, Patrice and Khurshid, Sarfraz and Pasareanu, Corina S. and Sen, Koushik and Tillmann, Nikolai and Visser, Willem},
doi = {10.1145/1985793.1985995},
isbn = {978-1-4503-0445-0},
issn = {0270-5257},
journal = {2011 33rd International Conference on Software Engineering (ICSE)},
keywords = {dynamic test generation,generalized symbolic execution},
pages = {1066--1071},
title = {{Symbolic execution for software testing in practice: preliminary assessment}},
year = {2011}
}
@article{Albarghouthi2016,
abstract = {Many problems in program analysis, verification, and synthesis require inferring specifications of unknown procedures. Motivated by a broad range of applications, we formulate the problem of maximal specification inference: Given a postcondition ϕ and a program P calling a set of unknown procedures F1, . . . , Fn, what are the most permissive specifications of procedures Fi that ensure correctness of P? In other words, we are looking for the smallest number of assumptions we need to make about the behaviours of Fi in order to prove that P satisfies its postcondition. To solve this problem, we present a novel approach that utilizes a counterexample-guided inductive synthesis loop and reduces the maximal specification inference problem to multi-abduction. We formulate the novel notion of multi-abduction as a generalization of classical logical abduction and present an algorithm for solving multi-abduction problems. On the practical side, we evaluate our specification inference technique on a range of benchmarks and demonstrate its ability to synthesize specifications of kernel routines invoked by device drivers.},
author = {Albarghouthi, Aws and Dillig, Isil and Gurfinkel, Arie},
doi = {10.1145/2914770.2837628},
isbn = {978-1-4503-3549-2},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {specification,synthesis,verification},
number = {1},
pages = {789--801},
title = {{Maximal specification synthesis}},
url = {http://dl.acm.org/citation.cfm?id=2914770.2837628},
volume = {51},
year = {2016}
}
@article{Cao2015,
author = {Cao, Jingyuan and Fu, Ming and Feng, Xinyu},
doi = {10.1145/2676724.2693162},
isbn = {978-1-4503-3296-5},
journal = {Proceedings of the 2015 Conference on Certified Programs and Proofs},
keywords = {c program verification,interactive proof assistants,practical tactics,separation logic},
pages = {97--108},
title = {{Practical Tactics for Verifying C Programs in Coq}},
url = {http://doi.acm.org/10.1145/2676724.2693162},
year = {2015}
}
@article{Lampropoulos2016,
abstract = {Property-based random testing in the style of QuickCheck demands efficient generators for well-distributed random data satisfying complex logical predicates, but writing these generators can be difficult and error prone. We propose a better alternative: a domain-specific language in which generators are expressed by decorating predicates with lightweight annotations to control both the distribution of generated values as well as the amount of constraint solving that happens before each variable is instantiated. This language, called Luck, makes generators easier to write, read, and maintain. We give Luck a formal semantics and prove several fundamental properties, including the soundness and completeness of random generation with respect to a standard predicate semantics. We evaluate Luck on common examples from the property-based testing literature and on two significant case studies; we show that it can be used in complex domains with comparable bug-finding effectiveness and a significant reduction in testing code size, compared to handwritten generators.},
archivePrefix = {arXiv},
arxivId = {1607.05443},
author = {Lampropoulos, Leonidas and Gallois-Wong, Diane and Hritcu, Catalin and Hughes, John and Pierce, Benjamin C. and Xia, Li-yao},
eprint = {1607.05443},
pages = {1--28},
title = {{Beginner's Luck: A Language for Property-Based Generators}},
url = {http://arxiv.org/abs/1607.05443},
year = {2016}
}
@article{DSilva2014,
abstract = {This article introduces an abstract interpretation framework that codifies the operations in SAT and SMT solvers in terms of lattices, transformers and fixed points. We develop the idea that a formula denotes a set of models in a universe of structures. This set of models has characterizations as fixed points of deduction, abduction and quantification transformers. A wide range of satisfiability procedures can be understood as computing and refining approximations of such fixed points. These include procedures in the DPLL family, those for preprocessing and inprocessing in SAT solvers, decision procedures for equality logics, weak arithmetics, and procedures for approximate quantification. Our framework provides a unified, mathematical basis for studying and combining program analysis and satisfiability procedures. A practical benefit of our work is a new, logic-agnostic architecture for implementing solvers.},
author = {D'Silva, Vijay and Haller, Leopold and Kroening, Daniel},
doi = {10.1145/2535838.2535868},
isbn = {9781450325448},
issn = {07308566},
journal = {Principles of Programming Languages},
keywords = {abstract interpretation,decision procedures,fixed points,logic},
pages = {139--150},
title = {{Abstract Satisfaction}},
url = {http://dl.acm.org/citation.cfm?doid=2535838.2535868},
volume = {49},
year = {2014}
}
@article{Rompf2015,
abstract = {Scala's type system unifies aspects of ML-style module systems, object-oriented, and functional programming paradigms. The DOT (Dependent Object Types) family of calculi has been proposed as a new theoretic foundation for Scala and similar expressive languages. Unfortunately, type soundness has only been established for a very restricted subset of DOT (muDOT), and it has been shown that adding important Scala features such as type refinement or extending subtyping to a lattice breaks at least one key metatheoretic property such as narrowing or subtyping transitivity, which are usually required for a type soundness proof. The first main contribution of this paper is to demonstrate how, perhaps surprisingly, even though these properties are lost in their full generality, a richer DOT calculus that includes both type refinement and a subtyping lattice with intersection types can still be proved sound. The key insight is that narrowing and subtyping transitivity only need to hold for runtime objects, but not for code that is never executed. Alas, the dominant method of proving type soundness, Wright and Felleisen's syntactic approach, is based on term rewriting, which does not make an adequate distinction between runtime and type assignment time. The second main contribution of this paper is to demonstrate how type soundness proofs for advanced, polymorphic, type systems can be carried out with an operational semantics based on high-level, definitional interpreters, implemented in Coq. We present the first mechanized soundness proof for System F{\textless}: based on a definitional interpreter. We discuss the challenges that arise in this setting, in particular due to abstract types, and we illustrate in detail how DOT-like calculi emerge from straightforward generalizations of the operational aspects of F{\textless}:.},
archivePrefix = {arXiv},
arxivId = {1510.05216},
author = {Rompf, Tiark and Amin, Nada},
eprint = {1510.05216},
number = {July},
pages = {1--13},
title = {{From F to DOT: Type Soundness Proofs with Definitional Interpreters}},
url = {http://arxiv.org/abs/1510.05216},
year = {2015}
}
@article{Konnov2016,
abstract = {Distributed algorithms have many mission-critical applications ranging from embedded systems and replicated databases to cloud computing. Due to asynchronous communication, process faults, or network failures, these algorithms are difficult to design and verify. Many algorithms achieve fault tolerance by using threshold guards that, for instance, ensure that a process waits until it has received an acknowledgment from a majority of its peers. Consequently, domain-specific languages for fault-tolerant distributed systems offer language support for threshold guards. We introduce an automated method for model checking of safety and liveness of threshold-guarded distributed algorithms in systems where the number of processes and the fraction of faulty processes are parameters. Our method is based on a short counterexample property: if a distributed algorithm violates a temporal specification (in a fragment of LTL), then there is a counterexample whose length is bounded and independent of the parameters. We prove this property by (i) characterizing executions depending on the structure of the temporal formula, and (ii) using commutativity of transitions to accelerate and shorten executions. We extended the ByMC toolset (Byzantine Model Checker) with our technique, and verified liveness and safety of 10 prominent fault-tolerant distributed algorithms, most of which were out of reach for existing techniques.},
archivePrefix = {arXiv},
arxivId = {1608.05327},
author = {Konnov, Igor and Lazic, Marijana and Veith, Helmut and Widder, Josef},
doi = {10.1145/3009837.3009860},
eprint = {1608.05327},
keywords = {byzantine faults,fault-tolerant distributed algorithms,parameterized model checking,reliable broadcast},
title = {{A Short Counterexample Property for Safety and Liveness Verification of Fault-tolerant Distributed Algorithms}},
url = {http://arxiv.org/abs/1608.05327},
year = {2016}
}
@article{Hankin1996,
author = {Hankin, Chris and Palsberg, Jens and others},
number = {4},
pages = {644--652},
title = {{Strategic Directions in Research on Programming Languages}},
volume = {28},
year = {1996}
}
@article{Takikawa2016,
abstract = {Programmers have come to embrace dynamically-typed languages for prototyping and delivering large and complex systems. When it comes to maintaining and evolving these systems, the lack of explicit static typing becomes a bottleneck. In response, researchers have explored the idea of gradually-typed programming languages which allow the incremental addition of type annotations to software written in one of these untyped languages. Some of these new, hybrid languages insert run-time checks at the boundary between typed and untyped code to establish type soundness for the overall system. With sound gradual typing, programmers can rely on the language implementation to provide meaningful error messages when type invariants are violated. While most research on sound gradual typing remains theoretical, the few emerging implementations suffer from performance overheads due to these checks. None of the publications on this topic comes with a comprehensive performance evaluation. Worse, a few report disastrous numbers. In response, this paper proposes a method for evaluating the performance of gradually-typed programming languages. The method hinges on exploring the space of partial conversions from untyped to typed. For each benchmark, the performance of the different versions is reported in a synthetic metric that associates runtime overhead to conversion effort. The paper reports on the results of applying the method to Typed Racket, a mature implementation of sound gradual typing, using a suite of real-world programs of various sizes and complexities. Based on these results the paper concludes that, given the current state of implementation technologies, sound gradual typing faces significant challenges. Conversely, it raises the question of how implementations could reduce the overheads associated with soundness and how tools could be used to steer programmers clear from pathological cases.},
author = {Takikawa, Asumu and Feltey, Daniel and Greenman, Ben and New, Max S. and Vitek, Jan and Felleisen, Matthias},
doi = {10.1145/2914770.2837630},
isbn = {978-1-4503-3549-2},
issn = {03621340},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL 2016},
keywords = {Gradual typing,performance evaluation},
number = {1},
pages = {456--468},
title = {{Is sound gradual typing dead?}},
url = {http://dl.acm.org/citation.cfm?id=2914770.2837630},
volume = {51},
year = {2016}
}
@article{Dinsdale-Young2013,
abstract = {Compositional abstractions underly many reasoning principles for concurrent programs: the concurrent environment is abstracted in order to reason about a thread in isolation; and these abstractions are composed to reason about a program consisting of many threads. For instance, separation logic uses formulae that describe part of the state, abstracting the rest; when two threads use disjoint state, their specifications can be composed with the separating conjunction. Type systems abstract the state to the types of variables; threads may be composed when they agree on the types of shared variables. In this paper, we present the "Concurrent Views Framework", a metatheory of concurrent reasoning principles. The theory is parameterised by an abstraction of state with a notion of composition, which we call views. The metatheory is remarkably simple, but highly applicable: the rely-guarantee method, concurrent separation logic, concurrent abstract predicates, type systems for recursive references and for unique pointers, and even an adaptation of the Owicki-Gries method can all be seen as instances of the Concurrent Views Framework. Moreover, our metatheory proves each of these systems is sound without requiring induction on the operational semantics.},
author = {Dinsdale-Young, Thomas and Birkedal, Lars and Gardner, Philippa and Parkinson, Matthew and Yang, Hongseok},
doi = {10.1145/2429069.2429104},
isbn = {978-1-4503-1832-7},
issn = {15232867},
journal = {POPL: Principles of Programming Languages},
keywords = {axiomatic semantics,compositional reasoning,concurrency},
pages = {287--300},
title = {{Views: compositional reasoning for concurrent programs}},
url = {http://doi.acm.org/10.1145/2429069.2429104},
year = {2013}
}
@article{Johnson2015,
author = {Johnson, J. Ian},
number = {April},
title = {{Automating Abstract Interpretation}},
year = {2015}
}
@article{Cimini2016,
author = {Cimini, Matteo and Siek, Jeremy G.},
doi = {10.1145/2837614.2837632},
isbn = {9781450335492},
issn = {0362-1340},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL 2016},
keywords = {gradual typing,methodology,semantics,type systems},
number = {1},
pages = {443--455},
title = {{The Gradualizer: a methodology and algorithm for generating gradual type systems}},
url = {http://dl.acm.org/citation.cfm?id=2837614.2837632},
volume = {51},
year = {2016}
}
@article{Lorenzen2016,
author = {Lorenzen, Florian and Erdweg, Sebastian},
doi = {10.1145/2837614.2837644},
isbn = {9781450335492},
issn = {15232867},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL 2016},
keywords = {automatic verification,desugaring,language extensibility,macros,metaprogramming,SugarJ,Template Haskell with quasiquoting,type soundness,type-dependent},
pages = {204--216},
title = {{Sound type-dependent syntactic language extension}},
url = {http://dl.acm.org/citation.cfm?doid=2837614.2837644},
year = {2016}
}
@inproceedings{Detlefs1992,
author = {Detlefs, David},
booktitle = {Proceedings of the C++ Conference. Portland, OR, USA, August 1992},
title = {{Garbage Collection and Run-time Typing as a C++ Library}},
year = {1992}
}
@inproceedings{Zhang2016,
address = {New York, New York, USA},
author = {Zhang, Zhen},
booktitle = {Companion Proceedings of the 2016 ACM SIGPLAN International Conference on Systems, Programming, Languages and Applications: Software for Humanity - SPLASH Companion 2016},
doi = {10.1145/2984043.2998545},
isbn = {9781450344371},
keywords = {data-flow analysis,definition language,interface,javascript,program verification,static analysis,webidl},
pages = {63--64},
publisher = {ACM Press},
title = {{xWIDL: modular and deep JavaScript API misuses checking based on extended WebIDL}},
url = {http://dl.acm.org/citation.cfm?doid=2984043.2998545},
year = {2016}
}
@article{Cadar2008,
abstract = {We present a new symbolic execution tool, KLEE, capable of automatically generating tests that achieve high coverage on a diverse set of complex and environmentally-intensive programs. We used KLEE to thoroughly check all 89 stand-alone programs in the GNU COREUTILS utility suite, which form the core user-level environment installed on millions of Unix systems, and arguably are the single most heavily tested set of open-source programs in existence. KLEE-generated tests achieve high line coverage - on average over 90{\%} per tool (median: over 94{\%}) - and significantly beat the coverage of the developers' own hand-written test suite. When we did the same for 75 equivalent tools in the BUSYBOX embedded system suite, results were even better, including 100{\%} coverage on 31 of them. We also used KLEE as a bug finding tool, applying it to 452 applications (over 430K total lines of code), where it found 56 serious bugs, including three in COREUTILS that had been missed for over 15 years. Finally, we used KLEE to crosscheck purportedly identical BUSYBOX and COREUTILS utilities, finding functional correctness errors and a myriad of inconsistencies.},
author = {Cadar, Cristian and Dunbar, Daniel and Engler, Dawson R.},
isbn = {978-1-931971-65-2},
journal = {Proceedings of the 8th USENIX conference on Operating systems design and implementation},
pages = {209--224},
title = {{KLEE: Unassisted and Automatic Generation of High-Coverage Tests for Complex Systems Programs}},
url = {http://portal.acm.org/citation.cfm?id=1855756},
year = {2008}
}
@article{Altenkirch2016,
author = {Altenkirch, Thorsten and Kaposi, Ambrus},
doi = {10.1145/2837614.2837638},
isbn = {9781450335492},
issn = {15232867},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL 2016},
keywords = {higher inductive types,homotopy type theory,logical relations,metaprogramming},
pages = {18--29},
title = {{Type theory in type theory using quotient inductive types}},
url = {http://dl.acm.org/citation.cfm?doid=2837614.2837638},
year = {2016}
}
@article{Chlipala2010,
abstract = {Dependent types provide a strong foundation for specifying and verifying rich properties of programs through type-checking. The earliest implementations combined dependency, which allows types to mention program variables; with type-level computation, which facilitates expressive specifications that compute with recursive functions over types. While many recent applications of dependent types omit the latter facility, we argue in this paper that it deserves more attention, even when implemented without dependency. In particular, the ability to use functional programs as specifications enables statically-typed metaprogramming: programs write programs, and static type-checking guarantees that the generating process never produces invalid code. Since our focus is on generic validity properties rather than full correctness verification, it is possible to engineer type inference systems that are very effective in narrow domains. As a demonstration, we present Ur, a programming language designed to facilitate metaprogramming with first-class records and names. On top of Ur, we implement Ur/Web, a special standard library that enables the development of modern Web applications. Ad-hoc code generation is already in wide use in the popular Web application frameworks, and we show how that generation may be tamed using types, without forcing metaprogram authors to write proofs or forcing metaprogram users to write any fancy types.},
author = {Chlipala, Adam},
doi = {10.1145/1809028.1806612},
isbn = {978-1-4503-0019-3},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {dependent types,metaprogramming},
number = {6},
pages = {122},
title = {{Ur: Statically-Typed Metaprogramming with Type-Level Record Computation}},
url = {http://portal.acm.org/citation.cfm?doid=1809028.1806612},
volume = {45},
year = {2010}
}
@article{Kennedy2013,
abstract = {We describe a Coq formalization of a subset of the x86 architecture. One emphasis of the model is brevity: using dependent types, type classes and notation we give the x86 semantics a makeover that counters its reputation for baroqueness. We model bits},
author = {Kennedy, Andrew and Benton, Nick and Jensen, Jonas B and Dagand, Pierre-Evariste},
doi = {10.1145/2505879.2505897},
isbn = {9781450321549},
issn = {00220000},
journal = {PPDP '13: Proceedings of the 15th Symposium on Principles and Practice of Declarative Programming},
pages = {13--24},
title = {{Coq: the world's best macro assembler?}},
url = {http://dl.acm.org/citation.cfm?doid=2505879.2505897},
year = {2013}
}
@article{Jones2007,
abstract = {Haskell's popularity has driven the need for ever more expressive type system features, most of which threaten the decidability and practicality of Damas-Milner type inference. One such feature is the ability to write functions with higher-rank types—that is, functions that take polymorphic functions as their arguments. Complete type inference is known to be undecidable for higher-rank (impredicative) type systems, but in practice programmers are more than willing to add type annotations to guide the type inference engine, and to document their code. However, the choice of just what annotations are required, and what changes are required in the type system and its inference algorithm, has been an ongoing topic of research. We take as our starting point a $\lambda$-calculus proposed by Odersky and L{\"{a}}ufer. Their system supports arbitrary-rank polymorphism through the exploitation of type annotations on $\lambda$-bound arguments and arbitrary sub-terms. Though elegant, and more convenient than some other proposals, Odersky and L{\"{a}}ufer's system requires many annotations. We show how to use local type inference (invented by Pierce and Turner) to greatly reduce the annotation burden, to the point where higher-rank types become eminently usable. Higher-rank types have a very modest impact on type inference. We substantiate this claim in a very concrete way, by presenting a complete type-inference engine, written in Haskell, for a traditional Damas-Milner type system, and then showing how to extend it for higher-rank types. We write the type-inference engine using a monadic framework: it turns out to be a particularly compelling example of monads in action. The paper is long, but is strongly tutorial in style. Although we use Haskell as our example source language, and our implementation language, much of our work is directly applicable to any ML-like functional language.},
author = {Jones, Simon Peyton and Vytiniotis, Dimitrios and Weirich, Stephanie and Shields, Mark},
doi = {10.1017/S0956796806006034},
issn = {0956-7968},
journal = {Journal of Functional Programming},
number = {01},
pages = {1},
title = {{Practical type inference for arbitrary-rank types}},
volume = {17},
year = {2007}
}
@article{Feng2007,
abstract = {Software systems usually use many different computation features and span different abstraction levels (e.g., user code level and the runtime system level). To build foundational certified systems, it is hard to have one verification system supporting all computation features. In this paper we present an open framework for foundational proof-carrying code (FPCC). It allows program modules to be specified and certified separately using different type systems or program logics. Certified modules (code + proof) can be linked to compose fully certified systems. The framework supports modular verification and proof reuse. It is extensible, and is expressive enough to allow invariants established in verification systems to be maintained when they are embedded in. Our framework is the first FPCC framework that systematically supports interoperation between different verification systems. It is fully mechanized in the Coq proof assistant with machine-checkable soundness proof.},
author = {Feng, Xinyu and Ni, Zhaozhong and Shao, Zhong and Guo, Yu},
doi = {10.1145/1190315.1190325},
isbn = {159593393X},
journal = {Proceedings of the 2007 ACM SIGPLAN international workshop on Types in languages design and implementation - TLDI '07},
keywords = {foundational proof-carrying code,interoperability,modularity,open framework,program verification},
pages = {67},
title = {{An open framework for foundational proof-carrying code}},
url = {http://portal.acm.org/citation.cfm?doid=1190315.1190325},
year = {2007}
}
@article{Sparks,
author = {Sparks, Zachary},
isbn = {9781605587684},
pages = {1--8},
title = {{Typestate-Oriented Programming}}
}
@inproceedings{JonesSimonPeytonMarkJones1997,
author = {Jones, Simon Peyton and Jones, Mark and Meijer, Erik},
booktitle = {Haskell workshop},
title = {{Type Classes: an exploration of the design space}},
year = {1997}
}
@article{Guha2010,
abstract = {We reduce JavaScript to a core calculus structured as a small-step operational semantics. We present several peculiarities of the language and show that our calculus models them. We explicate the desugaring process that turns JavaScript programs into ones in the core. We demonstrate faithfulness to JavaScript using real-world test suites. Finally, we illustrate utility by defining a security property, implementing it as a type system on the core, and extending it to the full language.},
author = {Guha, Arjun and Saftoiu, Claudiu and Krishnamurthi, Shriram},
doi = {10.1007/978-3-642-14107-2_7},
isbn = {3642141064},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {126--150},
title = {{The Essence of JavaScript}},
volume = {6183 LNCS},
year = {2010}
}
@article{Horn2011,
abstract = {We design a family of program analyses for JavaScript that make no approximation in matching calls with returns, exceptions with handlers, and breaks with labels. We do so by starting from an established reduction semantics for JavaScript and systematically deriving its intensional abstract interpretation. Our first step is to transform the semantics into an equivalent low-level abstract machine: the JavaScript Abstract Machine (JAM). We then give an infinite-state yet decidable pushdown machine whose stack precisely models the structure of the concrete program stack. The precise model of stack structure in turn confers precise control-flow analysis even in the presence of control effects, such as exceptions and finally blocks. We give pushdown generalizations of traditional forms of analysis such as k-CFA, and prove the pushdown framework for abstract interpretation is sound and computable.},
archivePrefix = {arXiv},
arxivId = {arXiv:1109.4467v2},
author = {Van Horn, David and Might, Matthew},
eprint = {arXiv:1109.4467v2},
pages = {24},
title = {{Pushdown Abstractions of JavaScript}},
url = {http://www.ccs.northeastern.edu/home/dvanhorn/pubs/vanhorn-might-preprint11.pdf},
year = {2011}
}
@article{Stefanovic1993,
abstract = {The UMass Garbage Collection Toolkit[4] was designed to facilitate language implementation by providing a language-independent library of collection algorithms and policies, and auxiliary data structures. Having integrated the toolkit collector into Standard ML of New Jersey, we found that the functionality of the toolkit allowed us to perform experiments revealing the nature of object allocation and object dynamics in the SML/NJ system. We explored ways to visualize the large quantities of data our instrumentation gathers. We believe that the techniques developed can be of use to the language implementor in reviewing overall performance, and to the application writer in tracking down the space behavior of the program (which, for functional languages, is often not intimately related to the source program). In the following we briefly describe the collector interface with SML, discuss the methodology of experiments, and outline the outcome of the experiments.},
author = {Stefanovic, D},
pages = {1--7},
title = {{The Garbage Collection Toolkit as an Experimentation Tool}},
url = {http://citeseer.ist.psu.edu/70158},
year = {1993}
}
@article{DeLine2004,
author = {DeLine, Robert and F{\"{a}}hndrich, Manuel},
doi = {10.1007/b98195},
isbn = {978-3-540-22159-3},
issn = {03029743},
journal = {European conference on object-oriented programming},
pages = {465--490},
title = {{Typestates for Objects}},
url = {http://link.springer.com/10.1007/b98195},
volume = {3086},
year = {2004}
}
@article{Jeon2016,
abstract = {Symbolic execution is a powerful program analysis technique, but it is difficult to apply to programs built using frameworks such as Swing and Android, because the framework code itself is hard to symbolically execute. The standard solution is to manually create a framework model that can be symbolically executed, but developing and maintaining a model is difficult and error-prone. In this paper, we present Pasket, a new system that takes a first step toward automatically generating Java framework models to support symbolic execution. Pasket's focus is on creating models by instantiating design patterns. Pasket takes as input class, method, and type information from the framework API, together with tutorial programs that exercise the framework. From these artifacts and Pasket's internal knowledge of design patterns, Pasket synthesizes a framework model whose behavior on the tutorial programs matches that of the original framework. We evaluated Pasket by synthesizing models for subsets of Swing and Android. Our results show that the models derived by Pasket are sufficient to allow us to use off-the-shelf symbolic execution tools to analyze Java programs that rely on frameworks.},
author = {Jeon, Jinseong and Qiu, Xiaokang and Fetter-Degges, Jonathan and Foster, Jeffrey S. and Solar-Lezama, Armando},
doi = {10.1145/2884781.2884856},
isbn = {9781450339001},
issn = {02705257},
journal = {Proceedings of the 38th International Conference on Software Engineering - ICSE '16},
keywords = {framework model,program synthesis,sketch,symbolic execution},
pages = {156--167},
title = {{Synthesizing framework models for symbolic execution}},
url = {http://dl.acm.org/citation.cfm?id=2884781.2884856},
year = {2016}
}
@incollection{Wilson1995,
author = {Wilson, Paul R. and Johnstone, Mark S. and Neely, Michael and Boles, David},
booktitle = {Memory Management},
doi = {10.1007/3-540-60368-9_19},
pages = {1--116},
title = {{Dynamic storage allocation: A survey and critical review}},
url = {http://link.springer.com/10.1007/3-540-60368-9{\_}19},
year = {1995}
}
@article{Varghese1987,
abstract = {Conventional algorithms to implement an Operating System timer module take O(n) time to start or maintain a timer, where n is the number of outstanding timers: this is expensive for large n. This paper begins by exploring the relationship between timer algorithms, time flow mechanisms used in discrete event simulations, and sorting techniques. Next a timer algorithm for small timer intervals is presented that is similar to the timing wheel technique used in logic simulations. By using a circular buffer or timing wheel, it takes O(1) time to start, stop, and maintain timers within the range of the wheel. Two extensions for larger values of the interval are described. In the first, the timer interval is hashed into a slot on the timing wheel. In the second, a hierarchy of timing wheels with different granularities is used to span a greater range of intervals. The performance of these two schemes and various implementation trade-offs are discussed.},
author = {Varghese, G. and Lauck, T.},
doi = {10.1145/37499.37504},
isbn = {089791242X},
issn = {01635980},
journal = {ACM SIGOPS Operating Systems Review},
number = {5},
pages = {25--38},
title = {{Hashed and hierarchical timing wheels: data structures for the efficient implementation of a timer facility}},
volume = {21},
year = {1987}
}
@article{Benton2009,
author = {Benton, Nick},
title = {{Step-Indexing: The Good, the Bad and the Ugly}},
year = {2009}
}
@article{Inala2015,
abstract = {In this paper, we show how synthesis can help implement interesting functions involving pattern matching and algebraic data types. One of the novel aspects of this work is the combination of type inference and counterexample-guided inductive synthesis (CEGIS) in order to support very high-level notations for describing the space of possible implementations that the synthesizer should consider. The paper also describes a set of optimizations that significantly improve the performance and scalability of the system. The approach is evaluated on a set of case studies which most notably include synthesizing desugaring functions for lambda calculus that force the synthesizer to discover Church encodings for pairs and boolean operations, as well as a procedure to generate constraints for type inference.},
archivePrefix = {arXiv},
arxivId = {1507.05527},
author = {Inala, Jeevana Priya and Qiu, Xiaokang and Lerner, Ben and Solar-Lezama, Armando},
eprint = {1507.05527},
journal = {Pldi Src},
title = {{Type Assisted Synthesis of Recursive Transformers on Algebraic Data Types}},
url = {http://arxiv.org/abs/1507.05527},
year = {2015}
}
@article{Nanevski2010,
abstract = {Most systems based on separation logic consider only restricted forms of implication or non-separating conjunction, as full support for these connectives requires a non-trivial notion of variable context, inherited from the logic of bunched implications (BI). We show that in an expressive type theory such as Coq, one can avoid the intricacies of BI, and support full separation logic very efficiently, using the native structuring primitives of the type theory. Our proposal uses reflection to enable equational reasoning about heaps, and Hoare triples with binary postconditions to further facilitate it. We apply these ideas to Hoare Type Theory, to obtain a new proof technique for verification of higher-order imperative programs that is general, extendable, and supports very short proofs, even without significant use of automation by tactics. We demonstrate the usability of the technique by verifying the fast congruence closure algorithm of Nieuwenhuis and Oliveras, employed in the state-of-the-art Barcelogic SAT solver. Copyright {\textcopyright} 2010 ACM.},
author = {Nanevski, Aleksandar and Vafeiadis, Viktor and Berdine, Josh},
doi = {10.1145/1707801.1706331},
isbn = {9781605584799},
issn = {03621340},
journal = {Popl},
keywords = {hoare logic,languages,monads,separation logic,type theory,verification},
number = {1},
pages = {261},
title = {{Structuring the verification of heap-manipulating programs}},
volume = {45},
year = {2010}
}
@article{Hicks,
author = {Hicks, Michael},
title = {{Symbolic Execution for finding bugs}}
}
@article{Berdine2005,
abstract = {We describe a sound method for automatically proving Hoare triples for loop-free code in Separation Logic, for certain preconditions and postconditions (symbolic heaps). The method uses a form of symbolic execution, a decidable proof theory for symbolic},
author = {Berdine, Josh and Calcagno, Cristiano and O'Hearn, Peter W.},
doi = {10.1007/11575467_5},
isbn = {3-540-29735-9},
journal = {Programming Languages and {\ldots}},
pages = {52--68},
title = {{Symbolic execution with separation logic}},
url = {http://link.springer.com/chapter/10.1007/11575467{\_}5},
year = {2005}
}
@article{Mesbah2016,
author = {Mesbah, Ali},
doi = {10.1109/SANER.2016.109},
isbn = {9781509018550},
title = {{Software Analysis for the Web: Achievements and Prospects}},
year = {2016}
}
@article{Abadi1991,
abstract = {Statically typed programming languages allow earlier error checking, better enforcement of disciplined programming styles, and the generation of more efficient object code than languages where all type consistency checks are performed at run time. However, even in statically typed languages, there is often the need to deal with data whose type cannot be determined at compile time. To handle such situations safely, we propose to add a type Dynamic whose values are pairs of a value v and a type tag T where v has the type denoted by T. Instances of Dynamic are built with an explicit tagging construct and inspected with a type safe typecase construct. This paper explores the syntax, operational semantics, and denotational semantics of a simple language that includes the type Dynamic. We give examples of how dynamically typed values can be used in programming. Then we discuss an operational semantics for our language and obtain a soundness theorem. We present two formulations of the denotational semantics of this language and relate them to the operational semantics. Finally, we consider the implications of polymorphism and some implementation issues.},
author = {Abadi, Mart{\'{i}}n and Cardelli, Luca and Pierce, Benjamin and Plotkin, Gordon},
doi = {10.1145/103135.103138},
isbn = {0-89791-294-2},
issn = {01640925},
journal = {ACM Transactions on Programming Languages and Systems},
number = {2},
pages = {237--268},
title = {{Dynamic typing in a statically typed language}},
volume = {13},
year = {1991}
}
@article{Levy2015,
abstract = {Rust, a new systems programming language, provides compile-time memory safety checks to help eliminate runtime bugs that manifest from improper memory management. This feature is advantageous for operating system development, and especially for embedded OS development, where recovery and debugging are particularly challenging. However, embedded platforms are highly event-based, and Rust's memory safety mechanisms largely presume threads. In our experience developing an operating system for embedded systems in Rust, we have found that Rust's ownership model prevents otherwise safe resource sharing common in the embedded domain, conflicts with the reality of hardware resources, and hinders using closures for programming asynchronously. We describe these experiences and how they relate to memory safety as well as illustrate our workarounds that preserve the safety guarantees to the largest extent possible. In addition, we draw from our experience to propose a new language extension to Rust that would enable it to provide better memory safety tools for event-driven platforms.},
author = {Levy, Amit and Andersen, Michael P. and Campbell, Bradford and Culler, David and Dutta, Prabal and Ghena, Branden and Levis, Philip and Pannuto, Pat},
doi = {10.1145/2818302.2818306},
isbn = {9781450339421},
journal = {PLOS: Workshop on Programming Languages and Operating Systems},
keywords = {embedded operating systems,linear types,ownership,rust},
pages = {21--26},
title = {{Ownership is Theft: Experiences Building an Embedded OS in Rust}},
url = {http://dl.acm.org/citation.cfm?id=2818302.2818306},
year = {2015}
}
@article{Li2010,
author = {Li, Zhaopeng and Zhuang, Zhong and Chen, Yiyun and Yang, Simin and Zhang, Zhenting and Fan, Dawei},
doi = {10.1109/TASE.2010.8},
isbn = {9780769541488},
journal = {Proceedings - 2010 4th International Symposium on Theoretical Aspects of Software Engineering, TASE 2010},
keywords = {Certifying compiler,Program verification,Proof-Carrying code,Separation logic,Theorem prover},
pages = {47--56},
title = {{A certifying compiler for clike subset of C language}},
year = {2010}
}
@article{Bierhoff2007,
abstract = {Objects often define usage protocols that clients must follow in order for these objects to work properly. Aliasing makes it notoriously difficult to check whether clients and implementations are compliant with such protocols. Accordingly, existing approaches either operate globally or severely restrict aliasing. We have developed a sound modular protocol checking approach, based on typestates, that allows a great deal of flexibility in aliasing while guaranteeing the absence of protocol violations at runtime. The main technical contribution is a novel abstraction, access permissions, that combines typestate and object aliasing information. In our methodology, developers express their protocol design intent through annotations based on access permissions. Our checking approach then tracks permissions through method implementations. For each object reference the checker keeps track of the degree of possible aliasing and is appropriately conservative in reasoning about that reference. This helps developers account for object manipulations that may occur through aliases. The checking approach handles inheritance in a novel way, giving subclasses more flexibility in method overriding. Case studies on Java iterators and streams provide evidence that access permissions can model realistic protocols, and protocol checking based on access permissions can be used to reason precisely about the protocols that arise in practice.},
author = {Bierhoff, Kevin and Aldrich, Jonathan},
doi = {10.1145/1297105.1297050},
isbn = {9781595937865},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {aliasing,behavioral subtyping,linear logic,permissions,typestates},
number = {10},
pages = {301},
title = {{Modular typestate checking of aliased objects}},
volume = {42},
year = {2007}
}
@article{Feng2006,
abstract = {Runtime stacks are critical components of any modern software--they are used to implement powerful control structures such as function call/return, stack cutting and unwinding, coroutines, and thread context switch. Stack operations, however, are very hard to reason about: there are no known formal specifications for certifying C-style setjmp/longjmp, stack cutting and unwinding, or weak continuations (in C--). In many proof-carrying code (PCC) systems, return code pointers and exception handlers are treated as general first-class functions (as in continuation-passing style) even though both should have more limited scopes.In this paper we show that stack-based control abstractions follow a much simpler pattern than general first-class code pointers. We present a simple but flexible Hoare-style framework for modular verification of assembly code with all kinds of stackbased control abstractions, including function call/return, tail call, setjmp/longjmp, weak continuation, stack cutting, stack unwinding, multi-return function call, coroutines, and thread context switch. Instead of presenting a specific logic for each control structure, we develop all reasoning systems as instances of a generic framework. This allows program modules and their proofs developed in different PCC systems to be linked together. Our system is fully mechanized. We give the complete soundness proof and a full verification of several examples in the Coq proof assistant.},
author = {Feng, Xinyu and Shao, Zhong and Vaynberg, Alexander and Xiang, Sen and Ni, Zhaozhong},
doi = {10.1145/1133255.1134028},
isbn = {1-59593-320-4},
issn = {03621340},
journal = {Pldi},
keywords = {assembly code verification,control abstractions,modularity,proof-carrying code,stack-based},
number = {6},
pages = {401},
title = {{Modular verification of assembly code with stack-based control abstractions}},
url = {http://dl.acm.org/citation.cfm?id=1133255.1134028},
volume = {41},
year = {2006}
}
@article{Kiselyov,
author = {Kiselyov, Oleg},
title = {{Typed Tagless Final Interpreters}}
}
@article{Kroning2009,
abstract = {Sets, lists, and maps are elementary data structures used in most programs. Program analysis tools therefore need to decide verification conditions containing variables of such types. We propose a new theory for the SMT-Lib standard as the standard format for such formulae.},
author = {Kr{\"{o}}ning, Daniel and R{\"{u}}mmer, Philipp and Weissenbacher, Georg},
journal = {Informal proceedings 7th},
number = {Vdm},
pages = {1--10},
title = {{A Proposal for a Theory of Finite Sets, Lists, and Maps for the SMT-Lib Standard}},
url = {http://www.kroening.com/smt-lib-lsm.pdf},
year = {2009}
}
@article{Andersen1994,
abstract = {Software engineers are faced with a dilemma. They want to write general and wellstructured programs that are flexible and easy to maintain. On the other hand, generality has a price: efficiency. A specialized program solving a particular problem is often significantly faster than a general program. However, the development of specialized software is time-consuming, and is likely to exceed the production of today's programmers. New techniques are required to solve this so-called software crisis. Partial evaluation is a program specialization technique that reconciles the benefits of generality with efficiency. This thesis presents an automatic partial evaluator for the Ansi C programming language. The content of this thesis is analysis and transformation of C programs. We develop several analyses that support the transformation of a program into its generating extension. A generating extension is a program that produces specialized programs when executed on parts of the input. The thesis contains the following main results.},
author = {Andersen, Lars Ole},
doi = {10.1.1.109.6502},
journal = {PhD thesis, DIKU, University of Copenhagen},
keywords = {pointer analysis},
number = {May},
pages = {111--},
title = {{Program Analysis and Specialization for the C Programming Language}},
url = {http://www-ti.informatik.uni-tuebingen.de/{~}behrend/PaperSeminar/Program Analysis and SpecializationPhD.pdf},
year = {1994}
}
@article{Jones1982,
abstract = {A new approach to data flow analysis of procedural programs and programs with recursive data structures is described. The method depends on simulation of the interpreter for the subject programming language using a retrieval function to approximate a program's data structures.},
author = {Jones, Neil D. and Muchnick, Steven S.},
doi = {10.1145/582153.582161},
isbn = {0897910656},
journal = {Proceedings of the 9th ACM SIGPLAN-SIGACT {\ldots}},
pages = {66--74},
title = {{A flexible approach to interprocedural data flow analysis and programs with recursive data structures}},
url = {http://dl.acm.org/citation.cfm?id=582161},
year = {1982}
}
@article{Li2013,
author = {Li, Zhaopeng and Zhang, Yu and Chen, Yiyun},
doi = {10.1007/s11390-013-1398-1},
issn = {10009000},
journal = {Journal of Computer Science and Technology},
keywords = {automated theorem proving,loop invariant inference,program verification,shape analysis,shape graph logic},
number = {6},
pages = {1063--1084},
title = {{A shape graph logic and a shape system}},
volume = {28},
year = {2013}
}
@article{Hoare1969,
author = {Hoare, C. A. R.},
doi = {10.1145/363235.363259},
issn = {00010782},
journal = {Communications of the ACM},
month = {oct},
number = {10},
pages = {576--580},
title = {{An axiomatic basis for computer programming}},
url = {http://portal.acm.org/citation.cfm?doid=363235.363259},
volume = {12},
year = {1969}
}
@article{Sagiv2002,
author = {Sagiv, Mooly and Reps, Thomas and Wilhelm, Reinhard},
journal = {ACM Transactions on Programming Languages and Systems},
title = {{Parametric Shape Analysis via 3-Valued Logic}},
year = {2002}
}
@misc{Cousot1977,
abstract = {A program denotes computations in some universe of objects. Abstract interpretation of programs consists in using that denotation to describe computations in another universe of abstract objects, so that the results of abstract execution give some informations on the actual computations. An intuitive example (which we borrow from Sintzoff [72]) is the rule of signs. The text -1515*17 may be understood to denote computations on the abstract universe {\{}(+), (-), (+-){\}} where the semantics of arithmetic operators is defined by the rule of signs. The abstract execution -1515*17 =={\textgreater} -(+)*(+) =={\textgreater} (-)*(+) =={\textgreater} (-), proves that -1515*17 is a negative number. Abstract interpretation is concerned by a particular underlying structure of the usual universe of computations (the sign, in our example). It gives a summary of some facets of the actual executions of a program. In general this summary is simple to obtain but inaccurate (e.g. -1515+17 =={\textgreater} -(+)+(+) =={\textgreater} (-)+(+) =={\textgreater} (+-)). Despite its fundamentally incomplete results abstract interpretation allows the programmer or the compiler to answer questions which do not need full knowledge of program executions or which tolerate an imprecise answer (e.g. partial correctness proofs of programs ignoring the termination problems, type checking, program optimizations which are not carried in the absence of certainty about their feasibility, ...). Section 3 describes the syntax and mathematical semantics of a simple flowchart language, Scott and Strachey[71]. This mathematical semantics is used in section 4 to build a more abstract model of the semantics of programs, in that it ignores the sequencing of control flow. This model is taken to be the most concrete of the abstract interpretations of programs. Section 5 gives the formal definition of the abstract interpretations of a program. Abstract program properties are modeled by a complete semilattice, Birkhoff[61]. Elementary program constructs are locally interpreted by order-preserving functions which are used to associate a system of equations with a program. The program global properties are then defined as one of the extreme fixpoints of that system, Tarski[55]. The abstraction process is defined in section 6. It is shown that the program properties obtained by an abstract interpretation of a program are consistent with those obtained by a more refined interpretation of that program. In particular, an abstract interpretation may be shown to be consistent with the formal semantics of the language. Levels of abstraction are formalized by showing that consistent abstract interpretations form a lattice (section 7). Section 8 gives a constructive definition of abstract properties of programs based on constructive definitions of fixpoints. It shows that various classical algorithms such as Kildall[73], Wegbreit[75], compute program properties as limits of finite Kleene[52]'s sequences. Section 9 introduces finite fixpoint approximation methods to be used when Kleene's sequences are infinite, Cousot[76]. They are shown to be consistent with the abstraction process. Practical examples illustrate the various sections. The conclusion points out that the abstract interpretation of programs is a unified approach to apparently unrelated program analysis techniques.},
author = {Cousot, Patrick and Cousot, Radhia},
booktitle = {Principles of Progamming Languages},
doi = {10.1145/512950.512973},
issn = {00900036},
pages = {238--252},
pmid = {21744052},
title = {{Abstract Interpretation: a unified lattice model for static analysis of programs by construction or approximation of fixpoints}},
year = {1977}
}
@article{Shivers1991,
abstract = {Programs written in powerful, higher-order languages like Scheme, ML, and Common Lisp should run as fast as their FORTRAN and C counterparts. They should, but they don't. A major reason is the level of optimisation applied to these two classes of languages. Many FORTRAN and C compilers employ an arsenal of sophisticated global optimisations that depend upon data-flow analysis: common-subexpression elimination, loop-invariant detection, induction-variable elimination, and many, many more. Compilers for higher-order languages do not provide these optimisations. Without them, Scheme, LISP and ML compilers are doomed to produce code that runs slower than their FORTRAN and C counterparts. The problem is the lack of an explicit control-flow graph at compile time, something which traditional data-flow analysis techniques require. In this dissertation, I present a technique for recovering the control-flow graph of a Scheme program at compile time. I give examples of how this information can be used to perform several data-flow analysis optimisations, including copy propagation, induction-variable elimination, useless-variable elimination, and type recovery. The analysis is defined in terms of a non-standard semantic interpretation. The denotational semantics is carefully developed, and several theorems establishing the correctness of the semantics and the implementing algorithms are proven.},
author = {Shivers, O.},
school = {Carnegie Mellon University},
month = {May},
pages = {1--186},
title = {{Control-flow analysis of higher-order languages}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.3971{\&}rep=rep1{\&}type=pdf},
year = {1991}
}
@article{Einarsson2008,
abstract = {These notes are written to provide our own documentation for the Soot framework from McGill University. They focus exclusively on the parts of Soot that we have used in various projects: parsing class files, performing points-to and null pointer analyses, performing data-flow analysis, and extracting abstract control-flow graphs. The notes also contain the important code snippets that make everything work, since it is our experience that the full Soot API leaves novice users in a state of shock and awe.},
author = {Einarsson, A. and Nielsen, J. D.},
journal = {{\ldots} , Department of Computer Science, University of {\ldots}},
pages = {1--47},
title = {{A survivor's guide to Java program analysis with Soot}},
url = {http://www.brics.dk/SootGuide/sootsurvivorsguide.pdf?origin=publication{\_}detail},
year = {2008}
}
@article{OConnor2011,
abstract = {This paper gives two new categorical characterisations of lenses: one as a coalgebra of the store comonad, and the other as a monoidal natural transformation on a category of a certain class of coalgebras. The store comonad of the first characterisation can be generalized to a Cartesian store comonad, and the coalgebras of this Cartesian store comonad turn out to be exactly the Biplates of the Uniplate generic programming library. On the other hand, the monoidal natural transformations on functors can be generalized to work on a category of more specific coalgebras. This generalization turns out to be the type of compos from the Compos generic programming library. A theorem, originally conjectured by van Laarhoven, proves that these two generalizations are isomorphic, thus the core data types of the Uniplate and Compos libraries supporting generic programming on single recursive types are the same. Both the Uniplate and Compos libraries generalize this core functionality to support mutually recursive types in different ways. This paper proposes a third extension to support mutually recursive data types that is as powerful as Compos and as easy to use as Uniplate. This proposal, called Multiplate, only requires rank 3 polymorphism in addition to the normal type class mechanism of Haskell.},
archivePrefix = {arXiv},
arxivId = {1103.2841},
author = {O'Connor, Russell},
eprint = {1103.2841},
keywords = {applicative,coalgebra,comonad,functional reference,generic programming,lens,monoidal functor,monoidal natural transformation},
pages = {1--21},
title = {{Functor is to Lens as Applicative is to Biplate: Introducing Multiplate}},
url = {http://arxiv.org/abs/1103.2841},
year = {2011}
}
@article{Vytiniotis2011,
abstract = {Advanced type system features, such as GADTs, type classes and type families, have proven to be invaluable language extensions for ensuring data invariants and program correctness. Unfortunately, they pose a tough problem for type inference when they are used as local type assumptions. Local type assumptions often result in the lack of principal types and cast the generalisation of local let-bindings prohibitively difficult to implement and specify. User-declared axioms only make this situation worse. In this paper, we explain the problems and – perhaps controversially – argue for abandoning local let-binding generalisation. We give empirical results that local let generalisation is only sporadically used by Haskell programmers. Moving on, we present a novel constraint-based type inference approach for local type assumptions. Our system, called OutsideIn(X), is parameterised over the particular underlying constraint domain X, in the same way as HM(X). This stratification allows us to use a common metatheory and inference algorithm. OutsideIn(X) extends the constraints of X by introducing implication constraints on top. We describe the strategy for solving these implication constraints, which, in turn, relies on a constraint solver for X. We characterise the properties of the constraint solver for X so that the resulting algorithm only accepts programs with principal types, even when the type system specification accepts programs that do not enjoy principal types. Going beyond the general framework, we give a particular constraint solver for X = type classes + GADTs + type families, a non-trivial challenge in its own right. This constraint solver has been implemented and distributed as part of GHC 7.},
author = {Vytiniotis, Dimitrios and {Peyton Jones}, Simon and Schrijvers, Tom and Sulzmann, Martin},
doi = {10.1017/S0956796811000098},
issn = {0956-7968},
journal = {Journal of Functional Programming},
number = {4-5},
pages = {333--412},
title = {{OutsideIn(X): Modular type inference with local assumptions}},
url = {http://www.journals.cambridge.org/abstract{\_}S0956796811000098},
volume = {21},
year = {2011}
}
@article{Kang2016a,
author = {Kang, Jeehoon and Kim, Yoonseung and Hur, Chung-Kil and Dreyer, Derek and Vafeiadis, Viktor},
doi = {10.1145/2837614.2837642},
isbn = {9781450335492},
issn = {15232867},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL 2016},
keywords = {lightweight verification,separate compilation},
pages = {178--190},
title = {{Lightweight verification of separate compilation}},
url = {http://dl.acm.org/citation.cfm?doid=2837614.2837642},
year = {2016}
}
@article{Grigore2016,
abstract = {The core challenge in designing an effective static program analysis is to find a good program abstraction -- one that retains only details relevant to a given query. In this paper, we present a new approach for automatically finding such an abstraction. Our approach uses a pessimistic strategy, which can optionally use guidance from a probabilistic model. Our approach applies to parametric static analyses implemented in Datalog, and is based on counterexample-guided abstraction refinement. For each untried abstraction, our probabilistic model provides a probability of success, while the size of the abstraction provides an estimate of its cost in terms of analysis time. Combining these two metrics, probability and cost, our refinement algorithm picks an optimal abstraction. Our probabilistic model is a variant of the Erdos-Renyi random graph model, and it is tunable by what we call hyperparameters. We present a method to learn good values for these hyperparameters, by observing past runs of the analysis on an existing codebase. We evaluate our approach on an object sensitive pointer analysis for Java programs, with two client analyses (PolySite and Downcast).},
archivePrefix = {arXiv},
arxivId = {1511.01874},
author = {Grigore, Radu and Yang, Hongseok},
doi = {10.1145/2837614.2837663},
eprint = {1511.01874},
isbn = {978-1-4503-3549-2},
issn = {15232867},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
keywords = {Datalog,Horn,hypergraph,probability},
pages = {485--498},
title = {{Abstraction Refinement Guided by a Learnt Probabilistic Model}},
url = {http://doi.acm.org/10.1145/2837614.2837663},
year = {2016}
}
@article{Liang,
author = {Liang, Hongjin},
keywords = {shape graph,shape analysis,loop invariant inference,pointer logic,program analysis},
title = {{A Shape System and Loop Invariant Inference}}
}
@article{Park2015,
abstract = {This paper presents KJS, the most complete and thoroughly tested formal semantics of JavaScript to date. Being executable, KJS has been tested against the ECMAScript 5.1 conformance test suite, and passes all 2,782 core language tests. Among the existing implementations of JavaScript, only Chrome V8 passes all the tests, and no other semantics passes more than 90{\%}. In addition to a reference implementation for JavaScript, KJS also yields a simple coverage metric for a test suite: the set of semantic rules it exercises. Our semantics revealed that the ECMAScript 5.1 conformance test suite fails to cover several semantic rules. Guided by the semantics, we wrote tests to exercise those rules. The new tests revealed bugs both in production JavaScript engines (Chrome V8, Safari WebKit, Firefox SpiderMonkey) and in other semantics. KJS is symbolically executable, thus it can be used for formal analysis and verification of JavaScript programs. We verified non-trivial programs and found a known security vulnerability.},
author = {Park, Daejun and Stefănescu, Andrei and Roşu, Grigore},
doi = {10.1145/2737924.2737991},
isbn = {9781450334686},
issn = {15232867},
journal = {Proceedings of the 36th ACM SIGPLAN Conference on Programming Language Design and Implementation - PLDI 2015},
keywords = {javascript,k framework,mechanized semantics},
pages = {346--356},
title = {{KJS: a complete formal semantics of JavaScript}},
url = {http://dl.acm.org/citation.cfm?doid=2737924.2737991},
year = {2015}
}
@article{Diaz2010,
author = {D{\'{i}}az, Jorge Luis Guevara},
pages = {1--6},
title = {{Typestate-Oriented Design: A Coloured Petri Net Approach}},
year = {2010}
}
@article{Strom1986,
abstract = {We introduce a new programming language concept called typestate, which is a refinement of the concept of type. Whereas the type of a data object determines the set of operations ever permitted on the object, typestate determines the subset of those operations which is permitted in a particular context. Typestate tracking is a program analysis technique which enhances program reliability by detecting at compile-time syntactically legal but semantically undefined execution sequences. These include, for example, reading a variable before it has been initialized, dereferencing a pointer after the dynamic object has been deallocated, etc. Typestate tracking detects errors that cannot be detected by type checking or by conventional static scope rules. Additionally, typestate tracking makes it possible for compilers to insert appropriate finalization of data at exception points and on program termination, eliminating the need to support finalization by means of either garbage collection or unsafe deallocation operations such as Pascal's dispose operation. By enforcing typestate invariants at compile time, it becomes practical to implement a "secure language" - that is, one in which all successfully compiled program modules have fully defined execution-time effects, and the only effects of program errors are incorrect output values. This paper defines typestate, gives examples of its application, and shows how typestate checking may be embedded into a compiler. We discuss the consequences of typestate checking for software reliability and software structure, and conclude with a discussion of our experience using a high-level language incorporating typestate checking.},
author = {Strom, Robert E. and Yemini, Shaula},
journal = {IEEE Transactions on Software Engineering},
doi = {10.1109/TSE.1986.6312929},
isbn = {0098-5589},
issn = {00985589},
keywords = {Program analysis,program verification,security,software reliability,type checking,typestate},
number = {1},
pages = {157--171},
title = {{Typestate: A Programming Language Concept for Enhancing Software Reliability}},
volume = {SE-12},
year = {1986}
}
@article{Pichon-Pharabod,
abstract = {Despite much research on concurrent programming languages, especially for Java and C/C++, we still do not have a satisfactory definition of their semantics, one that admits all common optimisations without also admitting undesired behaviour. Especially problematic are the "thin-air" examples involving high-performance concurrent accesses, such as C/C++11 relaxed atomics. The C/C++11 model is in a per-candidate-execution style, and previous work has identified a tension between that and the fact that compiler optimisations do not operate over single candidate executions in isolation; rather, they operate over syntactic representations that represent all executions. In this paper we propose a novel approach that circumvents this difficulty. We define a concurrency semantics for a core calculus, including relaxed-atomic and non-atomic accesses, and locks, that admits a wide range of optimisation while still forbidding the classic thin-air examples. It also addresses other problems relating to undefined behaviour. The basic idea is to use an event-structure representation of the current state of each thread, capturing all of its potential executions, and to permit interleaving of execution and transformation steps over that to reflect optimisation (possibly dynamic) of the code. These are combined with a non-multi-copy-atomic storage subsystem, to reflect common hardware behaviour. The semantics is defined in a mechanised and executable form, and designed to be implementable above current relaxed hardware and strong enough to support the programming idioms that C/C++11 does for this fragment. It offers a potential way forward for concurrent programming language semantics, beyond the current C/C++11 and Java models.},
author = {Pichon-Pharabod, Jean and Sewell, Peter},
doi = {10.1145/2837614.2837616},
isbn = {9781450335492},
issn = {15232867},
keywords = {C/C++,D.3.3 [Programming Languages],Formal Definitions and Theory,Relaxed memory models},
title = {{A concurrency semantics for relaxed atomics that permits optimisation and avoids thin-air executions}},
year = {2016}
}
@article{Chatterjee2015,
abstract = {In this paper, we consider termination of probabilistic programs with real-valued variables. The questions concerned are: 1. qualitative ones that ask (i) whether the program terminates with probability 1 (almost-sure termination) and (ii) whether the expected termination time is finite (finite termination); 2. quantitative ones that ask (i) to approximate the expected termination time (expectation problem) and (ii) to compute a bound B such that the probability to terminate after B steps decreases exponentially (concentration problem). To solve these questions, we utilize the notion of ranking supermartingales which is a powerful approach for proving termination of probabilistic programs. In detail, we focus on algorithmic synthesis of linear ranking-supermartingales over affine probabilistic programs (APP's) with both angelic and demonic non-determinism. An important subclass of APP's is LRAPP which is defined as the class of all APP's over which a linear ranking-supermartingale exists. Our main contributions are as follows. Firstly, we show that the membership problem of LRAPP (i) can be decided in polynomial time for APP's with at most demonic non-determinism, and (ii) is NP-hard and in PSPACE for APP's with angelic non-determinism; moreover, the NP-hardness result holds already for APP's without probability and demonic non-determinism. Secondly, we show that the concentration problem over LRAPP can be solved in the same complexity as for the membership problem of LRAPP. Finally, we show that the expectation problem over LRAPP can be solved in 2EXPTIME and is PSPACE-hard even for APP's without probability and non-determinism (i.e., deterministic programs). Our experimental results demonstrate the effectiveness of our approach to answer the qualitative and quantitative questions over APP's with at most demonic non-determinism.},
archivePrefix = {arXiv},
arxivId = {1510.08517},
author = {Chatterjee, Krishnendu and Fu, Hongfei and Novotny, Petr and Hasheminezhad, Rouzbeh},
doi = {10.1145/2837614.2837639},
eprint = {1510.08517},
isbn = {9781450335492},
issn = {15232867},
keywords = {concentration,martingale,probabilistic programs,ranking super-,termination},
pages = {327--342},
title = {{Algorithmic Analysis of Qualitative and Quantitative Termination Problems for Affine Probabilistic Programs}},
url = {http://arxiv.org/abs/1510.08517},
year = {2015}
}
@article{Yorgey2012,
abstract = {Static type systems strive to be richly expressive while still being simple enough for programmers to use. We describe an experiment that enriches Haskell's kind system with two features promoted from its type system: data types and polymorphism. The new system has a very good power-to-weight ratio: it offers a significant improvement in expressiveness, but, by re-using concepts that programmers are already familiar with, the system is easy to understand and implement.},
author = {Yorgey, Brent A. and Weirich, Stephanie and Cretin, Julien and {Peyton Jones}, Simon and Vytiniotis, Dimitrios and Magalh{\~{a}}es, Jos{\'{e}} Pedro},
doi = {10.1145/2103786.2103795},
isbn = {9781450311205},
issn = {07308566},
journal = {Proceedings of the 8th ACM SIGPLAN workshop on Types in language design and implementation - TLDI '12},
keywords = {haskell,kinds,polymorphism,promotion},
number = {2011/10},
pages = {53},
title = {{Giving Haskell a promotion}},
url = {http://dl.acm.org/citation.cfm?doid=2103786.2103795},
volume = {1},
year = {2012}
}
@article{Ball2001,
author = {Ball, Thomas and Rajamani, Sriram K},
pages = {103--122},
title = {{Automatically Validating Temporal Safety Properties of Interfaces}},
year = {2001}
}
@article{Robbins2016,
author = {Robbins, Ed and King, Andy and Schrijvers, Tom},
doi = {10.1145/2914770.2837633},
isbn = {1595930566},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
month = {jan},
number = {1},
pages = {191--203},
title = {{From MinX to MinC: semantics-driven decompilation of recursive datatypes}},
url = {http://dl.acm.org/citation.cfm?doid=2914770.2837633},
volume = {51},
year = {2016}
}
@article{Devriese2016,
author = {Devriese, Dominique and Patrignani, Marco and Piessens, Frank},
doi = {10.1145/2837614.2837618},
isbn = {9781450335492},
issn = {15232867},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL 2016},
keywords = {approximate back-translation,fully-abstract compilation},
pages = {164--177},
title = {{Fully-abstract compilation by approximate back-translation}},
url = {http://dl.acm.org/citation.cfm?doid=2837614.2837618},
year = {2016}
}
@article{Filipovi2010,
abstract = {Concurrent data structures are usually designed to satisfy correctness conditions such as sequential consistency or linearizability. In this paper, we consider the following fundamental question: What guarantees are provided by these conditions for client programs? We formally show that these conditions can be characterized in terms of observational refinement. Our study also provides a new understanding of sequential consistency and linearizability in terms of abstraction of dependency between computation steps of client programs. {\textcopyright} 2010 Elsevier B.V. All rights reserved.},
author = {Filipovi{\'{c}}, Ivana and O'Hearn, Peter and Rinetzky, Noam and Yang, Hongseok},
doi = {10.1016/j.tcs.2010.09.021},
isbn = {9783642005893},
issn = {03043975},
journal = {Theoretical Computer Science},
keywords = {Linearizability,Observational equivalence,Observational refinement,Sequential consistency},
number = {51-52},
pages = {4379--4398},
title = {{Abstraction for concurrent objects}},
volume = {411},
year = {2010}
}
@article{Hackett2012,
abstract = {JavaScript performance is often bound by its dynamically typed nature. Compilers do not have access to static type information, making generation of efficient, type-specialized machine code difficult. We seek to solve this problem by inferring types. In this paper we present a hybrid type inference algorithm for JavaScript based on points-to analysis. Our algorithm is fast, in that it pays for itself in the optimizations it enables. Our algorithm is also precise, generating information that closely reflects the program's actual behavior even when analyzing polymorphic code, by augmenting static analysis with run-time type barriers. We showcase an implementation for Mozilla Firefox's JavaScript engine, demonstrating both performance gains and viability. Through integration with the just-in-time (JIT) compiler in Firefox, we have improved performance on major benchmarks and JavaScript-heavy websites by up to 50{\%}. Inference-enabled compilation is the default compilation mode as of Firefox 9.},
author = {Hackett, Brian and Guo, Shu-yu},
doi = {10.1145/2254064.2254094},
isbn = {9781450312059},
issn = {0362-1340},
journal = {Proceedings of the 33rd ACM SIGPLAN conference on Programming Language Design and Implementation - PLDI '12},
keywords = {hybrid,just-in-time compilation,type inference},
pages = {239},
title = {{Fast and precise hybrid type inference for JavaScript}},
url = {http://dl.acm.org/citation.cfm?id=2345156.2254094{\%}5Cnhttp://dl.acm.org/citation.cfm?doid=2254064.2254094},
year = {2012}
}
@article{Flatt2016,
abstract = {Our new macro expander for Racket builds on a novel approach to hygiene. Instead of basing macro expansion on variable renamings that are mediated by expansion history, our new expander tracks binding through a set of scopes that an identifier acquires from both binding forms and macro expansions. The resulting model of macro expansion is simpler and more uniform than one based on renaming, and it is sufficiently compatible with Racket's old expander to be practical.},
author = {Flatt, Matthew},
doi = {10.1145/2837614.2837620},
isbn = {978-1-4503-3549-2},
issn = {07308566},
keywords = {binding,hygiene,macros,scope},
pages = {705--717},
title = {{Binding As Sets of Scopes}},
url = {http://doi.acm.org/10.1145/2837614.2837620{\%}5Cnhttp://dl.acm.org/ft{\_}gateway.cfm?id=2837620{\&}type=pdf},
year = {2016}
}
@article{Chatterjee2016,
abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural problems that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, a property satisfied by the control-flow graphs of most programs. We allow for multiple possible queries, which arise naturally in demand-driven dataflow analysis. The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and for each individual query. The traditional approach constructs the product graph of all components and applies the best-known graph algorithm on the product. In this approach, even the answer to a single query requires the transitive closure, which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results showing that the worst-case running time of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms.},
archivePrefix = {arXiv},
arxivId = {1510.07565},
author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
doi = {10.1145/2837614.2837624},
eprint = {1510.07565},
isbn = {9781450335492},
issn = {15232867},
journal = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL 2016},
keywords = {algebraic path properties,concurrent systems,constant-treewidth graphs,shortest path},
pages = {733--747},
title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}},
url = {http://arxiv.org/abs/1510.07565{\%}5Cnhttp://dl.acm.org/citation.cfm?doid=2837614.2837624},
year = {2016}
}
@article{Jensen2009,
abstract = {JavaScript is the main scripting language for Web browsers, and it is essential to modern Web applications. Programmers have started using it for writing complex applications, but there is still little tool support available during development. We present a static program analysis infrastructure that can infer detailed and sound type information for JavaScript programs using abstract interpretation. The analysis is designed to support the full language as defined in the ECMAScript standard, including its peculiar object model and all built-in functions. The analysis results can be used to detect common programming errors – or rather, prove their absence, and for producing type information for program comprehension. Preliminary experiments conducted on real-life JavaScript code indicate that the approach is promising regarding analysis precision on small and medium size programs, which constitute the majority of JavaScript applications. With potential for further improvement, we propose the analysis as a foundation for building tools that can aid JavaScript programmers.},
author = {Jensen, Simon Holm and M{\o}ller, Anders and Thiemann, Peter},
doi = {10.1007/978-3-642-03237-0_17},
isbn = {3642032362},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
number = {274},
pages = {238--255},
title = {{Type analysis for JavaScript}},
volume = {5673 LNCS},
year = {2009}
}
@article{Gardner2012,
abstract = {JavaScript has become the most widely used language for client-side web programming. The dynamic nature of JavaScript makes understanding its code notoriously difficult, leading to buggy programs and a lack of adequate static-analysis tools. We believe that logical reasoning has much to offer JavaScript: a simple description of program behaviour, a clear understanding of module boundaries, and the ability to verify security contracts. We introduce a program logic for reasoning about a broad subset of JavaScript, including challenging features such as prototype inheritance and with. We adapt ideas from separation logic to provide tractable reasoning about JavaScript code: reasoning about easy programs is easy; reasoning about hard programs is possible. We prove a strong soundness result. All libraries written in our subset and proved correct with respect to their specifications will be well-behaved, even when called by arbitrary JavaScript code.},
author = {Gardner, Philippa Anne and Maffeis, Sergio and Smith, Gareth David},
doi = {10.1145/2103621.2103663},
isbn = {9781450310833},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {javascript,reliability,separation logic,theory,verification,web},
number = {1},
pages = {31},
title = {{Towards a program logic for JavaScript}},
volume = {47},
year = {2012}
}
@article{Ahmadi2014,
author = {Ahmadi, Reza},
number = {October},
title = {{Dafny with Traits: Verifying Object-Oriented Programs}},
year = {2014}
}
@article{Turner2004,
abstract = {The driving idea of functional programming is to make programming more closely related to mathematics. A program in a functional language such as Haskell or Miranda consists of equations which are both computation rules and a basis for simple algebraic reasoning about the functions and data structures they define. The existing model of functional programming, although elegant and powerful, is compromised to a greater extent than is commonly recognised by the presence of partial functions. We consider a simple discipline of total functional programming designed to exclude the possibility of non-termination. Among other things this requires a type distinction between data, which is finite, and codata, which is potentially infinite.},
author = {Turner, D. A.},
doi = {10.3217/jucs-010-07-0751},
issn = {0958695X},
journal = {Journal of Universal Computer Science},
keywords = {functional programming},
number = {7},
pages = {751--768},
title = {{Total Functional Programming}},
url = {http://www.jucs.org/jucs{\_}10{\_}7/total{\_}functional{\_}programming},
volume = {10},
year = {2004}
}
@article{Bodin2014,
abstract = {JavaScript is the most widely used web language for client-side applications. Whilst the development of JavaScript was initially just led by implementation, there is now increasing momentum behind the ECMA standardisation process. The time is ripe for a formal, mechanised specification of JavaScript, to clarify ambiguities in the ECMA standards, to serve as a trusted reference for high-level language compilation and JavaScript implementations, and to provide a platform for high-assurance proofs of language properties. We present JSCert, a formalisation of the current ECMA standard in the Coq proof assistant, and JSRef, a reference interpreter for JavaScript extracted from Coq to OCaml. We give a Coq proof that JSRef is correct with respect to JSCert and assess JSRef using test262, the ECMA conformance test suite. Our methodology ensures that JSCert is a comparatively accurate formulation of the English standard, which will only improve as time goes on. We have demonstrated that modern techniques of mechanised specification can handle the complexity of JavaScript.},
author = {Bodin, Martin and Chargueraud, Arthur and Filaretti, Daniele and Gardner, Philippa and Maffeis, Sergio and Naudziuniene, Daiva and Schmitt, Alan and Smith, Gareth},
doi = {10.1145/2535838.2535876},
isbn = {9781450325448},
issn = {07308566},
journal = {Proceedings of the 41st ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL '14},
keywords = {coq,javascript,mechanised semantics},
pages = {87--100},
title = {{A trusted mechanised JavaScript specification}},
url = {http://dl.acm.org/citation.cfm?id=2535838.2535876},
year = {2014}
}
@inproceedings{Chakravarty2005,
address = {New York, New York, USA},
author = {Chakravarty, Manuel M. T. and Keller, Gabriele and Jones, Simon Peyton and Marlow, Simon},
booktitle = {Proceedings of the 32nd ACM SIGPLAN-SIGACT symposium on Principles of programming languages - POPL '05},
doi = {10.1145/1040305.1040306},
isbn = {158113830X},
pages = {1--13},
publisher = {ACM Press},
title = {{Associated types with class}},
url = {http://portal.acm.org/citation.cfm?doid=1040305.1040306},
year = {2005}
}
@article{Hudson1991,
abstract = {We describe a memory management toolkit for language implementors. It offers efficient and flexible generation scavenging garbage collection. In addition to providing a core of language-independent algorithms and data structures, the toolkit includes auxiliary components that ease implementation of garbage collection for programming languages. We have detailed designs for Smalltalk and Modula-3 and are confident the toolkit can be used with a wide variety of languages. The toolkit approach is itself novel, and our design includes a number of additional innovations in flexibility, efficiency, accuracy, and cooperation between the compiler and the collector.},
author = {Hudson, Richard L and Moss, J Eliot B and Diwan, Amer and Weight, Christopher F},
journal = {Object Oriented Systems},
title = {{A Language-Independent Garbage Collector Toolkit}},
url = {ftp://ftp.cs.umass.edu/pub/osl/papers/tr9147.ps.Z},
year = {1991}
}
@inproceedings{Mayerhofer2016,
address = {New York, New York, USA},
author = {Mayerhofer, Tanja and Wimmer, Manuel and Vallecillo, Antonio},
booktitle = {Proceedings of the 2016 ACM SIGPLAN International Conference on Software Language Engineering - SLE 2016},
doi = {10.1145/2997364.2997376},
isbn = {9781450344470},
keywords = {dimensions,measurement uncertainty,model-based engineering,modeling quantities,units},
pages = {118--131},
publisher = {ACM Press},
title = {{Adding uncertainty and units to quantity types in software models}},
url = {http://dl.acm.org/citation.cfm?doid=2997364.2997376},
year = {2016}
}
@article{Turon2013a,
author = {Turon, Aaron and Dreyer, Derek and Birkedal, Lars},
doi = {10.1145/2500365.2500600},
isbn = {9781450323260},
issn = {15232867},
journal = {Proceedings of the 18th ACM SIGPLAN International Conference on Functional Programming - ICFP '13},
keywords = {contextual refinement,fine-grained concurrency,higher-order functions,Kripke logical relations,separation logic},
pages = {377},
title = {{Unifying refinement and hoare-style reasoning in a logic for higher-order concurrency}},
url = {http://dl.acm.org/citation.cfm?doid=2500365.2500600{\%}5Cnpapers2://publication/doi/10.1145/2500365.2500600},
year = {2013}
}
@inproceedings{Gordon2001,
address = {New York, New York, USA},
author = {Gordon, Andrew D. and Syme, Don},
booktitle = {Proceedings of the 28th ACM SIGPLAN-SIGACT symposium on Principles of programming languages - POPL '01},
doi = {10.1145/360204.360228},
isbn = {1581133367},
pages = {248--260},
publisher = {ACM Press},
title = {{Typing a multi-language intermediate code}},
url = {http://portal.acm.org/citation.cfm?doid=360204.360228},
year = {2001}
}
@techreport{Vafeiadis2013,
author = {Vafeiadis, Viktor},
institution = {University of Cambridge, Computer Laboratory},
number = {726},
title = {{Modular fine-grained concurrency verification}},
url = {http://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-726.pdf},
year = {2008}
}
@article{Reed2015,
abstract = {Rust is a new systems language that uses some advanced type system features, specifically affine types and regions, to statically guarantee memory safety and eliminate the need for a garbage collector. While each individual addition to the type system is well understood in isolation and is known to be sound, the combined system is not known to be sound. Furthermore, Rust uses a novel checking scheme for its regions, known as the Borrow Checker, that is not known to be correct. Since Rust's goal is to be a safer alternative to C/C++, we should ensure that this safety scheme actually works. We present a formal semantics that captures the key features relevant to memory safety, unique pointers and borrowed references, specifies how they guarantee memory safety, and describes the operation of the Borrow Checker. We use this model to prove the soundness of some core operations and justify the conjecture that the model, as a whole, is sound. Additionally, our model provides a syntactic version of the Borrow Checker, which may be more understandable than the non-syntactic version in Rust.},
author = {Reed, Eric},
number = {February},
pages = {1--37},
title = {{Patina: A Formalization of the Rust Programming Language}},
year = {2015}
}
@article{Petricek2015,
author = {Petricek, Tomas},
title = {{F{\#} Data: Accessing structured data made easy}},
year = {2015}
}
@article{Rastogi2014,
author = {Rastogi, Aseem and Hammer, Matthew A. and Hicks, Michael},
doi = {10.1109/SP.2014.48},
isbn = {9781479946860},
issn = {10816011},
journal = {Proceedings - IEEE Symposium on Security and Privacy},
keywords = {Dependent type system,Functional language,Secure multi-party computation},
pages = {655--670},
title = {{Wysteria: A programming language for generic, mixed-mode multiparty computations}},
year = {2014}
}
@article{Hammer2015,
author = {Hammer, Matthew A. and Dunfield, Joshua and Headley, Kyle and Labich, Nicholas and Foster, Jeffrey S. and Hicks, Michael and {Van Horn}, David},
doi = {10.1145/2858965.2814305},
isbn = {978-1-4503-3689-5},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {call-by-push-value (CBPV),demanded computation graph (DCG),incremental computation,laziness,memoization,nominal matching,self-adjusting computation,structural matching,thunks},
number = {10},
pages = {748--766},
title = {{Incremental computation with names}},
url = {http://dl.acm.org/citation.cfm?id=2858965.2814305},
volume = {50},
year = {2015}
}
@article{Pina2016,
abstract = {Dynamic software updating (DSU) is a technique for patching running programs, to fix bugs or add new features. DSU avoids the downtime of stop-and-restart updates, but creates new risks: an incorrect or ill-timed dynamic update could result in a crash or misbehavior, defeating the whole purpose of DSU. To reduce such risks, dynamic updates should be carefully tested before they are deployed. This paper presents Tedsuto, a general testing framework for DSU, along with a concrete implementation of it for Rubah, a state-of-the-art Java-based DSU system. Tedsuto uses system-level tests developed for the old and new versions of the updateable software, and systematically tests whether a dynamic update might result in a test failure. Very often this process is fully automated, while in some cases (e.g., to test new-version functionality) some manual annotations are required. To evaluate Tedsuto's efficacy, we applied it to dynamic updates previously developed (and tested in an ad hoc manner) for the H2 SQL database server and the CrossFTP server, two real-world, multithreaded systems. We used three large test suites, totalling 446 tests, and we found a variety of update-related bugs quickly, and at low cost.},
author = {Pina, Luis and Hicks, Michael},
doi = {10.1109/ICST.2016.27},
isbn = {9781509018260},
journal = {Proceedings - 2016 IEEE International Conference on Software Testing, Verification and Validation, ICST 2016},
pages = {278--288},
title = {{Tedsuto: A General Framework for Testing Dynamic Software Updates}},
year = {2016}
}
@article{Rastogi,
author = {Rastogi, Aseem and Swamy, Nikhil and Hicks, Michael},
title = {{WYS: A Verified Language Extension for Secure Multi-party Computations}}
}
@article{McCreight2007,
abstract = {Garbage-collected languages such as Java and C{\#} are becoming more and more widely used in both high-end software and real-time embedded applications. The correctness of the GC implementation is essential to the reliability and security of a large portion of the world's mission-critical software. Unfortunately, garbage collectors-especially incremental and concurrent ones-are extremely hard to implement correctly. In this paper, we present a new uniform approach to verifying the safety of both a mutator and its garbage collector in Hoare-style logic. We define a formal garbage collector interface general enough to reason about a variety of algorithms while allowing the mutator to ignore implementation-specific details of the collector. Our approach supports collectors that require read and write barriers. We have used our approach to mechanically verify assembly implementations of mark-sweep, copying and incremental copying GCs in Coq, as well as sample mutator programs that can be linked with any of the GCs to produce a fully-verified garbage-collected program. Our work provides a foundation for reasoning about complex mutator-collector interaction and makes an important advance toward building fully certified production-quality GCs.},
author = {McCreight, A. and Shao, Z. and Lin, C. and Li, L.},
doi = {10.1145/1273442.1250788},
isbn = {0362-1340},
issn = {03621340},
journal = {Proceedings of the 28th ACM SIGPLAN Conference on Programming Language Design and Implementation - PLDI '07},
keywords = {abstract data type,assembly code verification,exercise,garbage collection,programs,proof-carrying code,real-time,separation logic},
number = {6},
pages = {468--479},
title = {{A general framework for certifying garbage collectors and their mutators}},
volume = {42},
year = {2007}
}
@article{Fu2010,
abstract = {Optimistic concurrency algorithms provide good performance for parallel programs but they are extremely hard to reason about. Program logics such as concurrent separation logic and rely-guarantee reasoning can be used to verify these algorithms, but they make heavy uses of history variables which may obscure the high-level intuition underlying the design of these algorithms. In this paper, we propose a novel program logic that uses invariants on history traces to reason about optimistic concurrency algorithms. We use past tense temporal operators in our assertions to specify execution histories. Our logic supports modular program specifications with history information by providing separation over both space (program states) and time. We verify Michael's non-blocking stack algorithm and show that the intuition behind such algorithm can be naturally captured using trace invariants.},
author = {Fu, Ming and Li, Yong and Feng, Xinyu and Shao, Zhong and Zhang, Yu},
doi = {10.1007/978-3-642-15375-4_27},
isbn = {3642153747},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {388--402},
title = {{Reasoning about optimistic concurrency using a program logic for history}},
volume = {6269 LNCS},
year = {2010}
}
@article{Mardziel,
author = {Mardziel, Piotr and Hicks, Michael},
title = {{Quantifying vulnerability of secret generation using hyper-distributions}}
}
@article{Wang2009,
abstract = {Wang et al. (Softw. Pract. Exper. 2007; 37(7):727–745) observed a phenomenon of performance inconsistency in the graphics of Java Abstract Window Toolkit (AWT)/Swing among different Java runtime environments (JREs) on Windows XP. This phenomenon makes it difficult to predict the performance of Java game applications. Therefore, they proposed a portable AWT/Swing architecture, called CYC Window Toolkit (CWT), to provide programmers with high and consistent rendering performance for Java game development among different JREs. They implemented a DirectX version to demonstrate the feasibility of the architecture. This paper extends the above research to other environments in two aspects. First, we evaluate the rendering performance of the original Java AWT with different combinations of JREs, image application programming interfaces, system properties and operating systems (OSs), including Windows XP, Windows Vista, Fedora and Mac OS X. The evaluation results indicate that the performance inconsistency of Java AWT also exists among the four OSs, even if the same hardware configuration is used. Second, we design an OpenGL version of CWT, named CWT-GL, to take advantage of modern 3D graphics cards, and compare the rendering performance of CWT with Java AWT/Swing. The results show that CWT-GL achieves more consistent and higher rendering performance in JREs 1.4 to 1.6 on the four OSs. The results also hint at two approaches: (a) decouple the rendering pipelines of Java AWT/Swing from the JREs for faster upgrading and supporting old JREs and (b) use other graphics libraries, such as CWT, instead of Java AWT/Swing to develop cross-platform Java games with higher and more consistent rendering performance. Copyright {\textcopyright} 2009 John Wiley {\&} Sons, Ltd.},
archivePrefix = {arXiv},
arxivId = {1008.1900},
author = {Wang, Yi Hsien and Wu, I. Chen},
doi = {10.1002/spe},
eprint = {1008.1900},
issn = {00380644},
journal = {Software - Practice and Experience},
keywords = {CYC Window Toolkit,Directx,Linux,Mac OS x,OpenGL,Windows},
number = {7},
pages = {701--736},
title = {{Achieving high and consistent rendering performance of java AWT/Swing on multiple platforms}},
volume = {39},
year = {2009}
}
@article{Saur2015,
abstract = {NoSQL databases like Redis, Cassandra, and MongoDB are increasingly popular because they are flexible, lightweight, and easy to work with. Applications that use these databases will evolve over time, sometimes necessitating (or preferring) a change to the format or organization of the data. The problem we address in this paper is: How can we support the evolution of high-availability applications and their NoSQL data online, without excessive delays or interruptions, even in the presence of backward-incompatible data format changes? We present KVolve, an extension to the popular Redis NoSQL database, as a solution to this problem. KVolve permits a developer to submit an upgrade specification that defines how to transform existing data to the newest version. This transformation is applied lazily as applications interact with the database, thus avoiding long pause times. We demonstrate that KVolve is expressive enough to support substantial practical updates, including format changes to RedisFS, a Redis-backed file system, while imposing essentially no overhead in general use and minimal pause times during updates.},
archivePrefix = {arXiv},
arxivId = {1506.08800},
author = {Saur, Karla and Dumitraş, Tudor and Hicks, Michael},
eprint = {1506.08800},
journal = {arXiv preprint},
title = {{Evolving NoSQL Databases Without Downtime}},
url = {http://arxiv.org/abs/1506.08800},
year = {2015}
}
@article{Wei2016,
author = {Wei, Shiyi and Mardziel, Piotr and Ruef, Andrew and Foster, Jeffrey S and Hicks, Michael},
title = {{Evaluating Design Tradeoffs in Numeric Static Analysis for Java}},
year = {2016}
}
@article{Antopoulos,
author = {Antonopoulos, Timos and Gazzillo, Paul and Hicks, Michael and Koskinen, Eric and Terauchi, Tachio and Wei, Shiyi},
title = {{Decomposition Instead of Self-Composition for k-Safety}}
}
@article{Barthe2015,
author = {Barthe, Gilles and Hicks, Michael and Kerschbaum, Florian and Unruh, Dominique},
doi = {10.4230/DagRep.4.12.29},
journal = {Dagstuhl Reports},
number = {12},
pages = {29--47},
title = {{The Synergy Between Programming Languages and Cryptography (Dagstuhl Seminar 14492)}},
volume = {4},
year = {2015}
}
@inproceedings{Ruef2016,
abstract = {Typical security contests focus on breaking or mitigating the impact of buggy systems. We present the Build-it, Break-it, Fix-it (BIBIFI) contest, which aims to assess the ability to securely build software, not just break it. In BIBIFI, teams build specified software with the goal of maximizing correctness, performance, and security. The latter is tested when teams attempt to break other teams' submissions. Winners are chosen from among the best builders and the best breakers. BIBIFI was designed to be open-ended: teams can use any language, tool, process, etc. that they like. As such, contest outcomes shed light on factors that correlate with successfully building secure software and breaking insecure software. We ran three contests involving two different programming problems. Quantitative analysis from these contests found that the most efficient build-it submissions used C/C++, but submissions coded in a statically-typed language were less likely to have a security flaw; build-it teams with diverse programming-language knowledge also produced more secure code. Shorter programs correlated with better scores. Break-it teams that were also build-it teams were significantly better at finding security bugs.},
address = {New York, New York, USA},
archivePrefix = {arXiv},
arxivId = {1606.01881},
author = {Ruef, Andrew and Hicks, Michael and Parker, James and Levin, Dave and Mazurek, Michelle L. and Mardziel, Piotr},
booktitle = {Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security - CCS'16},
doi = {10.1145/2976749.2978382},
eprint = {1606.01881},
isbn = {9781450341394},
issn = {15437221},
pages = {690--703},
publisher = {ACM Press},
title = {{Build It, Break It, Fix It}},
url = {http://arxiv.org/abs/1606.01881 http://dl.acm.org/citation.cfm?doid=2976749.2978382},
year = {2016}
}
@article{Hudak2007,
abstract = {This paper describes the history of Haskell, including its genesis and principles, technical contributions, implementations and tools, and applications and impact.},
author = {Hudak, Paul and Hughes, John and {Peyton Jones}, Simon and Wadler, Philip},
doi = {10.1145/1238844.1238856},
isbn = {978-1-59593-766-7},
issn = {00448249},
journal = {Proceedings of the third ACM SIGPLAN conference on History of programming languages},
pages = {12--55},
title = {{A History of Haskell: Being Lazy With Class}},
year = {2007}
}
@article{Reynolds1972,
abstract = {Higher-order programming languages (i.e., languages in which procedures or labels can occur as values) are usually defined by interpreters which are themselves written in a programming language based on the lambda calculus (i.e., an applicative language such as pure LISP). Examples include McCarthy's definition of LISP, Landin's SECD machine, the Vienna definition of PL/I, Reynolds' definitions of GEDANKEN, and recent unpublished work by L. Morris and C. Wadsworth. Such definitions can be classified according to whether the interpreter contains higher-order functions, and whether the order of application (i.e., call-by-value versus call-by-name) in the defined language depends upon the order of application in the defining language. As an example, we consider the definition of a simple applicative programming language by means of an interpreter written in a similar language. Definitions in each of the above classifications are derived from one another by informal but constructive methods. The treatment of imperative features such as jumps and assignment is also discussed.},
author = {Reynolds, John C},
doi = {10.1023/A:1010027404223},
issn = {1388-3690},
journal = {Proceedings of the ACM annual conference on ACM 72},
keywords = {applicative language,closure,continuation,gedanken,higher order function,interpreter,j operator,lambda calculus,language definition,lisp,order application,pal,programming language,reference,secd machine},
number = {30602},
pages = {717--740},
title = {{Definitional interpreters for higher-order programming languages}},
volume = {2},
year = {1972}
}
@inproceedings{Launchbury1994,
address = {New York, New York, USA},
author = {Launchbury, John and {Peyton Jones}, Simon L.},
booktitle = {Proceedings of the ACM SIGPLAN 1994 conference on Programming language design and implementation - PLDI '94},
doi = {10.1145/178243.178246},
isbn = {089791662X},
pages = {24--35},
publisher = {ACM Press},
title = {{Lazy functional state threads}},
url = {http://portal.acm.org/citation.cfm?doid=178243.178246},
year = {1994}
}
@article{Graunke2010,
author = {Graunke, K. W.},
title = {{Extensible Scheduling in a Haskell-based Operating System}},
url = {http://web.cecs.pdx.edu/{~}kennyg/house/thesis.pdf},
year = {2010}
}
@article{Grabmuller2006,
abstract = {In this tutorial, we describe how to use monad transformers in order to incrementally add functionality to Haskell programs. It is not a paper about implementing transformers, but about using them to write elegant, clean and powerful programs in Haskell. Starting from an evaluation function for simple expressions, we convert it to monadic style and incrementally add error handling, environment passing, state, logging and input/output by composing monad transformers.},
author = {Grabm{\"{u}}ller, M},
journal = {Draft paper, October},
pages = {1--16},
title = {{Monad transformers step by step}},
url = {http://www.cs.virginia.edu/{~}wh5a/personal/Transformers.pdf},
volume = {2006},
year = {2006}
}
@article{Jung2016,
abstract = {The development of concurrent separation logic (CSL) has sparked a long line of work on modular verification of sophisticated concurrent programs. Two of the most important features supported by several existing extensions to CSL are higher-order quantification and custom ghost state. However, none of the logics that support both of these features reap the full potential of their combination. In particular, none of them provide general support for a feature we dub "higher-order ghost state": the ability to store arbitrary higher-order separation-logic predicates in ghost variables. In this paper, we propose higher-order ghost state as an interesting and useful extension to CSL, which we formalize in the framework of Jung et al.'s recently developed Iris logic. To justify its soundness, we develop a novel algebraic structure called CMRAs ("cameras"), which can be thought of as "step-indexed partial commutative monoids". Finally, we show that Iris proofs utilizing higher-order ghost state can be effectively formalized in Coq, and discuss the challenges we faced in formalizing them.},
author = {Jung, Ralf and Krebbers, Robbert and Birkedal, Lars and Dreyer, Derek},
doi = {10.1145/2951913.2951943},
isbn = {9781450342193},
keywords = {F.3.1 [Logics and Meanings of Programs],Formal Definitions and Theory,compositional verification,fine-grained concurrency,higher-order logic,interactive theorem proving},
pages = {1--13},
title = {{Higher-Order Ghost State}},
year = {2016}
}
@article{Marlow2007,
abstract = {In the light of evidence that Haskell programs compiled by GHC exhibit large numbers of mispredicted branches on modern processors, we re-examine the "tagless" aspect of the STG-machine that GHC uses as its evaluation model.},
author = {Marlow, Simon and Yakushev, Alexey Rodriguez and Jones, Simon Peyton},
doi = {10.1145/1291220.1291194},
isbn = {9781595938152},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
number = {9},
pages = {277},
title = {{Faster laziness using dynamic pointer tagging}},
url = {http://portal.acm.org/citation.cfm?doid=1291220.1291194},
volume = {42},
year = {2007}
}
@inproceedings{Marlow2004,
address = {New York, New York, USA},
author = {Marlow, Simon and Jones, Simon Peyton},
booktitle = {Proceedings of the ninth ACM SIGPLAN international conference on Functional programming - ICFP '04},
doi = {10.1145/1016850.1016856},
isbn = {1581139055},
pages = {4},
publisher = {ACM Press},
title = {{Making a fast curry}},
url = {http://portal.acm.org/citation.cfm?doid=1016850.1016856},
year = {2004}
}
@article{Terei2009,
abstract = {This thesis details the motivation, design and implementation of a new back-end for the Glasgow Haskell Compiler which uses the Low Level Virtual Machine compiler infrastructure for code generation.Haskell as implemented by GHC was found to map remarkably well onto the LLVM Assembly language, although some new approaches were required. The most notable of these being the use of a custom calling convention in order to implement GHC's optimisation feature of pinning STG virtual registers to hardware registers. In the evaluation of the LLVM back-end in regards to GHC's C and native code generator back-end, the LLVM back-end was found to offer comparable results in regards to performance in most situations with the surprising finding that LLVM's optimisations didn't offer any improvement to the run-time of the generated code. The complexity of the LLVM back-end proved to be far simpler though then either the native code generator or C back-ends and as such it offers a compelling primary back-end target for GHC.},
author = {Terei, David Anthony},
keywords = {Compilers,GHC,Haskell,LLVM},
pages = {64},
title = {{Low Level Virtual Machine for Glasgow Haskell Compiler}},
url = {http://hackage.haskell.org/trac/ghc/wiki/Commentary/Compiler/Backends/LLVM},
year = {2009}
}
@article{Wadler1995,
abstract = {The use of monads to structure functional programs is de- scribed. Monads provide a convenient framework for simulating effects found in other languages, such as global state, exception handling, out- put, or non-determinism. Three case studies are looked at in detail: how monads ease the modification of a simple evaluator; how monads act as the basis of a datatype of arrays subject to in-place update; and how monads can be used to build parsers.},
author = {Wadler, Philip},
doi = {10.1007/3-540-59451-5_2},
isbn = {978-3-540-59451-2},
issn = {03029743},
journal = {Advanced Functional Programming},
number = {August 1992},
pages = {1--31},
title = {{Monads for functional programming}},
year = {1995}
}
@article{Li2007,
abstract = {The Glasgow Haskell Compiler (GHC) has quite sophisticated support for concurrency in its runtime system, which is written in low-level C code. As GHC evolves, the runtime system becomes increasingly complex, error-prone, difficult to maintain and difficult to add new concurrency features.},
author = {Li, Peng and Marlow, Simon and Jones, Simon Peyton and Tolmach, Andrew},
doi = {10.1145/1291201.1291217},
isbn = {9781595936745},
journal = {Proceedings of the ACM SIGPLAN workshop on Haskell workshop  - Haskell '07},
keywords = {concurrency,haskell,multi-processor support,threads,transactional memory},
pages = {107},
title = {{Lightweight concurrency primitives for GHC}},
url = {http://portal.acm.org/citation.cfm?doid=1291201.1291217},
year = {2007}
}
@misc{SimonL.PeytonJones,
author = {{Peyton Jones}, Simon L. and Salkild, Jon},
title = {{The spineless tagless G-machine v2.5}}
}
@phdthesis{Liang2014a,
author = {Liang, Hongjin},
school = {University of Science and Technology of China},
title = {{Refinement Verification of Concurrent Programs and Its Applications}},
year = {2014}
}
@article{Shankar2002,
author = {Shankar, Natarajan},
isbn = {978-3-540-43928-8},
issn = {16113349},
journal = {FME 2002: Formal Methods - Getting IT Right},
pages = {1--20},
title = {{Little Engines of Proof}},
volume = {2391},
year = {2002}
}
@inproceedings{Goncharenko2016,
address = {New York, New York, USA},
author = {Goncharenko, Boryana and Zaytsev, Vadim},
booktitle = {Proceedings of the 2016 ACM SIGPLAN International Conference on Software Language Engineering - SLE 2016},
doi = {10.1145/2997364.2997386},
isbn = {9781450344470},
keywords = {conventions,software language design},
pages = {90--104},
publisher = {ACM Press},
title = {{Language design and implementation for the domain of coding conventions}},
url = {http://dl.acm.org/citation.cfm?doid=2997364.2997386},
year = {2016}
}
@article{Terei2012,
abstract = {Though Haskell is predominantly type-safe, implementations contain a few loopholes through which code can bypass typing and module encapsulation. This paper presents Safe Haskell, a language extension that closes these loopholes. Safe Haskell},
author = {Terei, David and Marlow, Simon and {Peyton Jones}, Simon and Mazi{\`{e}}res, David},
doi = {10.1145/2364506.2364524},
isbn = {9781450315746},
issn = {15232867},
journal = {Proceedings of the 2012 symposium on Haskell symposium - Haskell '12},
keywords = {haskell,security,type safety},
pages = {137},
title = {{Safe Haskell}},
url = {http://dl.acm.org/citation.cfm?doid=2364506.2364524},
year = {2012}
}
@article{Chargueraud2012,
abstract = {This paper provides an introduction to the locally nameless approach to the representation of syntax with variable binding, focusing in particular on the use of this technique in formal proofs. First, it explains the benefits of representing bound variables with de Bruijn indices while retaining names for free variables. It then describes the operations involved for manipulating syntax in that form, and shows how to define and reason about judgments on locally nameless terms.},
author = {Chargu{\'{e}}raud, Arthur},
doi = {10.1007/s10817-011-9225-2},
issn = {01687433},
journal = {Journal of Automated Reasoning},
keywords = {Binders,Cofinite quantification,Formal proofs,Locally nameless,Metatheory},
number = {3},
pages = {363--408},
title = {{The locally nameless representation}},
volume = {49},
year = {2012}
}
@inproceedings{Dreyer2009,
author = {Dreyer, Derek and Ahmed, Amal and Birkedal, Lars},
booktitle = {2009 24th Annual IEEE Symposium on Logic In Computer Science},
doi = {10.1109/LICS.2009.34},
isbn = {978-0-7695-3746-7},
month = {aug},
pages = {71--80},
publisher = {IEEE},
title = {{Logical Step-Indexed Logical Relations}},
url = {http://ieeexplore.ieee.org/document/5230591/},
year = {2009}
}
@article{Scibior2011,
author = {Scibior, Adam and Ghahramani, Zoubin and Gordon, Andrew D.},
doi = {10.1145/2804302.2804317},
isbn = {9781450338080},
journal = {Proceedings of the 2015 ACM SIGPLAN Symposium on Haskell (Haskell '15)},
keywords = {bayesian statistics,haskell,monads,monte carlo,probabilistic programming,probability distribution},
pages = {165--176},
title = {{Practical Probabilistic Programming with Monads}},
year = {2015}
}
@article{Torp-Smith2008,
abstract = {We present a programming language, model, and logic appropriate for implementing and reasoning about a memory management system. We then state what is meant by correctness of a copying garbage collector, and employ a variant of the novel separation logics [ 18, 23] to formally specify partial correctness of Cheney's copying garbage collector [8]. Finally, we prove that our implementation of Cheney's algorithm meets its specification, using the logic we have given, and auxiliary variables [19].},
author = {Torp-Smith, Noah and Birkedal, Lars and Reynolds, John C.},
doi = {10.1145/1377492.1377499},
issn = {01640925},
journal = {ACM Transactions on Programming Languages and Systems},
keywords = {Separation logic,copying garbage collector,local reasoning},
number = {4},
pages = {1--58},
title = {{Local reasoning about a copying garbage collector}},
url = {http://dl.acm.org/citation.cfm?id=1377492.1377499},
volume = {30},
year = {2008}
}
@article{Calcagno2007,
abstract = {Separation logic is an extension of Hoare's logic which supports a local way of reasoning about programs that mutate memory. We present a study of the semantic structures lying behind the logic. The core idea is of a local action, a state transformer that mutates the state in a local way. We formulate local actions for a class of models called separation algebras, abstracting from the RAM and other specific concrete models used in work on separation logic. Local actions provide a semantics for a generalized form of (sequential) separation logic. We also show that our conditions on local actions allow a general soundness proof for a separation logic for concurrency, interpreted over arbitrary separation algebras.},
author = {Calcagno, Cristiano and O'Hearn, Peter W. and Yang, Hongseok},
doi = {10.1109/LICS.2007.30},
isbn = {0769529089},
issn = {10436871},
journal = {Proceedings - Symposium on Logic in Computer Science},
pages = {366--375},
title = {{Local action and abstract separation logic}},
year = {2007}
}
@article{Gotsman2007,
abstract = {We present a resource oriented program logic that is able to reason about concurrent heap-manipulating programs with unbounded numbers of dynamically-allocated locks and threads. The logic is inspired by concurrent separation logic, but handles these more realistic concurrency primitives. We demonstrate that the proposed logic allows local reasoning about programs for which there exists a notion of dynamic ownership of heap parts by locks and threads.},
author = {Gotsman, Alexey and Berdine, Josh and Cook, Byron and Rinetzky, Noam and Sagiv, Mooly},
doi = {10.1007/978-3-540-76637-7},
isbn = {978-3-540-76636-0},
issn = {16113349},
journal = {Programming Languages and Systems (APLAS 2007)},
pages = {19--37},
title = {{Local reasoning for storable locks and threads (TR)}},
year = {2007}
}
@article{Cha2012,
abstract = {In this paper we present MAYHEM, a new sys- tem for automatically finding exploitable bugs in binary (i.e., executable) programs. Every bug reported by MAYHEM is accompanied by a working shell-spawning exploit. The working exploits ensure soundness and that each bug report is security- critical and actionable. M AYHEM works on raw binary code without debugging information. To make exploit generation possible at the binary-level, MAYHEM addresses two major technical challenges: actively managing execution paths without exhausting memory, and reasoning about symbolic memory indices, where a load or a store address depends on user input. To this end, we propose two novel techniques: 1) hybrid symbolic execution for combining online and offline (concolic) execution to maximize the benefits of both techniques, and 2) index-based memory modeling, a technique that allows MAYHEM to efficiently reason about symbolic memory at the binary level. We used M AYHEM to find and demonstrate 29 exploitable vulnerabilities in both Linux and Windows programs, 2 of which were previously undocumented.},
author = {Cha, Sang Kil and Avgerinos, Thanassis and Rebert, Alexandre and Brumley, David},
doi = {10.1109/SP.2012.31},
isbn = {9780769546810},
issn = {10816011},
journal = {Proceedings - IEEE Symposium on Security and Privacy},
keywords = {exploit generation,hybrid execution,index-based memory modeling,symbolic memory},
pages = {380--394},
title = {{Unleashing Mayhem on binary code}},
year = {2012}
}
@article{Hermida2014,
abstract = {In his seminal paper on "Types, Abstraction and Parametric Polymorphism," John Reynolds called for homomorphisms to be generalized from functions to relations. He reasoned that such a generalization would allow type-based "abstraction" (representation independence, information hiding, naturality or parametricity) to be captured in a mathematical theory, while accounting for higher-order types. However, after 30 years of research, we do not yet know fully how to do such a generalization. In this article, we explain the problems in doing so, summarize the work carried out so far, and call for a renewed attempt at addressing the problem. {\textcopyright} 2014 Elsevier B.V.},
author = {Hermida, Claudio and Reddy, Uday S. and Robinson, Edmund P.},
doi = {10.1016/j.entcs.2014.02.008},
issn = {15710661},
journal = {Electronic Notes in Theoretical Computer Science},
keywords = {Category Theory,Data abstraction,Definability,Fibrations,Homomorphisms,Information hiding,Logical Relations,Natural Transformations,Parametric polymorphism,Reflexive Graphs,Relation lifting,Relational Parametricity,Universal algebra},
pages = {149--180},
title = {{Logical relations and parametricity}},
volume = {303},
year = {2014}
}
@article{Halbwachs1991,
author = {Halbwachs, N. and Caspi, P. and Raymond, P. and Pilaud, D.},
doi = {10.1109/5.97300},
issn = {00189219},
journal = {Proceedings of the IEEE},
number = {9},
pages = {1305--1320},
title = {{The synchronous data flow programming language LUSTRE}},
url = {http://ieeexplore.ieee.org/document/97300/},
volume = {79},
year = {1991}
}
@article{Vafeiadis2011,
author = {Vafeiadis, Viktor and Jones, Cliff B.},
title = {{A marriage of rely/guarantee and separation logic}},
url = {http://www.mpi-sws.org/{~}viktor/rgsl-tutorial/part2.pdf},
year = {2011}
}
@article{Feng2009a,
author = {Feng, Xinyu},
doi = {10.1145/1594834.1480922},
isbn = {9781605583792},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {concurrency,information hiding,local reasoning,rely-guarantee reasoning,separation logic},
month = {jan},
number = {1},
pages = {315},
title = {{Local rely-guarantee reasoning}},
url = {http://portal.acm.org/citation.cfm?doid=1594834.1480922},
volume = {44},
year = {2009}
}
@article{Dreyer2007,
abstract = {ML modules and Haskell type classes have proven to be highly effective tools for program structuring. Modules emphasize explicit configuration of program components and the use of data abstraction. Type classes emphasize implicit program construction and ad hoc polymorphism. In this paper, we show how the implicitly-typed style of type class programming may be supported within the framework of an explicitly-typed module language by viewing type classes as a particular mode of use of modules. This view offers a harmonious integration of modules and type classes, where type class features, such as class hierarchies and associated types, arise naturally as uses of existing module-language constructs, such as module hierarchies and type components. In addition, programmers have explicit control over which type class instances are available for use by type inference in a given scope. We formalize our approach as a Harper-Stone-style elaboration relation, and provide a sound type inference algorithm as a guide to implementation},
author = {Dreyer, Derek and Harper, Robert and Chakravarty, Manuel M. T. and Keller, Gabriele},
doi = {10.1145/1190215.1190229},
isbn = {1595935754},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {design,languages,modules,theory,type classes,type inference,type systems},
number = {1},
pages = {63},
title = {{Modular type classes}},
volume = {42},
year = {2007}
}
@article{Hobor2010,
abstract = {Building semantic models that account for various kinds of indirect reference has traditionally been a difficult problem. Indirect reference can appear in many guises, such as heap pointers, higher-order functions, object references, and shared-memory mutexes. We give a general method to construct models containing indirect reference by presenting a "theory of indirection". Our method can be applied in a wide variety of settings and uses only simple, elementary mathematics. In addition to various forms of indirect reference, the resulting models support powerful features such as impredicative quantification and equirecursion; moreover they are compatible with the kind of powerful substructural accounting required to model (higher-order) separation logic. In contrast to previous work, our model is easy to apply to new settings and has a simple axiomatization, which is complete in the sense that all models of it are isomorphic. Our proofs are machine-checked in Coq. Copyright {\textcopyright} 2010 ACM.},
author = {Hobor, Aquinas and Dockins, Robert and Appel, Andrew W.},
doi = {10.1145/1707801.1706322},
isbn = {9781605584799},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {indirection theory,semantic models of types,step-indexed models},
number = {1},
pages = {171},
title = {{A theory of indirection via approximation}},
volume = {45},
year = {2010}
}
@article{Winter2013,
abstract = {Path-sensitive data flow analysis pairs classical data flow analysis with an analysis of feasibility of paths to improve precision. In this paper we propose a framework for path-sensitive backward data flow analysis that is enhanced with an abstraction of the predicate domain. The abstraction is based on a three-valued logic. It follows the strategy that path predicates are simplified if possible (without calling an external predicate solver) and every predicate that could not be reduced to a simple predicate is abstracted to the unknown value, for which the feasibility is undecided. The implementation of the framework scales well and delivers promising results.},
author = {Winter, Kirsten and Zhang, Chenyi and Hayes, Ian J. and Keynes, Nathan and Cifuentes, Cristina and Li, Lian},
doi = {10.1007/978-3-642-41202-8_27},
isbn = {9783642412011},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {415--430},
title = {{Path-sensitive data flow analysis simplified}},
volume = {8144 LNCS},
year = {2013}
}
@article{Anderson2016,
author = {Anderson, Brian and Bergstrom, Lars and Goregaokar, Manish and Matthews, Josh and McAllister, Keegan and Moffitt, Jack and Sapin, Simon},
doi = {10.1145/2889160.2889229},
isbn = {978-1-4503-4205-6},
journal = {Proceedings of the 38th International Conference on Software Engineering Companion},
keywords = {Rust,browser engine,concurrency,parallelism,servo},
pages = {81--89},
title = {{Engineering the Servo Web Browser Engine Using Rust}},
url = {http://doi.acm.org/10.1145/2889160.2889229},
year = {2016}
}
@article{Xu2010,
author = {Xu, Zhongxing and Kremenek, Ted and Zhang, Jian},
doi = {10.1007/978-3-642-16558-0_44},
isbn = {3-642-16557-5, 978-3-642-16557-3},
journal = {4th International Symposium on Leveraging Applications (ISoLA 2010)},
pages = {535--548},
title = {{A memory model for static analysis of C programs}},
url = {http://dl.acm.org/citation.cfm?id=1939281.1939332},
year = {2010}
}
@article{Leino2010,
abstract = {Traditionally, the full verification of a programs functional correctness has been obtained with pen and paper or with interactive proof assistants, whereas only reduced verification tasks, such as extended static checking, have enjoyed the automation offered by satisfiability-modulo-theories (SMT) solvers. More recently, powerful SMT solvers and well-designed program verifiers are starting to break that tradition, thus reducing the effort involved in doing full verification. This paper gives a tour of the language and verifier Dafny, which has been used to verify the functional correctness of a number of challenging pointer-based programs. The paper describes the features incorporated in Dafny, illustrating their use by small examples and giving a taste of how they are coded for an SMT solver. As a larger case study, the paper shows the full functional specification of the Schorr-Waite algorithm in Dafny.},
author = {Leino, K. Rustan M},
doi = {10.1007/978-3-642-17511-4_20},
isbn = {3642175104},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {348--370},
title = {{Dafny: An automatic program verifier for functional correctness}},
volume = {6355 LNAI},
year = {2010}
}
@article{Barnett2004,
abstract = {The Spec$\backslash${\#} programming system is a new attempt at a more cost effec- tive way to develop and maintain high-quality software. This paper describes the goals and architecture of the Spec$\backslash${\#} programming system, consisting of the object- oriented Spec{\#} programming language, the Spec{\#} compiler, and the Boogie static program verifier. The language includes constructs for writing specifications that capture programmer intentions about how methods and data are to be used, the compiler emits run-time checks to enforce these specifications, and the verifier can check the consistency between a program and its specifications.},
author = {Barnett, Mike and Leino, K. Rustan M. and Schulte, Wolfram},
isbn = {9783540242871},
issn = {03029743},
journal = {International Conference in Construction and Analysis of Safe, Secure and Interoperable Smart Devices (CASSIS '04)},
number = {October},
pages = {49--69},
title = {{The Spec{\#} Programming System: An Overview}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.2133},
year = {2004}
}
@article{Fischer2005,
abstract = {Dataflow analyses sacrifice path-sensitivity for efficiency and lead to false positives when used for verification. Predicate refinement based model checking methods are path-sensitive but must perform many expensive iterations to find all the relevant facts about a program, not all of which are naturally expressed and analyzed using predicates. We show how to join these complementary techniques to obtain efficient and precise versions of any lattice-based dataflow analysis using predicated lattices. A predicated lattice partitions the program state according to a set of predicates and tracks a lattice element for each partition. The resulting dataflow analysis is more precise than the eager dataflow analysis without the predicates.In addition, we automatically infer predicates to rule out imprecisions. The result is a dataflow analysis that can adaptively refine its precision. We then instantiate this generic framework using a symbolic execution lattice, which tracks pointer and value information precisely. We give experimental evidence that our combined analysis is both more precise than the eager analysis in that it is sensitive enough to prove various properties, as well as much faster than the lazy analysis, as many relevant facts are eagerly computed, thus reducing the number of iterations.This results in an order of magnitude improvement in the running times from a purely lazy analysis.},
author = {Fischer, Jeffrey and Jhala, Ranjit and Majumdar, Rupak},
doi = {10.1145/1095430.1081742},
isbn = {1595930140},
issn = {01635948},
journal = {ACM SIGSOFT Software Engineering Notes},
keywords = {abstraction,counterexample analysis,dataflow analysis,model checking,predicate},
pages = {227},
title = {{Joining dataflow with predicates}},
volume = {30},
year = {2005}
}
@article{Ocariza2013,
abstract = {...The majority (65{\%}) of JavaScript faults are DOM-related, meaning they are caused by faulty interactions of the JavaScript code with the Document Object Model (DOM). Further, 80{\%} of the highest impact JavaScript faults are DOM-related. Finally, most JavaScript faults originate from programmer mistakes committed in the JavaScript code itself,...},
author = {Ocariza, Frolin and Bajaj, Kartik and Pattabiraman, Karthik and Mesbah, Ali},
doi = {10.1109/ESEM.2013.18},
isbn = {978-0-7695-5056-5},
issn = {19493770},
journal = {International Symposium on Empirical Software Engineering and Measurement},
keywords = {Document Object Model (DOM),JavaScript,empirical study},
pages = {55--64},
title = {{An empirical study of client-side JavaScript bugs}},
year = {2013}
}
@phdthesis{ContainerVerification2011,
school = {Stanford University},
title = {{Precise and Automatic Verification of Container-Manipulating Programs}},
year = {2011}
}
@article{Tan2010,
abstract = {Through foreign function interfaces (FFIs), software components in different programming languages interact with each other in the same address space. Recent years have witnessed a number of systems that analyze FFIs for safety and reliability. However, lack of formal specifications of FFIs hampers progress in this endeavor. We present a formal operational model, JNI Light (JNIL), for a subset of a widely used FFI-the Java Native Interface (JNI). JNIL focuses on the core issues when a high-level garbage-collected language interacts with a low-level language. It proposes abstractions for handling a shared heap, cross-language method calls, cross-language exception handling, and garbage collection. JNIL can directly serve as a formal basis for JNI tools and systems. The abstractions in JNIL are also useful when modeling other FFIs, such as the Python/C interface and the OCaml/C interface.},
author = {Tan, Gang},
doi = {10.1007/978-3-642-17164-2_9},
isbn = {364217163X},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {114--130},
title = {{JNI light: An operational model for the core JNI}},
volume = {6461 LNCS},
year = {2010}
}
@article{Black2016,
abstract = {Programming languages serve a dual purpose: to communicate programs to computers, and to communicate programs to humans. Indeed, it is this dual purpose that makes programming language design a constrained and challenging problem. Inheritance is an essential aspect of that second purpose: it is a tool to improve communication. Humans understand new concepts most readily by first looking at a number of concrete examples, and later abstracting over those examples. The essence of inheritance is that it mirrors this process: it provides a formal mechanism for moving from the concrete to the abstract.},
archivePrefix = {arXiv},
arxivId = {1601.02059},
author = {Black, Andrew P. and Bruce, Kim B. and Noble, James},
doi = {10.1007/978-3-319-30936-1_4},
eprint = {1601.02059},
isbn = {9783319309354},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
keywords = {Abstraction,Inheritance,Object-oriented programming,Program understanding,Programming languages},
pages = {73--94},
title = {{The essence of Inheritance}},
volume = {9600},
year = {2016}
}
@article{Xiao2015,
author = {Xiao, Xiao and Han, Shi and Zhang, Charles and Zhang, Dongmei},
doi = {10.1007/978-3-319-26529-2_18},
isbn = {9783319265285},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {335--355},
title = {{Uncovering JavaScript performance code smells relevant to type mutations}},
volume = {9458},
year = {2015}
}
@inproceedings{Saxena2010,
author = {Saxena, Prateek and Akhawe, Devdatta and Hanna, Steve and Mao, Feng and McCamant, Stephen and Song, Dawn},
booktitle = {2010 IEEE Symposium on Security and Privacy},
doi = {10.1109/SP.2010.38},
isbn = {978-1-4244-6894-2},
keywords = {web security,string decision,symbolic execution},
pages = {513--528},
publisher = {IEEE},
title = {{A Symbolic Execution Framework for JavaScript}},
url = {http://ieeexplore.ieee.org/document/5504700/},
year = {2010}
}
@article{Wadler2009,
abstract = {We introduce the blame calculus, which adds the notion of blame from Findler and Felleisen's contracts to a system similar to Siek and Taha's gradual types and Flanagan's hybrid types. We characterise where positive and negative blame can arise by decomposing the usual notion of subtype into positive and negative subtypes, and show that these recombine to yield naive subtypes. Naive subtypes previously appeared in type systems that are unsound, but we believe this is the first time naive subtypes play a role in establishing type soundness.},
author = {Wadler, Philip and Findler, Robert Bruce},
doi = {10.1007/978-3-642-00590-9_1},
isbn = {9783642005893},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {1--16},
title = {{Well-Typed programs can't be blamed}},
volume = {5502},
year = {2009}
}
@article{Leino2009,
author = {Leino, K. Rustan M and M{\"{u}}ller, Peter and Smans, Jan},
doi = {10.1007/978-3-642-03829-7_7},
isbn = {364203828X},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {195--222},
title = {{Verification of concurrent programs with chalice}},
volume = {5705 LNCS},
year = {2009}
}
@article{Ahmadi2015,
author = {Ahmadi, Reza and Leino, K. Rustan M. and Nummenmaa, Jyrki},
doi = {10.1145/2786536.2786542},
isbn = {9781450336567},
journal = {Proceedings of the 17th Workshop on Formal Techniques for Java-like Programs - FTfJP '15},
keywords = {boogie,dafny,program verification,traits},
pages = {1--5},
title = {{Automatic verification of Dafny programs with traits}},
url = {http://dl.acm.org/citation.cfm?doid=2786536.2786542},
year = {2015}
}
@article{Calcagno2011,
abstract = {This paper describes a compositional shape analysis, where each procedure is analyzed independently of its callers. The analysis uses an abstract domain based on a restricted fragment of separation logic, and assigns a collection of Hoare triples to each procedure; the triples provide an over-approximation of data structure usage. Compositionality brings its usual benefits -- increased potential to scale, ability to deal with unknown calling contexts, graceful way to deal with imprecision -- to shape analysis, for the first time. The analysis rests on a generalized form of abduction (inference of explanatory hypotheses) which we call bi-abduction. Bi-abduction displays abduction as a kind of inverse to the frame problem: it jointly infers anti-frames (missing portions of state) and frames (portions of state not touched by an operation), and is the basis of a new interprocedural analysis algorithm. We have implemented our analysis algorithm and we report case studies on smaller programs to evaluate the quality of discovered specifications, and larger programs (e.g., an entire Linux distribution) to test scalability and graceful imprecision.},
author = {Calcagno, Cristiano and Distefano, Dino and O'Hearn, Peter W. and Yang, Hongseok},
doi = {10.1145/2049697.2049700},
isbn = {9781605583792},
issn = {00045411},
journal = {Journal of the ACM},
keywords = {compositional analysis,languages,reliability,theory,verification},
number = {6},
pages = {26:1--26:66},
title = {{Compositional Shape Analysis by Means of Bi-Abduction}},
url = {http://discovery.ucl.ac.uk/1342369/},
volume = {58},
year = {2011}
}
@article{Jung2015,
author = {Jung, Ralf and Swasey, David and Sieczkowski, Filip and Svendsen, Kasper and Turon, Aaron and Birkedal, Lars and Dreyer, Derek},
doi = {10.1145/2775051.2676980},
isbn = {978-1-4503-3300-9},
issn = {03621340},
journal = {POPL: Principles of Programming Languages},
keywords = {atomicity,compositional verification,fine-grained concurrency,higher-order logic,invariants,partial commutative monoids,separation logic},
number = {1},
pages = {637--650},
title = {{Iris: Monoids and Invariants as an Orthogonal Basis for Concurrent Reasoning}},
url = {http://dl.acm.org/citation.cfm?id=2775051.2676980},
volume = {50},
year = {2015}
}
@misc{Wikipedia2016,
author = {{Wikipedia}},
title = {{Garbage collection}},
url = {https://en.wikipedia.org/wiki/Garbage{\_}collection{\_}(computer{\_}science)},
year = {2016}
}
@article{Theisen,
author = {Theisen, Christopher and Williams, Laurie},
isbn = {9781450342773},
keywords = {attack surface,crash dumps,metrics,security,stack traces},
pages = {121--123},
title = {{Poster: Risk-Based Attack Surface Approximation}},
year = {2016}
}
@article{Cadar2013,
abstract = {The challenges---and great promise---of modern symbolic execution techniques, and the tools to help implement them.},
author = {Cadar, Cristian and Sen, Koushik},
doi = {10.1145/2408776.2408795},
issn = {0001-0782},
journal = {Communications of the ACM},
number = {2},
pages = {82--90},
title = {{Symbolic execution for software testing: three decades later}},
url = {http://dl.acm.org/ft{\_}gateway.cfm?id=2408795{\&}type=html},
volume = {56},
year = {2013}
}
@article{Liang2013,
author = {Liang, Hongjin and Hoffmann, Jan and Feng, Xinyu and Shao, Zhong},
doi = {10.1007/978-3-642-40184-8_17},
isbn = {9783642401831},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {227--241},
title = {{Characterizing progress properties of concurrent objects via contextual refinements}},
volume = {8052 LNCS},
year = {2013}
}
@article{BAHR2015,
abstract = {In this article, we present a new approach to the problem of calculating compilers. In particular, we develop a simple but general technique that allows us to derive correct compilers from high-level semantics by systematic calculation, with all details of the implementation of the compilers falling naturally out of the calculation process. Our approach is based upon the use of standard equational reasoning techniques, and has been applied to calculate compilers for a wide range of language features and their combination, including arithmetic expressions, exceptions, state, various forms of lambda calculi, bounded and unbounded loops, non-determinism and interrupts. All the calculations in the article have been formalised using the Coq proof assistant, which serves as a convenient interactive tool for developing and verifying the calculations.},
author = {Bahr, Patrick and Hutton, Graham},
doi = {10.1017/S0956796815000180},
issn = {0956-7968},
journal = {Journal of Functional Programming},
month = {sep},
number = {July},
pages = {e14},
title = {{Calculating correct compilers}},
url = {http://www.journals.cambridge.org/abstract{\_}S0956796815000180},
volume = {25},
year = {2015}
}
@article{Wu2016,
abstract = {Call traces, i.e., sequences of function calls and returns, are fundamental to a wide range of program analyses such as bug reproduction, fault diagnosis, performance analysis, and many others. The conventional approach to collect call traces that instruments each function call and return site incurs large space and time overhead. Our approach aims at reducing the recording overheads by instrumenting only a small amount of call sites while keeping the capability of recovering the full trace. We propose a call trace model and a logged call trace model based on an LL(1) grammar, which enables us to define the criteria of a feasible solution to call trace collection. Based on the two models, we prove that to collect call traces with minimal instrumentation is an NP-hard problem. We then propose an efficient approach to obtaining a suboptimal solution. We implemented our approach as a tool Casper and evaluated it using the DaCapo benchmark suite. The experiment results show that our approach causes significantly lower runtime (and space) overhead than two state-of-the-art approaches.},
author = {Wu, Rongxin and Xiao, Xiao and Cheung, Shing-Chi and Zhang, Hongyu and Zhang, Charles},
doi = {10.1145/2837614.2837619},
isbn = {9781450335492},
issn = {07308566},
keywords = {D.2.5 [Testing and Debugging],algorithms,call trace,instrumentation,overhead,program analysis,tracing},
pages = {678--690},
title = {{Casper: An Efficient Approach to Call Trace Collection}},
year = {2016}
}
@article{Veanes2014,
author = {Veanes, Margus and Bj{\o}rner, Nikolaj and Nachmanson, Lev and Bereg, Sergey},
doi = {10.1007/978-3-319-08867-9_42},
isbn = {9783319088662},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {628--645},
title = {{Monadic decomposition}},
volume = {8559 LNCS},
year = {2014}
}
@article{Luo2016,
author = {Luo, Zhaoyi and Atlee, Joanne M.},
doi = {10.1145/2997364.2997372},
isbn = {9781450344470},
journal = {Proceedings of the 2016 ACM SIGPLAN International Conference on Software Language Engineering  - SLE 2016},
keywords = {domain-specific language,language product line,mbeddr,mps,state-machine model},
pages = {105--117},
title = {{BSML-mbeddr: integrating semantically configurable state-machine models in a C programming environment}},
url = {http://dl.acm.org/citation.cfm?doid=2997364.2997372},
year = {2016}
}
@article{Tang2007,
abstract = {Production compilers ' optimizers typically operate at low abstraction levels, transformation rules enacting on operations on built-in types only. Transformations at higher levels of abstraction, on operations of types defined in libraries or by the user, are typically not supported. Such high-level optimizations could, however, yield greater benefits than first lowering the abstractions and subjecting the result to low-level transformations. Built-in compiler optimizations can in principle apply to user-defined types, if those types possess properties that guarantee that the optimizing transformations preserve the meaning of the program. The problem is conveying this information to the compiler in a non-disruptive manner. This article describes a framework for specifying and implementing generic “concept-based optimizations. ” The framework is based on careful categorization, applying the generic programming paradigm, of the algebraic properties that justify particular optimizing transformations. Our framework is built on top of ConceptGCC, a compiler implementing the new language features concept and concept map of the forthcoming standard C++. Concepts describe the syntactic and semantic properties of classes of types, for which generic semantics-preserving transformations can be defined. Concept maps establish memberships to these classes, enabling the generic optimizations for specific user-defined types.},
author = {Tang, Xiaolong and J{\"{a}}rvi, Jaakko},
doi = {10.1145/1512762.1512772},
isbn = {9781605580869},
journal = {Proceedings of the 2007 Symposium on Library-Centric Software Design},
keywords = {C++,Concepts,Design,Generic programming,High-level optimization,Languages,Performance},
pages = {97--108},
title = {{Concept-Based Optimization}},
url = {http://portal.acm.org/citation.cfm?doid=1512762.1512772},
year = {2007}
}
@article{Furr2008,
author = {Furr, Michael and Foster, Jeffrey S},
doi = {10.1145/1377492.1377493},
issn = {0164-0925},
journal = {ACM Trans. Program. Lang. Syst.},
keywords = {FFI,Foreign function interface,JNI,Java,Java Native Interface,OCaml,dataflow analysis,flow-sensitive type system,foreign function calls,multilingual type inference,multilingual type system,representational type},
number = {4},
pages = {18:1--18:63},
title = {{Checking Type Safety of Foreign Function Calls}},
url = {http://doi.acm.org/10.1145/1377492.1377493},
volume = {30},
year = {2008}
}
@article{Lattner2004,
author = {Lattner, Chris and Adve, Vikram},
isbn = {0769521029},
journal = {International Symposium on Code Generation and Optimization (CGO 2004)},
title = {{LLVM : A Compilation Framework for Lifelong Program Analysis {\&} Transformation}},
year = {2004}
}
@article{Baldoni2016,
abstract = {Many security and software testing applications require checking whether certain properties of a program hold for any possible usage scenario. For instance, a tool for identifying software vulnerabilities may need to rule out the existence of any backdoor to bypass a program's authentication. One approach would be to test the program using different, possibly random inputs. As the backdoor may only be hit for very specific program workloads, automated exploration of the space of possible inputs is of the essence. Symbolic execution provides an elegant solution to the problem, by systematically exploring many possible execution paths at the same time without necessarily requiring concrete inputs. Rather than taking on fully specified input values, the technique abstractly represents them as symbols, resorting to constraint solvers to construct actual instances that would cause property violations. Symbolic execution has been incubated in dozens of tools developed over the last four decades, leading to major practical breakthroughs in a number of prominent software reliability applications. The goal of this survey is to provide an overview of the main ideas, challenges, and solutions developed in the area, distilling them for a broad audience.},
archivePrefix = {arXiv},
arxivId = {1610.00502},
author = {Baldoni, Roberto and Coppa, Emilio and D'Elia, Daniele Cono and Demetrescu, Camil and Finocchi, Irene},
eprint = {1610.00502},
pages = {1--39},
title = {{A Survey of Symbolic Execution Techniques}},
url = {http://arxiv.org/abs/1610.00502},
year = {2016}
}
@inproceedings{PolitzJoeGibbsandEliopoulosSpiridonAristidesandGuhaArjunandKrishnamurthi2011,
author = {Politz, Joe Gibbs and Eliopoulos, Spiridon Aristides and Guha, Arjun and Krishnamurthi, Shriram},
booktitle = {Proceedings of the 20th USENIX Conference on Security},
keywords = {sandbox,script inclusion,security architecture,web application security,web mashups},
pages = {1},
publisher = {USENIX Association},
title = {{ADsafety: type-based verification of JavaScript Sandboxing}},
url = {http://dl.acm.org/citation.cfm?id=2028067.2028079},
year = {2011}
}
@article{Barr2013,
abstract = {It is well-known that floating-point exceptions can be disastrous and writing exception-free numerical programs is very difficult. Thus, it is important to automatically detect such errors. In this paper, we present Ariadne, a practical symbolic execution system specifically designed and implemented for detecting floating-point exceptions. Ariadne systematically transforms a numerical program to explicitly check each exception triggering condition. Ariadne symbolically executes the transformed program using real arithmetic to find candidate real-valued inputs that can reach and trigger an exception. Ariadne converts each candidate input into a floating-point number, then tests it against the original program. In general, approximating floating-point arithmetic with real arithmetic can change paths from feasible to infeasible and vice versa. The key insight of this work is that, for the problem of detecting floating-point exceptions, this approximation works well in practice because, if one input reaches an exception, many are likely to, and at least one of them will do so over both floating-point and real arithmetic. To realize Ariadne, we also devised a novel, practical linearization technique to solve nonlinear constraints. We extensively evaluated Ariadne over 467 scalar functions in the widely used GNU Scientific Library (GSL). Our results show that Ariadne is practical and identifies a large number of real runtime exceptions in GSL. The GSL developers confirmed our preliminary findings and look forward to Ariadne's public release, which we plan to do in the near future.},
author = {Barr, Earl T. and Vo, Thanh and Le, Vu and Su, Zhendong},
doi = {10.1145/2429069.2429133},
isbn = {978-1-4503-1832-7},
issn = {0362-1340},
journal = {POPL: Principles of Programming Languages},
keywords = {algorithms,floating-point exceptions,languages,reliability,symbolic execution,verification},
pages = {549--560},
title = {{Automatic detection of floating-point exceptions}},
url = {http://doi.acm.org/10.1145/2429069.2429133},
year = {2013}
}
@article{Petricek2015a,
author = {Petricek, Tomas},
doi = {10.1145/2814228.2814249},
isbn = {978-1-4503-3688-8},
journal = {2015 ACM International Symposium on New Ideas, New Paradigms, and Reflections on Programming and Software (Onward!)},
keywords = {philosophy,science,types},
pages = {254--266},
title = {{Against a Universal Definition of 'Type'}},
url = {http://doi.acm.org/10.1145/2814228.2814249},
year = {2015}
}
@article{Whaley2005,
abstract = {Many problems in program analysis can be expressed naturally and  concisely in a declarative language like Datalog. This makes it easy to specify  new analyses or extend or compose existing analyses. However, previous implementations  of declarative languages perform poorly compared with traditional  implementations. This paper describes bddbddb, a BDD-Based Deductive DataBase,  which implements the declarative language Datalog with stratified negation,  totally-ordered finite domains and comparison operators. bddbddb uses binary  decision diagrams (BDDs) to efficiently represent large relations. BDD operations  take time proportional to the size of the data structure, not the number  of tuples in a relation, which leads to fast execution times. bddbddb is an effective  tool for implementing a large class of program analyses. We show that a  context-insensitive points-to analysis implemented with bddbddb is about twice  as fast as a carefully hand-tuned version. The use of BDDs also allows us to  solve heretofore unsolved problems, like context-sensitive pointer analysis for  large programs.},
author = {Whaley, John and Avots, Dzintars and Carbin, Michael and Lam, Monica S.},
doi = {10.1007/11575467_8},
isbn = {3540297359},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {97--118},
title = {{Using Datalog with binary decision diagrams for program analysis}},
volume = {3780 LNCS},
year = {2005}
}
@article{Beyer2004,
abstract = { We have extended the software model checker BLAST to automatically generate test suites that guarantee full coverage with respect to a given predicate. More precisely, given a C program and a target predicate p, BLAST determines the set L of program locations which program execution can reach with p true, and automatically generates a set of test vectors that exhibit the truth of p at all locations in L. We have used BLAST to generate test suites and to detect dead code in C programs with up to 30 K lines of code. The analysis and test vector generation is fully automatic (no user intervention) and exact (no false positives).},
author = {Beyer, Dirk and Chlipala, Adam J. and Henzinger, Thomas A. and Jhala, Ranjit and Majumdar, Rupak},
doi = {10.1109/ICSE.2004.1317455},
isbn = {0-7695-2163-0},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
pages = {326--335},
title = {{Generating tests from counterexamples}},
volume = {26},
year = {2004}
}
@article{Cousot2001,
author = {Cousot, Patrick},
journal = {Informatics: 10 Years Back, 10 Years Ahead (LNCS 2000)},
pages = {138--156},
title = {{Abstract Interpretation Based Formal Methods and Future Challenges}},
year = {2001}
}
@article{Ku2007,
abstract = {Software model checking based on abstraction-refinement has recently achieved widespread success in verifying API conformance in device drivers, and we believe this success can be replicated for the problem of buffer overflow detection. This paper presents a publicly-available benchmark suite to help guide and evaluate this research. The benchmark consists of 298 code fragments of varying complexity capturing 22 buffer overflow vulnerabilities in 12 open source applications. We give a preliminary evaluation of the benchmark using the SatAbs model checker},
author = {Ku, Kelvin and Hart, Thomas E. and Chechik, Marsha and Lie, David},
doi = {10.1145/1321631.1321691},
isbn = {9781595938824},
journal = {Proceedings of the twenty-second IEEE/ACM international conference on Automated software engineering - ASE '07},
keywords = {array bounds checking,benchmark,buffer overflow,model checking},
pages = {389},
title = {{A buffer overflow benchmark for software model checkers}},
url = {http://dl.acm.org/citation.cfm?id=1321631.1321691},
year = {2007}
}
@article{Ball2001a,
abstract = {Model checking has been widely successful in validating and debugging designs in the hardware and protocol domains. However, state-space explosion limits the applicability of model checking tools, so model checkers typically operate on abstractions of systems. Recently, there has been significant interest in applying model checking to software. For infinite-state systems like software, abstraction is even more critical. Techniques for abstracting software are a prerequisite to making software model checking a reality. We present the first algorithm to automatically construct a predicate abstraction of programs written in an industrial programming language such as C, and its implementation in a tool — C2BP. The C2BP tool is part of the SLAM toolkit, which uses a combination of predicate abstraction, model checking, symbolic reasoning, and iterative refinement to statically check temporal safety properties of programs. Predicate abstraction of software has many applications, including detecting program errors, synthesizing program invariants, and improving the precision of program analyses through predicate sensitivity. We discuss our experience applying the C2BP predicate abstraction tool to a variety of problems, ranging from checking that list-manipulating code preserves heap invariants to finding errors in Windows NT device drivers.},
author = {Ball, Thomas and Majumdar, Rupak and Millstein, Todd and Rajamani, Sriram K.},
doi = {10.1145/381694.378846},
isbn = {1-58113-414-2},
issn = {03621340},
journal = {Proceedings of the ACM SIGPLAN conference on Programming language design and implementation (PLDI)},
number = {5},
pages = {203--213},
title = {{Automatic predicate abstraction of C programs}},
volume = {36},
year = {2001}
}
@article{Might2011,
abstract = {Abstract We present a functional approach to parsing unrestricted context-free grammars based on Brzozowski's derivative of regular expressions. If we consider context-free grammars as recursive regular expressions, Brzozowski's equational theory extends ...},
author = {Might, Matthew and Darais, David and Spiewak, Daniel},
doi = {10.1145/2034773.2034801},
isbn = {978-1-4503-0865-6},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {parsing},
number = {9},
pages = {189--195},
title = {{Parsing with derivatives}},
url = {http://dl.acm.org/citation.cfm?doid=2034574.2034801},
volume = {46},
year = {2011}
}
@article{Madsen2015,
abstract = {JavaScript is a language that is widely-used for both web- based and standalone applications such as those in the upcoming Windows 8 operating system. Analysis of JavaScript has long been known to be challenging due to its dynamic nature. On top of that, most JavaScript applications rely on large and complex libraries and frameworks, often written in a combination of JavaScript and native code such as C and C++. Stubs have been commonly employed as a partial specification mechanism to address the library problem; however, they are tedious to write, incomplete, and occasionally incorrect.   However, the manner in which library code is used within applications often sheds light on what library APIs return or consume as parameters. In this paper, we propose a technique which combines pointer analysis with use analysis to handle many challenges posed by large JavaScript libraries. Our approach enables a variety of applications, ranging from call graph discovery to auto-complete to supporting runtime optimizations. Our techniques have been implemented and empirically validated on a set of 25 Windows 8 JavaScript applications, averaging 1,587 lines of code, demonstrating a combination of scalability and precision.},
author = {Madsen, Magnus and Livshits, Benjamin and Fanning, Michael},
doi = {10.1145/2491411.2491417},
isbn = {978-1-4503-2237-9},
journal = {Proceedings of the 9th Joint Meeting on Foundations of Software Engineering (ESEC/FSE 2013)},
keywords = {JavaScript,frameworks,libraries,points-to analysis,use analysis},
pages = {499--509},
title = {{Practical Static Analysis of JavaScript Applications in the Presence of Frameworks and Libraries}},
url = {http://doi.acm.org/10.1145/2491411.2491417},
year = {2013}
}
@article{Hudak1999,
abstract = {Functional reactive programming, or FRP, is a style of programming based on two key ideas: continuous time-varying behaviors, and event-based reactivity. FRP is the essence of Fran [1,2], a domain-specific language for functional reactive graphics and animation, and has recently been used in the design of Frob [3,4], a domain-specific language for functional vision and robotics. In general, FRP can be viewed as an interesting language for describing hybrid systems, which are systems comprised of both analog (continuous) and digital (discrete) subsystems. Continuous behaviors can be thought of simply as functions from time to some value: Behavior a = Time -{\textgreater} a. For example: an image behavior may represent an animation; a Cartesian-point behavior may be a mouse; a velocity-vector behavior may be the control vector for a robot; and a tuple-of-distances behavior may be the input from a robot's sonar array. Both continuous behaviors and event-based reactivity have interesting pro erties worthy of independent study, but their integration is particularly interesting. At the core of the issue is that events are intended to cause discrete shifts in declarative behavior; i.e. not just shifts in the state of reactivity. Being declarative, the natural desire is for everything to be first-class and higher-order. But this causes interesting clashes in frames of reference, especially when time and space transformations are applied. In this talk the fundamental ideas behind FRP are presented, along with a discussion of various issues in its formal semantics. This is joint work with Conal Elliot at Microsoft Research, and John Peterson at Yale.},
author = {Hudak, Paul},
doi = {10.1007/3-540-49099-X_1},
isbn = {1581136056},
journal = {Proceedings of the 8th European Symposium on Programming (ESOP'99), LNCS 1576},
keywords = {domain-specific languages,frp,functional programming,haskell,hybrid modeling,synchronous dataflow languages},
pages = {1--1},
title = {{Functional Reactive Programming}},
volume = {1576},
year = {1999}
}
@article{Cadar2006,
abstract = {This paper presents EXE, an effective bug-finding tool that automatically generates inputs that crash real code. Instead of running code on manually or randomly constructed input, EXE runs it on symbolic input initially allowed to be "anything." As checked code runs, EXE tracks the constraints on each symbolic (i.e., input-derived) memory location. If a statement uses a symbolic value, EXE does not run it, but instead adds it as an input-constraint; all other statements run as usual. If code conditionally checks a symbolic expression, EXE forks execution, constraining the expression to be true on the true branch and false on the other. Because EXE reasons about all possible values on a path, it has much more power than a traditional runtime tool: (1) it can force execution down any feasible program path and (2) at dangerous operations (e.g., a pointer dereference), it detects if the current path constraints allow any value that causes a bug.When a path terminates or hits a bug, EXE automatically generates a test case by solving the current path constraints to find concrete values using its own co-designed constraint solver, STP. Because EXE's constraints have no approximations, feeding this concrete input to an uninstrumented version of the checked code will cause it to follow the same path and hit the same bug (assuming deterministic code).EXE works well on real code, finding bugs along with inputs that trigger them in: the BSD and Linux packet filter implementations, the udhcpd DHCP server, the pcre regular expression library, and three Linux file systems.},
author = {Cadar, Cristian and Ganesh, Vijay and Pawlowski, Peter M and Dill, David L and Engler, Dawson R},
doi = {10.1145/1180405.1180445},
isbn = {1595935185},
issn = {10949224},
journal = {Proceedings of the 13th ACM Conference on Computer and Communications Security (CCS '06)},
keywords = {attack generation, bug finding, constraint solving, dynamic analysis, symbolic execution, test case generation},
pages = {322--335},
title = {{EXE: Automatically Generating Inputs of Death}},
url = {http://portal.acm.org/citation.cfm?id=1455518.1455522},
year = {2006}
}
@article{Song2008,
abstract = {In this paper, we give an overview of the BitBlaze project, a new approach to computer security via binary analysis. In particular, BitBlaze focuses on building a unified binary analysis platform and using it to provide novel solutions to a broad spectrum of different security problems. The binary analysis platform is designed to enable accurate analysis, provide an extensible architecture, and combines static and dynamic analysis as well as program verification techniques to satisfy the common needs of security applications. By extracting security-related properties from binary programs directly, BitBlaze enables a principled, root-cause based approach to computer security, offering novel and effective solutions, as demonstrated with over a dozen different security applications.},
author = {Song, Dawn and Brumley, David and Yin, Heng and Caballero, Juan and Jager, Ivan and Kang, Min Gyung and Liang, Zhenkai and Newsome, James and Poosankam, Pongsin and Saxena, Prateek},
doi = {10.1007/978-3-540-89862-7_1},
isbn = {3540898611},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
keywords = {Binary analysis,Malware analysis and defense,Reverse engineering,Vulnerability analysis and defense},
pages = {1--25},
title = {{BitBlaze: A new approach to computer security via binary analysis}},
volume = {5352 LNCS},
year = {2008}
}
@article{Patai2009,
author = {Patai, Gergely},
journal = {Draft Proceedings of Implementation and Application of Functional Languages (IFL'09)},
pages = {126--140},
title = {{Eventless Reactivity from Scratch}},
year = {2009}
}
@article{Sergey2015a,
abstract = {Efficient concurrent programs and data structures rarely employ coarse-grained synchronization mechanisms (i.e., locks); instead, they implement custom synchronization patterns via fine-grained primitives, such as compare-and-swap. Due to sophisticated interference scenarios between threads, reasoning about such programs is challenging and error-prone, and can benefit from mechanization. In this paper, we present the first completely formalized framework for mechanized verification of full functional correctness of fine-grained concurrent programs. Our tool is based on the recently proposed program logic FCSL. It is implemented as an embedded domain-specific language in the dependently-typed language of the Coq proof assistant, and is powerful enough to reason about programming features such as higher-order functions and local thread spawning. By incorporating a uniform concurrency model, based on state-transition systems and partial commutative monoids, FCSL makes it possible to build proofs about concurrent libraries in a thread-local, compositional way, thus facilitating scalability and reuse: libraries are verified just once, and their specifications are used ubiquitously in client-side reasoning. We illustrate the proof layout in FCSL by example, and report on our experience of using FCSL to verify a number of concurrent programs.},
author = {Sergey, Ilya and Nanevski, Aleksandar and Banerjee, Anindya},
doi = {10.1145/2737924.2737964},
isbn = {978-1-4503-3468-6},
issn = {15232867},
journal = {Programming Language Design and Implementation},
keywords = {Compositional program verification,concurrency,dependent types,mechanized proofs,separation logic},
number = {4},
pages = {77--87},
title = {{Mechanized Verification of Fine-grained Concurrent Programs}},
url = {http://doi.acm.org/10.1145/2737924.2737964},
year = {2015}
}
@article{Liua,
author = {Liu, Peng and Tripp, Omer and Zhang, Charles},
isbn = {9781450330565},
keywords = {concurrency bugs,context-aware fixing},
pages = {318--329},
title = {{Grail: Context-Aware Fixing of Concurrency Bugs}},
year = {2014}
}
@article{Kashyap2014,
abstract = {JavaScript is used everywhere from the browser to the server, including desktops and mobile devices. However, the current state of the art in JavaScript static analysis lags far behind that of other languages such as C and Java. Our goal is to help remedy this lack. We describe JSAI, a formally specified, robust abstract interpreter for JavaScript. JSAI uses novel abstract domains to compute a reduced product of type inference, pointer analysis, control-flow analysis, string analysis, and integer and boolean constant propagation. Part of JSAI's novelty is user-configurable analysis sensitivity, i.e., context-, path-, and heap-sensitivity. JSAI is designed to be provably sound with respect to a specific concrete semantics for JavaScript, which has been extensively tested against a commercial JavaScript implementation. We provide a comprehensive evaluation of JSAI's performance and precision using an extensive benchmark suite, including real-world JavaScript applications, machine generated JavaScript code via Emscripten, and browser addons. We use JSAI's configurability to evaluate a large number of analysis sensitivities (some well-known, some novel) and observe some surprising results that go against common wisdom. These results highlight the usefulness of a configurable analysis platform such as JSAI.},
archivePrefix = {arXiv},
arxivId = {1403.3996},
author = {Kashyap, Vineeth and Dewey, Kyle and Kuefner, Ethan A and Wagner, John and Gibbons, Kevin and Sarracino, John and Wiedermann, Ben and Hardekopf, Ben},
doi = {10.1145/2635868.2635904},
eprint = {1403.3996},
isbn = {978-1-4503-3056-5},
journal = {Proceedings of the 22nd ACM SIGSOFT International Symposium on Foundations of Software Engineering},
keywords = {Abstract Interpretation,JavaScript Analysis},
pages = {121--132},
title = {{JSAI: A Static Analysis Platform for JavaScript}},
url = {http://doi.acm.org/10.1145/2635868.2635904},
year = {2014}
}
@article{Lee2012,
abstract = {The prevalent uses of JavaScript in web programming have revealed security vulnerability issues of JavaScript applications, which emphasizes the need for JavaScript analyzers to detect such issues. Recently, researchers have proposed several analyzers of JavaScript programs and some web service companies have developed various JavaScript engines. However, unfortunately, most of the tools are not documented well, thus it is very hard to understand and modify them. Or, such tools are often not open to the public. In this paper, we present formal specification and implementation of SAFE, a scalable analysis framework for ECMAScript, developed for the JavaScript research community. This is the very first attempt to provide both formal specification and its open-source implementation for JavaScript, compared to the existing approaches focused on only one of them. To make it more amenable for other researchers to use our framework, we formally define three kinds of intermediate representations for JavaScript used in the framework, and we provide formal specifications of translations between them. To be adaptable for adventurous future research including modifications in the original JavaScript syntax, we actively use open-source tools to automatically generate parsers and some intermediate representations. To support a variety of program analyses in various compilation phases, we design the framework to be as flexible, scalable, and pluggable as possible. Finally, our framework is publicly available, and some collaborative research using the framework is in progress.},
author = {Lee, Hongki and Won, Sooncheol and Jin, Joonho and Cho, Junhee and Ryu, Sukyoung},
journal = {International Workshop on Foundations of Object-Oriented Languages (FOOL 2012)},
keywords = {compiler, ECMAScript 5, formal semantics, formal specification, interpreter, JavaScript},
title = {{SAFE: Formal Specification and Implementation of a Scalable Analysis Framework for ECMAScript}},
year = {2012}
}
@inproceedings{Bae2014,
address = {New York, New York, USA},
author = {Bae, Sunggyeong and Cho, Hyunghun and Lim, Inho and Ryu, Sukyoung},
booktitle = {Proceedings of the 22nd ACM SIGSOFT International Symposium on Foundations of Software Engineering - FSE 2014},
doi = {10.1145/2635868.2635916},
isbn = {9781450330565},
keywords = {bug detection, JavaScript, static analysis, web application},
pages = {507--517},
publisher = {ACM Press},
title = {{SAFEWAPI: web API misuse detector for web applications}},
url = {http://dl.acm.org/citation.cfm?doid=2635868.2635916},
year = {2014}
}
@article{Kuhn2016,
abstract = {Since the year 1977, role modeling has been continuously investigated as a promising paradigm to model complex, dynamic systems. However, this research had almost no influence on the design of today's increasingly complex and context-sensitive software systems. The reason for that is twofold. First, most modeling languages focused either on the behavioral, relational or context-dependent nature of roles rather than combining them. Second, there is a lack of tool support for the design, validation, and generation of role-based software systems. In particular, there exists no graphical role modeling editor supporting the three natures as well as the various proposed constraints. To overcome this deficiency, we introduce the Full-fledged Role Modeling Editor (FRaMED), a graphical modeling editor embracing all natures of roles and modeling constraints featuring generators for a formal representation and source code of a role-based programming language. To show its applicability for the development of role-based software systems, an example from the banking domain is employed.},
author = {K{\"{u}}hn, Thomas and Bierzynski, Kay and Richly, Sebastian and A{\ss}mannn, Uwe},
doi = {10.1145/2997364.2997371},
isbn = {9781450344470},
keywords = {I65 [Simulation and Modeling],Model Validation and Analysis—Role-based Modeling},
pages = {132--136},
title = {{FRaMED: Full-Fledged Role Modeling Editor (Tool Demo)}},
year = {2016}
}
@article{Hathhorn2012,
author = {Hathhorn, Chris and Becchi, Michela and Harrison, William L. and Procter, Adam},
doi = {10.4204/EPTCS.102.11},
issn = {2075-2180},
journal = {Electronic Proceedings in Theoretical Computer Science},
number = {Ssv},
pages = {115--124},
title = {{Formal Semantics of Heterogeneous CUDA-C: A Modular Approach with Applications}},
url = {http://arxiv.org/abs/1211.6193v1},
volume = {102},
year = {2012}
}
@article{Park2016a,
abstract = {Now that HTML5 technologies are everywhere from web services to various platforms, assuring quality of web applications becomes very important. While web application developers use syntactic checkers and type-related bug detectors, extremely dynamic features and diverse execution environments of web applications make it particularly difficult to statically analyze them leading to too many false positives. Recently, researchers have developed static analyzers for JavaScript web applications addressing quirky JavaScript language semantics and browser environments, but they lack empirical studies on the practicality of such analyzers. In this paper, we collect 30 JavaScript web applications in the wild, analyze them using SAFE, the state-of-the-art JavaScript static analyzer with bug detection, and investigate false positives in the analysis results. After manually inspecting them, we classify 7 reasons that cause the false positives: W3C APIs, browser-specific APIs, JavaScript library APIs, dynamic file loading, dynamic code generation, asynchronous calls, and others. Among them, we identify 4 cases which are the sources of false positives that we can practically reduce. Rather than striving for sound analysis with unrealistic assumptions, we choose to be intentionally unsound to analyze web applications in the real world with less false positives. Our evaluation shows that the approach effectively reduces false positives in statically analyzing web applications in the wild.},
author = {Park, Joonyoung and Lim, Inho and Ryu, Sukyoung},
doi = {10.1145/2889160.2889227},
isbn = {9781450342056},
journal = {Proceedings of the 38th International Conference on Software Engineering Companion - ICSE '16},
keywords = {JavaScript, static analysis, false positives, web applications},
pages = {61--70},
title = {{Battles with false positives in static analysis of JavaScript web applications in the wild}},
url = {http://dl.acm.org/citation.cfm?doid=2889160.2889227},
year = {2016}
}
@article{Harris,
author = {Harris, Tim and Jones, Simon Peyton},
title = {{Lightweight Concurrency in GHC}}
}
@article{Midtgaard2012,
abstract = {We present a survey of control-flow analysis of functional programs, which has been the subject of extensive investigation throughout the past 30 years. Analyses of the control flow of functional programs have been formulated in multiple settings and have led to many different approximations, starting with the seminal works of Jones, Shivers, and Sestoft. In this paper, we survey control-flow analysis of functional programs by structuring the multitude of formulations and approximations and comparing them.},
author = {Midtgaard, Jan},
doi = {10.1145/2187671.2187672},
isbn = {0360-0300},
issn = {03600300},
journal = {ACM Computing Surveys},
number = {3},
pages = {1--33},
title = {{Control-flow analysis of functional programs}},
volume = {44},
year = {2012}
}
@article{Turing1938,
author = {Turing, A. M.},
doi = {10.1112/plms/s2-43.6.544},
issn = {1460244X},
journal = {Proceedings of the London Mathematical Society},
number = {1},
pages = {544--546},
title = {{On Computable Numbers, with an Application to the Entscheidungsproblem. A Correction}},
volume = {s2-43},
year = {1938}
}
@article{Hobor2008,
author = {Hobor, Aquinas and Appel, Andrew W. and Nardelli, Francesco Zappa},
doi = {10.1007/978-3-540-78739-6_27},
isbn = {3540787380},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
number = {April},
pages = {353--367},
title = {{Oracle semantics for concurrent separation logic}},
volume = {4960 LNCS},
year = {2008}
}
@incollection{Graf1997,
author = {Graf, Susanne and Saidi, Hassen},
doi = {10.1007/3-540-63166-6_10},
pages = {72--83},
title = {{Construction of abstract state graphs with PVS}},
url = {http://link.springer.com/10.1007/3-540-63166-6{\_}10},
year = {1997}
}
@article{Myer1968,
author = {Myer, T. H. and Sutherland, I. E.},
doi = {10.1145/363347.363368},
isbn = {0001-0782},
issn = {00010782},
journal = {Communications of the ACM},
keywords = {computer graphics, display programming, display channel, display generator, display processor design, display system, displays, graphic terminal, graphical interaction, graphics, remote},
number = {6},
pages = {410--414},
title = {{On the design of display processors}},
volume = {11},
year = {1968}
}
@article{Guha,
author = {Guha, Arjun and Saftoiu, Claudiu and Krishnamurthi, Shriram},
pages = {1--20},
title = {{Typing Local Control and State Using Flow Analysis}},
year = {2011}
}
@inproceedings{Turon2014,
abstract = {Weak memory models formalize the inconsistent behaviors that one can expect to observe in multithreaded programs running on modern hardware. In so doing, however, they complicate the already-difficult task of reasoning about correctness of concurrent code. Worse, they render impotent the sophisticated formal methods that have been developed to tame concurrency, which almost universally assume a strong (i.e., sequentially consistent) memory model. This paper introduces GPS, the first program logic to provide a full-fledged suite of modern verification techniques—including ghost state, protocols, and separation logic—for high-level, structured reasoning about weak memory. We demonstrate the effectiveness of GPS by applying it to challenging examples drawn from the Linux kernel as well as lock-free data structures. We also define the semantics of GPS and prove in Coq that it is sound with respect to the axiomatic C11 weak memory model.},
author = {Turon, Aaron and Vafeiadis, Viktor and Dreyer, Derek},
booktitle = {ACM International Conference on Object Oriented Programming Systems Languages},
doi = {10.1145/2660193.2660243},
isbn = {978-1-4503-2585-1},
issn = {0362-1340},
keywords = {c/c++,concurrency,program logic,separation logic,weak memory models},
pages = {691--707},
title = {{GPS: Navigating Weak Memory with Ghosts, Protocols, and Separation}},
url = {http://doi.acm.org/10.1145/2660193.2660243},
year = {2014}
}
@article{Birkedal2015,
abstract = {This report documents the program and the outcomes of Dagstuhl Seminar 15191 “Composi- tional Verification Methods for Next-Generation Concurrency”. The seminar was successful and facilitated a stimulating interchange between the theory and practice of concurrent programming, and thereby laid the ground for the development of compositional verification methods that can scale to handle the realities of next-generation concurrency.},
author = {Birkedal, Lars and Dreyer, Derek and Gardner, Philippa},
doi = {10.4230/DagRep.5.5.1},
journal = {Dagstuhl Reports},
keywords = {verification of concurrent programs, automated analysis, concurrent programming, logics, models},
number = {5},
pages = {1--23},
title = {{Compositional Verification Methods for Next-Generation Concurrency}},
volume = {5},
year = {2015}
}
@article{Zhang2014,
author = {Zhang, Lei},
title = {{DASE: Document-Assisted Symbolic Execution for Improving Automated Test Generation}},
year = {2014}
}
@article{Clarke2003,
abstract = {The state explosion problem remains a major hurdle in applying symbolic model checking to large hardware designs. State space abstraction, having been essential for verifying designs of industrial complexity, is typically a manual process, requiring considerable creativity and insight.In this article, we present an automatic iterative abstraction-refinement methodology that extends symbolic model checking. In our method, the initial abstract model is generated by an automatic analysis of the control structures in the program to be verified. Abstract models may admit erroneous (or "spurious") counterexamples. We devise new symbolic techniques that analyze such counterexamples and refine the abstract model correspondingly. We describe aSMV, a prototype implementation of our methodology in NuSMV. Practical experiments including a large Fujitsu IP core design with about 500 latches and 10000 lines of SMV code confirm the effectiveness of our approach.},
author = {Clarke, Edmund and Grumberg, Orna and Jha, Somesh and Lu, Yuan and Veith, Helmut},
doi = {10.1145/876638.876643},
issn = {0004-5411},
journal = {Journal of the ACM},
number = {5},
pages = {752--794},
title = {{Counterexample-Guided Abstraction Refinement for Symbolic Model Checking}},
url = {http://portal.acm.org/citation.cfm?id=876643},
volume = {50},
year = {2003}
}
@article{Whisnant2012,
author = {Whisnant, David},
journal = {Provided for Course Without Origin},
title = {{Relational Database Concepts for Beginners}},
year = {2012}
}
@article{Harris2008,
abstract = {Writing concurrent programs is notoriously difficult, and is of increasing practical importance. A particular source of concern is that even correctly-implemented concurrency abstractions cannot be composed together to form larger abstractions. In this paper we present a new concurrency model, based on transactional memory, that offers far richer composition. All the usual benefits of transactional memory are present (e.g. freedom from deadlock), but in addition we describe new modular forms of blocking and choice that have been inaccessible in earlier work.},
author = {Harris, Tim and Marlow, Simon and Jones, Simon Peyton and Herlihy, Maurice},
doi = {10.1145/1378704.1378725},
isbn = {1595930809},
issn = {00010782},
journal = {Communications of the ACM},
keywords = {algorithms,languages},
number = {8},
pages = {91--100},
title = {{Composable memory transactions}},
volume = {51},
year = {2008}
}
@inproceedings{Gu2015,
address = {New York, New York, USA},
author = {Gu, Ronghui and Koenig, J{\'{e}}r{\'{e}}mie and Ramananandro, Tahina and Shao, Zhong and Wu, Xiongnan (Newman) and Weng, Shu-Chun and Zhang, Haozhong and Guo, Yu},
booktitle = {Proceedings of the 42nd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages - POPL '15},
doi = {10.1145/2676726.2676975},
isbn = {9781450333009},
issn = {15232867},
keywords = {abstraction layer,certified compilers,certified os kernels,deep specification,modularity,program verification},
pages = {595--608},
publisher = {ACM Press},
title = {{Deep Specifications and Certified Abstraction Layers}},
url = {http://doi.acm.org/10.1145/2676726.2676975},
year = {2015}
}
@article{Chatterjee2016a,
archivePrefix = {arXiv},
arxivId = {1611.01063},
author = {Chatterjee, Krishnendu and Novotn{\'{y}}, Petr and {\v{Z}}ikeli{\'{c}}, Đorđe},
eprint = {1611.01063},
keywords = {martingales,probabilistic programs,termination},
title = {{Stochastic Invariants for Probabilistic Termination}},
year = {2016}
}
@article{Epstein2012,
abstract = {We present Cloud Haskell, a domain-specific language for developing programs for a distributed computing environment. Implemented as a shallow embedding in Haskell, it provides a message-passing communication model, inspired by Erlang, without introducing incompatibility with Haskell's established shared-memory concurrency. A key contribution is a method for serializing function closures for transmission across the network. Cloud Haskell has been implemented; we present example code and some preliminary performance measurements.},
author = {Epstein, Jeff and Black, Andrew P. and Peyton-Jones, Simon},
doi = {10.1145/2096148.2034690},
isbn = {9781450308601},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {erlang,haskell,message-passing},
number = {12},
pages = {118},
title = {{Towards Haskell in the cloud}},
volume = {46},
year = {2012}
}
@article{Amin2016,
abstract = {Focusing on path-dependent types, the paper develops foundations for Scala from first principles. Starting from a simple calculus D-{\textless}: of dependent functions, it adds records, intersections and recursion to arrive at DOT, a calculus for dependent object types. The paper shows an encoding of System F with subtyping in D-{\textless}: and demonstrates the expressiveness of DOT by modeling a range of Scala constructs in it.},
author = {Amin, Nada and Gr{\"{u}}tter, Samuel and Odersky, Martin and Rompf, Tiark and Stucki, Sandro},
doi = {10.1007/978-3-319-30936-1_14},
isbn = {9783319309354},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
keywords = {Calculus,Dependent types,Scala},
pages = {249--272},
title = {{The essence of dependent object types}},
volume = {9600},
year = {2016}
}
@article{StatefulManifestContracts2016,
keywords = {assertion, computational effects, contracts, dynamic verification, refinement types},
pages = {1--14},
title = {{Stateful Manifest Contracts}},
year = {2016}
}
@article{Willsey,
author = {Willsey, Max and Pfenning, Frank},
pages = {1--10},
title = {{Design and Implementation of Concurrent C0}}
}
@article{Rolandi2011,
abstract = {Figures are an essential part of the scientific paper. Scientists often learn how to create figures by trial and error. A scientist, a graphic designer, and a cognitive psychologist have teamed up to write a brief guide to ease this process. This guide, aimed at researchers in scientific fields, provides an easy-to-follow set of instructions to design effective figures.},
author = {Rolandi, Marco and Cheng, Karen and P{\'{e}}rez-Kriz, Sarah},
doi = {10.1002/adma.201102518},
isbn = {1521-4095},
issn = {09359648},
journal = {Advanced Materials},
number = {38},
pages = {4343--4346},
pmid = {21960472},
title = {{A brief guide to designing effective figures for the scientific paper}},
volume = {23},
year = {2011}
}
@article{Chlipala,
author = {Chlipala, Adam},
title = {{A Program Optimization for Automatic Database Result Caching}}
}
@article{Paykin2016,
author = {Paykin, Jennifer and Rand, Robert and Zdancewic, Steve},
keywords = {denotational semantics, linear types, quantum circuits, quantum programming languages},
pages = {1--13},
title = {{QWIRE: A QRAM-Inspired Quantum Circuit Language}},
year = {2016}
}
@article{Dinsdale-Young2010,
abstract = {Abstraction is key to understanding and reasoning about large computer systems. Abstraction is simple to achieve if the relevant data structures are disjoint, but rather difficult when they are partially shared, as is often the case for concurrent modules. We present a program logic for reasoning abstractly about data structures that provides a fiction of disjointness and permits compositional reasoning. The internal details of a module are completely hidden from the client by concurrent abstract predicates. We reason about a module's implementation using separation logic with permissions, and provide abstract specifications for use by client programs using concurrent abstract predicates. We illustrate our abstract reasoning by building two implementations of a lock module on top of hardware instructions, and two implementations of a concurrent set module on top of the lock module.},
author = {Dinsdale-Young, Thomas and Dodds, Mike and Gardner, Philippa and Parkinson, Matthew J. and Vafeiadis, Viktor},
doi = {10.1007/978-3-642-14107-2_24},
isbn = {3642141064},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {504--528},
title = {{Concurrent abstract predicates}},
volume = {6183 LNCS},
year = {2010}
}
@article{Madhavan2016,
author = {Madhavan, Ravichandhran},
pages = {1--19},
title = {{Resource Verification for Higher-Order Functions with Memoization}},
year = {2016}
}
@article{Barthe,
author = {Barthe, Gilles and Gaboardi, Marco and Hoffmann, Jan},
keywords = {complexity analysis,relational reasoning,type and},
title = {{Relational Cost Analysis}}
}
@article{Odersky2016,
author = {Odersky, Martin and Martres, Guillaume and Petrashko, Dmitry},
doi = {10.1145/2998392.2998400},
isbn = {9781450346481},
journal = {Scala},
keywords = {dependent,dot,dotty,higher-kinded,higher-order genericity,scala,type constructor polymorphism,types},
pages = {51--60},
title = {{Implementing Higher-Kinded Types in Dotty}},
year = {2016}
}
@article{Assaf2016,
abstract = {We show how static analysis for secure information flow can be expressed and proved correct entirely within the framework of abstract interpretation. The key idea is to define a Galois connection that directly approximates the hyperproperty of interest. To enable use of such Galois connections, we introduce a fixpoint characterisation of hypercollecting semantics, i.e. a "set of set" transformer. This makes it possible to systematically derive static analyses for hyperproperties entirely within the calculational framework of abstract interpretation. We evaluate this technique by deriving example static analyses. For qualitative information flow, we derive a dependence analysis similar to the logic of Amtoft and Banerjee (SAS'04) and the type system of Hunt and Sands (POPL'06). For quantitative information flow, we derive a novel cardinality analysis that bounds the leakage conveyed by a program instead of simply deciding whether it exists. This encompasses problems that are hypersafety but not k-safety. We put the framework to use and introduce variations that achieve precision rivalling the most recent and precise static analyses for information flow.},
archivePrefix = {arXiv},
arxivId = {1608.01654},
author = {Assaf, Mounir and Naumann, David A. and Signoles, Julien and Totel, {\'{E}}ric and Tronel, Fr{\'{e}}d{\'{e}}ric},
eprint = {1608.01654},
isbn = {9781450346603},
keywords = {abstract interpretation,information flow,static analysis},
title = {{Hypercollecting Semantics and its Application to Static Analysis of Information Flow}},
url = {http://arxiv.org/abs/1608.01654},
year = {2016}
}
@article{Chang2017,
author = {Chang, Stephen and Greenman, Ben},
isbn = {9781450346603},
keywords = {macros,type systems,typed embedded dsls},
title = {{Type Systems as Macros}},
year = {2017}
}
@article{Jafery2016,
abstract = {A long-standing shortcoming of statically typed functional languages is that type checking does not rule out pattern-matching failures (run-time match exceptions). Refinement types distinguish different values of datatypes; if a program annotated with refinements passes type checking, pattern-matching failures become impossible. Unfortunately, refinement is a monolithic property of a type, exacerbating the difficulty of adding refinement types to nontrivial programs. Gradual typing has explored how to incrementally move between static typing and dynamic typing. We develop a type system of gradual sums that combines refinement with imprecision. Then, we develop a bidirectional version of the type system, which rules out excessive imprecision, and give a type-directed translation to a target language with explicit casts. We prove that the static sublanguage cannot have match failures, that a well-typed program remains well-typed if its type annotations are made less precise, and that making annotations less precise causes target programs to fail later. Several of these results correspond to criteria for gradual typing given by Siek et al. (2015).},
archivePrefix = {arXiv},
arxivId = {1611.02392},
author = {Jafery, Khurram A. and Dunfield, Joshua},
eprint = {1611.02392},
keywords = {gradual typing,refinement types},
pages = {1--60},
title = {{Sums of Uncertainty: Refinements Go Gradual}},
url = {http://arxiv.org/abs/1611.02392},
year = {2016}
}
@article{Leijen2016,
author = {Leijen, Daan},
number = {August},
title = {{Type Directed Compilation of Row-typed Algebraic Effects}},
year = {2016}
}
@article{Krogh-jespersen,
author = {Krogh-Jespersen, Morten and Svendsen, Kasper and Birkedal, Lars},
keywords = {automatic parallelisation, logical relations, program transformation, separation logic, type-and-effect system},
title = {{A Relational Model of Types-and-Effects in Higher-Order Concurrent Separation Logic}}
}
@article{Tofte1994,
abstract = {We present a translation scheme for the polymorphically typed call-by-value $\lambda$-calculus. All runtime values, including function closures, are put into regions. The store consists of a stack of regions. Region inference and effect inference are used to infer where regions can be allocated and de-allocated. Recursive functions are handled using a limited form of polymorphic recursion. The translation is proved correct with respect to a store semantics, which models a region-based run-time system. Experimental results suggest that regions tend to be small, that region allocation is frequent and that overall memory demands are usually modest, even without garbage collection.},
author = {Tofte, Mads and Talpin, Jean-Pierre},
doi = {10.1145/174675.177855},
isbn = {0897916360},
issn = {07308566},
journal = {Proceedings of the 21st ACM SIGPLAN-SIGACT symposium on Principles of programming languages  - POPL '94},
pages = {188--201},
title = {{Implementation of the typed call-by-value $\lambda$-calculus using a stack of regions}},
url = {http://portal.acm.org/citation.cfm?doid=174675.177855},
year = {1994}
}
@article{Grigore2016a,
abstract = {This note proves that nominal subtyping with contravariance is undecidable even in the absence of multiple instantiation inheritance, thus solving an open problem posed by Kennedy and Pierce in 2007.},
archivePrefix = {arXiv},
arxivId = {1605.05274},
author = {Grigore, Radu},
eprint = {1605.05274},
keywords = {decidability,java,subtype checking},
number = {0},
pages = {1--6},
title = {{Java Generics are Turing Complete}},
url = {http://arxiv.org/abs/1605.05274},
year = {2016}
}
@article{Moerman2016,
abstract = {We present an Angluin-style algorithm to learn nominal automata, which are acceptors of languages over infinite (structured) alphabets. The abstract approach we take allows us to seamlessly extend known variations of the algorithm to this new setting. In particular we can learn a subclass of nominal non-deterministic automata. An implementation using a recently developed Haskell library for nominal computation is provided for preliminary experiments.},
archivePrefix = {arXiv},
arxivId = {arXiv:1607.06268v1},
author = {Moerman, Joshua and Sammartino, Matteo and Silva, Alexandra and Klin, Bartek and Szynwelski, Micha{\l}},
doi = {10.1145/3009837.3009879},
eprint = {arXiv:1607.06268v1},
title = {{Learning nominal automata}},
year = {2016}
}
@article{Lago2016,
abstract = {We introduce a Geometry of Interaction model for higher-order quantum computation, and prove its adequacy for a full quantum programming language in which entanglement, duplication, and recursion are all available. Our model comes with a multi-token machine, a proof net system, and a PCF-style language. The approach we develop is not specific to quantum computation, and our model is an instance of a new framework whose main feature is the ability to model commutative effects in a parallel setting. Being based on a multi-token machine equipped with a memory, it has a concrete nature which makes it well suited for building low-level operational descriptions of higher-order languages.},
archivePrefix = {arXiv},
arxivId = {1610.09629},
author = {Lago, Ugo Dal and Faggian, Claudia and Valiron, Benoit and Yoshimizu, Akira},
eprint = {1610.09629},
keywords = {geometry of interaction,memory structure,quantum},
title = {{The Geometry of Parallelism. Classical, Probabilistic, and Quantum Effects}},
url = {http://arxiv.org/abs/1610.09629},
year = {2016}
}
@article{Kiselyova,
author = {Kiselyov, Oleg and Biboudis, Aggelos and Palladinos, Nick and Smaragdakis, Yannis},
keywords = {code generation, multi-stage programming, optimization, stream fusion, streams},
title = {{Stream Fusion, to Completeness}}
}
@article{Hoffmann,
author = {Hoffmann, Jan},
keywords = {amortized analysis, LP solving, resource bound analysis, static analysis, type inference, type systems},
title = {{Towards Automatic Resource Bound Analysis for OCaml}}
}
@article{Ilik2015,
abstract = {Lambda calculi with algebraic data types lie at the core of functional programming languages and proof assistants, but conceal at least two fundamental theoretical problems already in the presence of the simplest non-trivial data type, the sum type. First, we do not know of an explicit and implemented algorithm for deciding the beta-eta-equality of terms---and this in spite of the first decidability results proven two decades ago. Second, it is not clear how to decide when two types are essentially the same, i.e. isomorphic, in spite of the meta-theoretic results on decidability of the isomorphism. In this paper, we present the exp-log normal form of types---derived from the representation of exponential polynomials via the unary exponential and logarithmic functions---that any type built from arrows, products, and sums, can be isomorphically mapped to. The type normal form can be used as a simple heuristic for deciding type isomorphism, thanks to the fact that it is a systematic application of the high-school identities. We then show that the type normal form allows to reduce the standard beta-eta equational theory of the lambda calculus to a specialized version of itself, while preserving the completeness of equality on terms. We end by describing an alternative representation of normal terms of the lambda calculus with sums, together with a Coq-implemented converter into/from our new term calculus. The difference with the only other previously implemented heuristic for deciding interesting instances of eta-equality by Balat, Di Cosmo, and Fiore, is that we exploit the type information of terms substantially and this often allows us to obtain a canonical representation of terms without performing sophisticated term analyses.},
archivePrefix = {arXiv},
arxivId = {1502.04634},
author = {Ilik, Danko},
eprint = {1502.04634},
keywords = {eta equality,isomorphism,normal term,normal type,sum type,type,type-directed partial evaluation},
pages = {1--13},
title = {{The exp-log normal form of types}},
url = {http://arxiv.org/abs/1502.04634},
year = {2015}
}
@article{Types,
author = {Reynolds, John C.},
journal = {Information Processing 83 (R. E. A. Mason, ed.), IFIP},
pages = {513--523},
title = {{Types, Abstraction and Parametric Polymorphism}},
year = {1983}
}
@article{Raina2016,
author = {Raina, Sagar and Kaza, Siddharth and Taylor, Blair},
doi = {10.1145/2839509.2844609},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {auto-grading,buffer overflow,cs0,cs1,cs2,input,instant-feedback,integer overflow,interactive learning,learning sciences,modules,security injections,validation},
pages = {144--149},
title = {{Security Injections 2.0: Increasing Ability to Apply Secure Coding Knowledge using Segmented and Interactive Modules in CS0}},
year = {2016}
}
@article{Amin2016a,
author = {Amin, Nada},
pages = {1--15},
title = {{LMS-Verify: Abstraction Without Regret for Verified Systems Programming}},
year = {2016}
}
@article{Kopczy2016,
author = {Kopczy{\'{n}}ski, Eryk and Toru{\'{n}}czyk, Szymon},
title = {{LOIS: Syntax and Semantics}},
year = {2016}
}
@article{Flur,
author = {Flur, Shaked and Gray, Kathryn E and Sezgin, Ali and Sewell, Peter},
keywords = {relaxed memory models, mixed-size accesses, semantics, ARM, IBM POWER, x86, ISA},
title = {{Mixed-Size Concurrency: ARM, POWER, C/C++11, and SC}}
}
@article{Zhang2016a,
abstract = {The growing popularity and adoption of differential privacy in academic and industrial settings has resulted in the development of increasingly sophisticated algorithms for releasing information while preserving privacy. Accompanying this phenomenon is the natural rise in the development and publication of incorrect algorithms, thus demonstrating the necessity of formal verification tools. However, existing formal methods for differential privacy face a dilemma: methods based on customized logics can verify sophisticated algorithms but come with a steep learning curve and significant annotation burden on the programmers, while existing type systems lack expressive power for some sophisticated algorithms. In this paper, we present AutoPriv, a simple imperative language that strikes a better balance between expressive power and usefulness. The core of AutoPriv is a novel relational type system that separates relational reasoning from privacy budget calculations. With dependent types, the type system is powerful enough to verify sophisticated algorithms where the composition theorem falls short. In addition, the inference engine of AutoPriv infers most of the proof details, and even searches for the proof with minimal privacy cost when multiple proofs exist. We show that AutoPriv verifies sophisticated algorithms with little manual effort.},
archivePrefix = {arXiv},
arxivId = {1607.08228},
author = {Zhang, Danfeng and Kifer, Daniel},
eprint = {1607.08228},
pages = {1--15},
title = {{AutoPriv: Automating Differential Privacy Proofs}},
url = {http://arxiv.org/abs/1607.08228},
year = {2016}
}
@article{Brookes2006,
author = {Brookes, Stephen},
number = {March},
pages = {1--80},
title = {{A semantics for concurrent permission logic}},
year = {2006}
}
@article{Dolan2016,
abstract = {We present a type system combining subtyping and ML-style parametric polymorphism. Unlike previous work, our system supports type inference and has compact principal types. We demonstrate this system in the minimal language MLsub, which types a strict superset of core ML programs. This is made possible by keeping a strict separation between the types used to describe inputs and those used to describe outputs, and extending the classical unification algorithm to handle subtyping constraints between these input and output types. Principal types are kept compact by type simplification, which exploits deep connections between subtyping and the algebra of regular languages. An implementation is available online.},
author = {Dolan, Stephen and Mycroft, Alan},
keywords = {algebra, polymorphism, subtyping, type inference},
title = {{Polymorphism, subtyping and type inference in MLsub}},
year = {2016}
}
@article{Bouajjani2016,
abstract = {Causal consistency is one of the most adopted consistency criteria for distributed implementations of data structures. It ensures that operations are executed at all sites according to their causal precedence. We address the issue of verifying automatically whether the executions of an implementation of a data structure are causally consistent. We consider two problems: (1) checking whether one single execution is causally consistent, which is relevant for developing testing and bug finding algorithms, and (2) verifying whether all the executions of an implementation are causally consistent. We show that the first problem is NP-complete. This holds even for the read-write memory abstraction, which is a building block of many modern distributed systems. Indeed, such systems often store data in key-value stores, which are instances of the read-write memory abstraction. Moreover, we prove that, surprisingly, the second problem is undecidable, and again this holds even for the read-write memory abstraction. However, we show that for the read-write memory abstraction, these negative results can be circumvented if the implementations are data independent, i.e., their behaviors do not depend on the data values that are written or read at each moment, which is a realistic assumption.},
archivePrefix = {arXiv},
arxivId = {1611.00580},
author = {Bouajjani, Ahmed and Enea, Constantin and Guerraoui, Rachid and Hamza, Jad},
eprint = {1611.00580},
keywords = {causal consistency, distributed systems, model checking, static program analysis},
title = {{On Verifying Causal Consistency}},
url = {http://arxiv.org/abs/1611.00580},
year = {2016}
}
@article{Wickerson2016,
author = {Wickerson, John and Batty, Mark and Sorensen, Tyler and Constantinides, George A},
keywords = {C, constraint solving, GPU, graphics processor, model checking, OpenCL, program synthesis, shared memory concurrency, weak memory models},
title = {{Automatically Comparing Memory Consistency Models}},
year = {2016}
}
@article{Antoni2017,
author = {D'Antoni, Loris},
isbn = {9781450346603},
keywords = {MSO logic, symbolic automata, finite strings, satisfiability, M2L-STR, WS1S},
title = {{Monadic Second-Order Logic on Finite Sequences}},
year = {2017}
}
@article{Kumar2016,
author = {Kumar, Ananya and Blelloch, Guy E and Harper, Robert},
keywords = {arrays, concurrency, cost semantics, functional data structures, parallel, persistence},
pages = {1--13},
title = {{Parallel Functional Arrays}},
year = {2016}
}
@article{MetricPreservation2016,
title = {{A Semantic Account of Metric Preservation}},
year = {2016}
}
@article{Tafliovich2016,
author = {Tafliovich, Anya and Petersen, Andrew and Campbell, Jennifer},
doi = {10.1145/2839509.2844647},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {evaluation, undergraduate software development project, motivating students, student teamwork},
pages = {181--186},
title = {{Evaluating Student Teams: Do Educators Know What Students Think?}},
year = {2016}
}
@article{Nandi2016,
abstract = {Hackathons are fast-paced events where competitors work in teams to go from an idea to working software or hardware within a single day or a weekend and demonstrate their creation to a live audience of peers. Due to the “fun” and informal nature of such events, they make for excellent informal learning platforms that attract a diverse spectrum of students, especially those typically uninterested in traditional classroom settings. In this paper, we investigate the informal learning aspects of Ohio State's annual hackathon events over the past two years, with over 100 student participants in 2013 and over 200 student participants in 2014. Despite the competitive nature of such events, we observed a significant amount of peer-learning – students teaching each other how to solve specific challenges and learn new skills. The events featured mentors from both the university and industry, who provided round-the-clock hands-on support, troubleshooting and advice. Due to the gamified format of the events, students were heavily motivated to learn new skills due to practical applicability and peer effects, rather than merely academic metrics. Some teams continued their hacks as long-term projects, while others formed new student groups to host lectures and practice building prototypes on a regular basis. Using a combined analysis of post-event surveys, student academic records and source-code commit log data from the event, we share insights, demographics, statistics and anecdotes from hosting these hackathons.},
author = {Nandi, Arnab and Mandernach, Meris},
doi = {10.1145/2839509.2844590},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
pages = {346--351},
title = {{Hackathons as an informal learning platform}},
url = {http://doi.org/10.1145/2839509.2844590},
year = {2016}
}
@article{Hu2016,
author = {Hu, Chenglie},
doi = {10.1145/2839509.2844563},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {design education,software design,teaching of software design},
pages = {199--204},
title = {{Can Students Design Software? The Answer Is More Complex Than You Think}},
year = {2016}
}
@article{Tamer2016,
author = {Tamer, Bur{\c{c}}in and Stout, Jane G},
doi = {10.1145/2839509.2844573},
isbn = {978-1-4503-3685-7},
journal = {Proceedings of the 47th ACM Technical Symposium on Computing Science Education},
keywords = {career,professor,undergraduate education,undergraduate research experiences,underrepresented students},
pages = {114--119},
title = {{Understanding How Research Experiences for Undergraduate Students May Foster Diversity in the Professorate}},
url = {http://doi.acm.org/10.1145/2839509.2844573},
year = {2016}
}
@article{Podelski2005,
keywords = {fair termination, liveness, software model checking, transition predicate abstraction},
author = {Podelski, Andreas and Rybalchenko, Andrey},
doi = {10.1145/1047659.1040317},
isbn = {158113830X},
issn = {03621340},
journal = {Proc. POPL},
number = {1},
pages = {132--144},
title = {{Transition predicate abstraction and fair termination}},
url = {http://portal.acm.org/citation.cfm?doid=1047659.1040317},
volume = {40},
year = {2005}
}
@article{Shankar2009,
abstract = {Automated deduction uses computation to perform symbolic logical reasoning. It has been a core technology for program verification from the very beginning. Satisfiability solvers for propositional and first-order logic significantly automate the task of deductive program verification. We introduce some of the basic deduction techniques used in software and hardware verification and outline the theoretical and engineering issues in building deductive verification tools. Beyond verification, deduction techniques can also be used to support a variety of applications including planning, program optimization, and program synthesis},
author = {Shankar, Natarajan},
doi = {10.1145/1592434.1592437},
isbn = {0360-0300},
issn = {03600300},
journal = {ACM Computing Surveys},
number = {4},
pages = {1--56},
title = {{Automated deduction for verification}},
volume = {41},
year = {2009}
}
@article{Backman2016,
author = {Backman, Nathan},
doi = {10.1145/2839509.2844648},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {application security,capture the flag,hacking},
pages = {603--608},
title = {{Facilitating a Battle Between Hackers: Computer Security Outside of the Classroom}},
year = {2016}
}
@article{Becker2016,
abstract = {One of the many challenges novice programmers face from the time they write their first program is inadequate compiler error messages. These messages report details on errors the programmer has made and are the only feedback the programmer gets from the compiler. For students they play a particularly essential role as students often have little experience to draw upon, leaving compiler error messages as their primary guidance on error correction. However these messages are frequently inadequate, presenting a barrier to progress and are often a source of discouragement. We have designed and implemented an editor that provides enhanced compiler error messages and conducted a controlled empirical study with CS1 students learning Java. We find a reduced frequency of overall errors and errors per student. We also identify eight frequent compiler error messages for which enhancement has a statistically significant effect. Finally we find a reduced number of repeated errors. These findings indicate fewer students struggling with compiler error messages.},
author = {Becker, Brett A},
doi = {10.1145/2839509.2844584},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {compiler errors, CS1, debugging, error messages, errors, feedback, Java, novice, programming, syntax errors},
pages = {126--131},
title = {{An Effective Approach to Enhancing Compiler Error Messages}},
year = {2016}
}
@article{Dragon2016,
author = {Dragon, Toby and Dickson, Paul E},
doi = {10.1145/2839509.2844607},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {computer science education,memory diagram,pedagogy,program memory traces,tracing},
pages = {546--551},
title = {{Memory Diagrams: A Consistant Approach Across Concepts and Languages}},
year = {2016}
}
@article{Kawash2016,
author = {Kawash, Jalal and Kuipers, Andrew and Manzara, Leonard and Collier, Robert},
doi = {10.1145/2839509.2844552},
isbn = {9781450336857},
keywords = {assembly language,computer science education,hardware,programming,raspberry pi,software interface},
pages = {498--503},
title = {{Undergraduate Assembly Language Instruction Sweetened with the Raspberry Pi}},
year = {2016}
}
@article{Boese2016,
abstract = {Computer Science education has to change - the students are demanding a new paradigm in this Just Google It era [3][8]. This paper discusses what Just in Time Learning is, how it is more effective than the traditional educational process, and how to change education to embrace the Internet through incorporating the Just-In-Time Learning model. There are five parts to incorporating the Just-In-Time Learning model: one – recognizing that the textbook is dead, as students Just Google It, two – help students learn how to vet the information they find online, three – incorporate real-world problems and support creative student ideas, four – modify the classroom to include an active-learning environment to fully support Just-In-Time Learning, and five – the role of the teacher is now as a tutor, helping students learn and learn how to learn. By incorporating these five parts of the Just- In-Time Learning model, there is no longer the concept of cheating, and students are learning the core necessary skills: problem-solving, critical thinking, good decision making, self- learning, and effective communication.},
author = {Boese, Elizabeth},
doi = {10.1145/2839509.2844583},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {active-learning, computer science education, flipped, just-in-case learning model, just google it, just-in-time learning model},
pages = {341--345},
title = {{Just-In-Time Learning for the Just Google It Era}},
year = {2016}
}
@article{Bloomfield2016,
author = {Bloomfield, Aaron},
doi = {10.1145/2839509.2844632},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {icpc,programming contest,strategy},
pages = {609--614},
title = {{A Programming Contest Strategy Guide}},
year = {2016}
}
@article{Adams2016,
author = {Adams, Joel C and Crain, Patrick A and Dilley, Christopher P and Unger, Javin B and Stel, Mark B Vander},
doi = {10.1145/2839509.2844557},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {graphics,image processing,integration,library,loop,mandelbrot},
pages = {473--478},
title = {{Seeing Is Believing: Helping Students Visualize Multithreaded Behavior}},
year = {2016}
}
@article{Clarke-midura2016,
author = {Clarke-Midura, Jody and Close, Kevin},
doi = {10.1145/2839509.2844581},
isbn = {9781450336857},
journal = {Proceedings of the 47th ACM Technical Symposium on Computer Science Education (SIGCSE '16)},
keywords = {app inventor,girls,near peer mentoring,self-efficacy},
pages = {297--302},
title = {{Investigating the Role of Being a Mentor as a Way of Increasing Interest in CS}},
year = {2016}
}
@article{Feng2007a,
abstract = {We study the relationship between Concurrent Separation Logic (CSL) and the assume-guarantee (A-G) method (a.k.a. rely-guarantee method). We show in three steps that CSL can be treated as a specialization of the A-G method for well-synchronized concurrent programs. First, we present an A-G based program logic for a low-level language with built-in locking primitives. Then we extend the program logic with explicit separation of “private data” and “shared data”, which provides better memory modularity. Finally, we show that CSL (adapted for the low-level language) can be viewed as a specialization of the extended A-G logic by enforcing the invariant that “shared resources are well-formed outside of critical regions”. This work can also be viewed as a different approach (from Brookes') to proving the soundness of CSL: our CSL inference rules are proved as lemmas in the A-G based logic, whose soundness is established following the syntactic approach to proving soundness of type systems. },
author = {Feng, Xinyu and Ferreira, Rodrigo and Shao, Zhong},
doi = {10.1007/978-3-540-71316-6_13},
isbn = {354071314X},
issn = {03029743},
journal = {European Symposium on Programming (ESOP)},
pages = {173--188},
title = {{On the Relationship between Concurrent Separation Logic and Assume-Guarantee Reasoning}},
year = {2007}
}
@article{Loginov2006,
author = {Loginov, Alexey and Reps, Thomas W and Sagiv, Mooly},
isbn = {3540377565},
issn = {16113349},
journal = {13th International Static Analysis Symposium (SAS)},
pages = {261--279},
title = {{Automated Verification of the Deutsch-Schorr-Waite Tree-Traversal Algorithm}},
volume = {4134},
year = {2006}
}
@article{Matsakis2014,
author = {Matsakis, Nicholas D and {Klock II}, Felix S},
doi = {10.1145/2663171.2663188},
isbn = {978-1-4503-3217-0},
issn = {1094-3641},
journal = {Proceedings of the 2014 ACM SIGAda Annual Conference on High Integrity Language Technology},
keywords = {affine type systems,memory management,rust,systems programming},
pages = {103--104},
title = {{The Rust Language}},
url = {http://doi.acm.org/10.1145/2663171.2663188},
year = {2014}
}
@article{Tassarottia,
abstract = {Read-Copy-Update (RCU) is a technique for letting multiple readers safely access a data structure while a writer concurrently modifies it. It is used heavily in the Linux kernel in situations where fast reads are important and writes are infrequent. Optimized implementations rely only on the weaker memory orderings provided by modern hard-ware, avoiding the need for expensive synchronization instructions (such as memory barriers) as much as possible. Using GPS, a recently developed program logic for the C/C++11 memory model, we verify an implementation of RCU for a singly-linked list assuming " release-acquire " semantics. Although release-acquire synchronization is stronger than what is required by real RCU implementations, it is nonetheless significantly weaker than the assumption of sequential consistency made in prior work on RCU verification. Ours is the first formal proof of correctness for an implementation of RCU under a weak memory model.},
author = {Tassarotti, Joseph and Dreyer, Derek and Vafeiadis, Viktor},
doi = {10.1145/2737924.2737992},
isbn = {9781450334686},
keywords = {C/C++,F.3.1 [Logics and Meanings of Programs],Formal Definitions and Theory,Program logic,RCU,Separation logic,Theory,Verification,Concurrency,Weak memory models},
pages = {1--11},
title = {{Verifying Read-Copy-Update in a Logic for Weak Memory}},
year = {2015}
}
@article{Haller,
author = {Haller, Philipp and Geries, Simon and Eichberg, Michael and Salvaneschi, Guido},
isbn = {9781450346481},
keywords = {asynchronous programming,concurrent programming,deterministic concurrency,scala,static analysis},
pages = {11--20},
title = {{Reactive Async: Expressive Deterministic Concurrency}},
year = {2016}
}
@article{Tofte1997,
abstract = {This paper describes a memory management discipline for programs that perform dynamic memory allocation and de-allocation. At runtime, all values are put into regions. The store consists of a stack of regions. All points of region allocation and deallocation are inferred automatically, using a type and effect based program analysis. The scheme does not assume the presence of a garbage collector. The scheme was first presented by Tofte and Talpin (1994); subsequently, it has been tested in The ML Kit with Regions, a region-based, garbage-collection free implementation of the Standard ML Core language, which includes recursive datatypes, higher-order functions and updatable references (Birkedal et al. 96, Elsman and Hallenberg 95). This paper defines a region-based dynamic semantics for a skeletal programming language extracted from Standard ML. We present the inference system which specifies where regions can be allocated and de-allocated and a detailed proof that the system is sound wi...},
author = {Tofte, Mads and Talpin, Jean-Pierre},
doi = {10.1006/inco.1996.2613},
issn = {08905401},
journal = {Information and Computation},
number = {2},
pages = {109--176},
title = {{Region-Based Memory Management}},
url = {http://linkinghub.elsevier.com/retrieve/doi/10.1006/inco.1996.2613},
volume = {132},
year = {1997}
}
@inproceedings{Doeraene2016,
address = {New York, New York, USA},
author = {Doeraene, S{\'{e}}bastien and Schlatter, Tobias and Stucki, Nicolas},
booktitle = {Proceedings of the 2016 7th ACM SIGPLAN Symposium on Scala - SCALA 2016},
doi = {10.1145/2998392.2998404},
isbn = {9781450346481},
pages = {85--94},
publisher = {ACM Press},
title = {{Semantics-driven interoperability between Scala.js and JavaScript}},
url = {http://dl.acm.org/citation.cfm?doid=2998392.2998404},
volume = {0},
year = {2016}
}
@article{Reps2004,
author = {Reps, Thomas W and Sagiv, Shmuel and Yorsh, Greta},
isbn = {9783540208037},
issn = {16113349},
journal = {Verification, Model Checking, and Abstract Interpretation (VMCAI)},
pages = {252--266},
title = {{Symbolic Implementation of the Best Transformer}},
year = {2004}
}
@article{Terragni2015,
abstract = {Concurrent programs proliferate as multi-core technologies advance. The regression testing of concurrent programs often requires running a failing test for weeks before catching a faulty interleaving, due to the myriad of possible interleavings of memory accesses arising from concurrent program executions. As a result, the conventional approach that selects a sub-set of test cases for regression testing without considering interleavings is insufficient. In this paper we present RECONTEST to address the problem by selecting the new interleavings that arise due to code changes. These interleavings must be explored in order to uncover regression bugs. RECONTEST efficiently selects new interleavings by first identifying shared memory accesses that are affected by the changes, and then exploring only those problematic interleavings that contain at least one of these accesses. We have implemented RECONTEST as an automated tool and evaluated it using 13 real-world concurrent program subjects. Our results show that RECONTEST can significantly reduce the regression testing cost without missing any faulty interleavings induced by code changes.},
author = {Terragni, Valerio and Cheung, Shing Chi and Zhang, Charles},
doi = {10.1109/ICSE.2015.45},
isbn = {9781479919345},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
pages = {246--256},
title = {{RECONTEST: Effective regression testing of concurrent programs}},
volume = {1},
year = {2015}
}
@article{Nieto2003,
author = {{Prensa Nieto}, Leonor},
isbn = {3-540-00886-1},
issn = {03029743},
journal = {Programming Languages and Systems},
pages = {348--362},
title = {{The rely-guarantee method in Isabelle/HOL}},
url = {http://link.springer.com/chapter/10.1007/3-540-36575-3{\_}24},
year = {2003}
}
@article{Reynolds1998,
abstract = {To introduce the republication of ``Definitional Interpreters for Higher-Order Programming Languages'', the author recounts the circumstances of its creation, clarifies several obscurities, corrects a few mistakes, and briefly summarizes some more recent developments.},
author = {Reynolds, John C.},
doi = {10.1023/A:1010075320153},
issn = {13883690},
journal = {Higher-Order and Symbolic Computation},
keywords = {applicative language,assignment,call by,call by value,closure,continuation,continuation-passing-style transformation,defunctionalization,denotational semantics,escape,functional language,higher-order function,interpreter,iswim,j-operator,lambda calculus,lisp,metacircularity,name,operational semantics,pal,scheme,secd machine},
pages = {355--361},
title = {{Definitional Interpreters Revisited}},
url = {http://www.brics.dk/{~}hosc/local/HOSC-11-4-pp355-361.pdf},
volume = {11},
year = {1998}
}
@article{Li2011,
abstract = {Points-to analysis is a fundamental static analysis technique which computes the set of memory objects that a pointer may point to. Many different applications, such as security-related program analyses, bug checking, and analyses of multi-threaded programs, require precise points-to information to be effective. Recent work has focused on improving the precision of points-to analysis through flow-sensitivity and great progress has been made. However, even with all recent progress, flow-sensitive points-to analysis can still be much slower than a flow-insensitive analysis. In this paper, we propose a novel method that simplifies flow-sensitive points-to analysis to a general graph reachability problem in a value flow graph. The value flow graph summarizes dependencies between pointer variables, including those memory dependencies via pointer dereferences. The points-to set for each pointer variable can then be computed as the set of memory objects that can reach it in the graph. We develop an algorithm to build the value flow graph efficiently by examining the pointed-to-by set of a memory object, i.e., the set of pointers that point to an object. The pointed-to-by information of memory objects is very useful for applications such as escape analysis, and information flow analysis. Our approach is intuitive, easy to implement and very efficient. The implementation is around 2000 lines of code and it is more efficient than existing flow-sensitive points-to analyses. The runtime is comparable with the state-of-the-art flow-insensitive points-to analysis.},
author = {Li, Lian and Cifuentes, Cristina and Keynes, Nathan},
doi = {10.1145/2025113.2025160},
isbn = {9781450304436},
journal = {Proceedings of the 19th ACM SIGSOFT symposium and the 13th European conference on Foundations of software engineering - SIGSOFT/FSE '11},
pages = {343},
title = {{Boosting the performance of flow-sensitive points-to analysis using value flow}},
url = {http://dl.acm.org/citation.cfm?doid=2025113.2025160},
year = {2011}
}
@article{Godefroid2005,
author = {Godefroid, Patrice and Klarlund, Nils and Sen, Koushik},
doi = {10.1145/1064978.1065036},
isbn = {1595930809},
issn = {03621340},
keywords = {automated test,generation,interfaces,program verification,random testing,software testing},
pages = {213--223},
title = {{DART: Directed Automated Random Testing}},
year = {2005}
}
@article{Fogg2015,
author = {Fogg, Peter and Tobin-Hochstadt, Sam and Newton, Ryan R},
pages = {1--12},
title = {{Parallel Type-checking with Saturating LVars}},
year = {2015}
}
@article{Zhang2014a,
abstract = {Inclusion-based alias analysis for C can be formulated as a context-free language (CFL) reachability problem. It is well known that the traditional cubic CFL-reachability algorithm does not scale well in practice. We present a highly scalable and efficient CFL-reachability-based alias analysis for C. The key novelty of our algorithm is to propagate reachability information along only original graph edges and bypass a large portion of summary edges, while the traditional CFL-reachability algorithm propagates along all summary edges. We also utilize the Four Russians' Trick - a key enabling technique in the subcubic CFL-reachability algorithm - in our alias analysis. We have implemented our subcubic alias analysis and conducted extensive experiments on widely-used C programs from the pointer analysis literature. The results demonstrate that our alias analysis scales extremely well in practice. In particular, it can analyze the recent Linux kernel (which consists of 10M SLOC) in about 30 seconds.},
author = {Zhang, Qirun and Xiao, Xiao and Zhang, Charles and Yuan, Hao and Su, Zhendong},
doi = {10.1145/2660193.2660213},
isbn = {9781450325851},
issn = {0362-1340},
journal = {Proceedings of the 2014 ACM International Conference on Object Oriented Programming Systems Languages {\&} Applications},
keywords = {algorithms,alias analysis,cfl-reachability,experimentation,languages},
pages = {829--845},
title = {{Efficient Subcubic Alias Analysis for C}},
url = {http://doi.acm.org/10.1145/2660193.2660213},
year = {2014}
}
@article{Hall1996,
abstract = {typeclasses; unread},
author = {Hall, Cordelia V. and Hammond, Kevin and {Peyton Jones}, Simon L. and Wadler, Philip L.},
doi = {10.1145/227699.227700},
isbn = {0164-0925},
issn = {01640925},
journal = {ACM Transactions on Programming Languages and Systems},
number = {2},
pages = {109--138},
title = {{Type classes in Haskell}},
volume = {18},
year = {1996}
}
@article{Guhaa,
author = {Guha, Arjun and Lerner, Benjamin and Politz, Joe Gibbs},
pages = {1--3},
title = {{Web API Verification: Results and Challenges (Summary of Prior and Current Work)}}
}
@article{Jones1983,
abstract = {Development methods for (sequential) programs that run in isolation have been studied elsewhere. Programs that run in parallel can interfere with each other, either via shared storage or by sending messages. Extensions to earlier development methods are proposed for the rigorous development of interfering programs. In particular, extensions to the specification method based on postconditions that are predicates of two states and the development methods of operation decomposition and data refinement are proposed.},
author = {Jones, C. B.},
doi = {10.1145/69575.69577},
isbn = {0164-0925},
issn = {01640925},
journal = {ACM Transactions on Programming Languages and Systems},
keywords = {communicating sequential processes,design,guarantee-conditions,languages,rely-conditions,verification},
number = {4},
pages = {596--619},
title = {{Tentative steps toward a development method for interfering programs}},
url = {http://dl.acm.org/citation.cfm?id=69575.69577},
volume = {5},
year = {1983}
}
@article{Owicki1976,
abstract = {A language for parallel programming, with a primitive construct for synchronization and mutual exclusion, is presented. Hoare's deductive system for proving partial correctness of sequential programs is extended to include the parallelism described by the language. The proof method lends insight into how one should understand and present parallel programs. Examples are given using several of the standard problems in the literature. Methods for proving termination and the absence of deadlock are also given.},
author = {Owicki, Susan and Gries, David},
doi = {10.1007/BF00268134},
issn = {00015903},
journal = {Acta Informatica},
number = {4},
pages = {319--340},
title = {{An axiomatic proof technique for parallel programs I}},
volume = {6},
year = {1976}
}
@article{Maas2015,
author = {Maas, Alisa J},
doi = {10.1145/2814189.2815367},
isbn = {9781450337229},
journal = {Companion Proceedings of the 2015 ACM SIGPLAN International Conference on Systems, Programming, Languages and Applications: Software for Humanity},
keywords = {bindings,ffi,foreign function interfaces,libraries,static analysis,type inference},
pages = {69--70},
title = {{Automatic Array Property Detection via Static Analysis}},
year = {2015}
}
@article{Herlihy1990,
abstract = {A concurrent object is a data object shared by concurrent processes. Linearizability is a correctness condition for concurrent objects that exploits the semantics of abstract data types. It permits a high degree of concurrency, yet it permits programmers to specify and reason about concurrent objects using known techniques from the sequential domain. Linearizability provides the illusion that each operation applied by concurrent processes takes effect instantaneously at some point between its invocation and its response, implying that the meaning of a concurrent object's operations can be given by pre- and post-conditions. This paper defines linearizability, compares it to other correctness conditions, presents and demonstrates a method for proving the correctness of implementations, and shows how to reason about concurrent objects, given they are linearizable.},
author = {Herlihy, Maurice P. and Wing, Jeannette M.},
doi = {10.1145/78969.78972},
issn = {01640925},
journal = {ACM Transactions on Programming Languages and Systems},
keywords = {concurrency,processing,serializability,shared memory,specification},
number = {3},
pages = {463--492},
title = {{Linearizability: a correctness condition for concurrent objects}},
url = {http://portal.acm.org/citation.cfm?doid=78969.78972},
volume = {12},
year = {1990}
}
@article{Tofte1998,
abstract = {Region Inference is a program analysis which infers lifetimes of values. It is targeted at a runtime model in which the store consists of a stack of regions and memory management predominantly consists of pushing and popping regions, rather than performing garbage collection. Region Inference has previously been specified by a set of inference rules which formalize when regions may be allocated and deallocated. This article presents an algorithm which implements the specification. We prove that the algorithm is sound with respect to the region inference rules and that it always terminates even though the region inference rules permit polymorphic recursion in regions. The algorithm is the result of several years of experiments with region inference algorithms in the ML Kit, a compiler from Standard ML to assembly language. We report on practical experience with the algorithm and give hints on how to implement it.},
author = {Tofte, Mads and Birkedal, Lars},
doi = {10.1145/291891.291894},
issn = {01640925},
journal = {ACM Transactions on Programming Languages and Systems},
number = {4},
pages = {724--767},
title = {{A region inference algorithm}},
volume = {20},
year = {1998}
}
@article{Steimann2006,
abstract = {Aspect-oriented programming is considered a promising new technology. As object-oriented programming did before, it is beginning to pervade all areas of software engineering. With its growing popularity, practitioners and academics alike are wondering whether they should start looking into it, or otherwise risk having missed an important development. The author of this essay finds that much of aspect-oriented programming's success seems to be based on the conception that it improves both modularity and the structure of code, while in fact, it works against the primary purposes of the two, namely independent development and understandability of programs. Not seeing any way of fixing this situation, he thinks the success of aspect-oriented programming to be paradoxical.},
author = {Steimann, Friedrich},
doi = {10.1145/1167515.1167514},
isbn = {1595933484},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {aspect-oriented-programming,modularization},
number = {10},
pages = {481},
title = {{The paradoxical success of aspect-oriented programming}},
url = {http://dx.doi.org/10.1145/1167515.1167514},
volume = {41},
year = {2006}
}
@article{Calcagno2007a,
abstract = {Concurrent programs are difficult to verify because the proof must consider the interactions between the threads. Fine-grained concurrency and heap allocated data structures exacerbate this problem, because threads interfere more often and in richer ways. In this paper we provide a thread-modular safety checker for a class of pointer-manipulating fine-grained concurrent algorithms. Our checker uses ownership to avoid interference whenever possible, and rely/guarantee (assume/guarantee) to deal with interference when it genuinely exists.},
author = {Calcagno, Cristiano and Parkinson, Matthew and Vafeiadis, Viktor},
doi = {10.1007/978-3-540-74061-2_15},
isbn = {9783540740605},
issn = {03029743},
journal = {International Static Analysis Symposium},
pages = {233--248},
title = {{Modular Safety Checking for Fine-Grained Concurrency}},
year = {2007}
}
@article{Li2013a,
author = {Li, Lian and Cifuentes, Cristina and Keynes, Nathan},
doi = {10.1145/2491894.2466483},
isbn = {9781450321006},
issn = {0362-1340},
journal = {ACM SIGPLAN Notices},
keywords = {cfl-reachability,context-sensitive analysis,demand-driven,flow-sensitive analysis,function summaries},
pages = {85--96},
title = {{Precise and Scalable Context-sensitive Pointer Analysis via Value Flow Graph}},
url = {http://dl.acm.org/citation.cfm?doid=2491894.2466483},
year = {2013}
}
@article{Sieczkowski2015,
author = {Sieczkowski, Filip and Bizjak, Ale{\v{s}} and Birkedal, Lars},
doi = {10.1007/978-3-319-22102-1_25},
isbn = {9783319221014},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {375--390},
title = {{ModuRes: A Coq library for modular reasoning about concurrent higher-order imperative programming languages}},
volume = {9236},
year = {2015}
}
@article{Kozen,
author = {Kozen, Dexter},
isbn = {9783319127354},
issn = {16113349},
keywords = {frenetic,kleene algebra,kleene algebra with tests,netkat,openflow,packet switching,software defined networking},
title = {{NetKAT — A Formal System for the Verification of Networks}},
year = {2014}
}
@article{Hansen2003,
author = {Hansen, Helle Hvid},
keywords = {algebraic duality,coalgebra,correspondence,craig interpolation,definability,neighbourhood semantics,non-normal modal logic,simulation,theory,university of amsterdam},
pages = {117},
title = {{Monotonic Modal Logics}},
year = {2003}
}
@article{Boyland2003,
abstract = {We describe a type system for checking interference using the concept of linear capabilities (which we call “permissions”).},
author = {Boyland, John},
doi = {10.1007/3-540-44898-5_4},
isbn = {978-3-540-40325-8},
journal = {Static Analysis Symposium (SAS)},
pages = {55--72},
title = {{Checking Interference with Fractional Permissions}},
volume = {2694},
year = {2003}
}
@article{Klein2008,
author = {Klein, Gerwin},
keywords = {formal software verification,operating systems,theorem proving},
title = {{Operating System Verification --- An Overview}},
year = {2008}
}
@article{Vafeiadis2014,
author = {Vafeiadis, Viktor},
keywords = {concurrency,race condition,separation logic,soundness},
pages = {1--16},
title = {{Concurrent Separation Logic Lecture Notes}},
url = {http://concurrency.cs.uni-kl.de/documents/ConcurrencyTheory{\_}SS{\_}2014/lecturenotes/30{\_}04{\_}2014{\_}csl-soundness.pdf},
year = {2014}
}
@article{Might2010,
abstract = {Low-level program analysis is a fundamental problem, taking the shape of "flow analysis" in functional languages and "points-to" analysis in imperative and object-oriented languages. Despite the similarities, the vocabulary and results in the two communities remain largely distinct, with limited cross-understanding. One of the few links is Shivers's k-CFA work, which has advanced the concept of "context-sensitive analysis" and is widely known in both communities. Recent results indicate that the relationship between the functional and object-oriented incarnations of k-CFA is not as well understood as thought. Van Horn and Mairson proved k-CFA for k ≥ 1 to be EXPTIME-complete; hence, no polynomial-time algorithm can exist. Yet, there are several polynomial-time formulations of context-sensitive points-to analyses in object-oriented languages. Thus, it seems that functional k-CFA may actually be a profoundly different analysis from object-oriented k-CFA. We resolve this paradox by showing that the exact same specification of k-CFA is polynomial-time for object-oriented languages yet exponential-time for functional ones: objects and closures are subtly different, in a way that interacts crucially with context-sensitivity and complexity. This illumination leads to an immediate payoff: by projecting the object-oriented treatment of objects onto closures, we derive a polynomial-time hierarchy of context-sensitive CFAs for functional programs.},
archivePrefix = {arXiv},
arxivId = {arXiv:1311.4231v1},
author = {Might, Matthew and Smaragdakis, Yannis and {Van Horn}, David},
doi = {10.1145/1809028.1806631},
eprint = {arXiv:1311.4231v1},
isbn = {978-1-4503-0019-3},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {control-flow analysis,functional,k-cfa,m-cfa,object-oriented,pointer analysis,static analysis},
number = {6},
pages = {305},
title = {{Resolving and exploiting the k-CFA paradox}},
url = {http://dl.acm.org/citation.cfm?id=1809028.1806631},
volume = {45},
year = {2010}
}
@article{Damas1982,
author = {Damas, Luis and Milner, Robin},
doi = {10.1145/582153.582176},
isbn = {0897910656},
issn = {01406736},
journal = {Proceedings of the 9th ACM SIGPLAN-SIGACT symposium on Principles of programming languages - POPL '82},
number = {October},
pages = {207--212},
title = {{Principal type-schemes for functional programs}},
url = {http://portal.acm.org/citation.cfm?doid=582153.582176},
year = {1982}
}
@article{Najd2016,
abstract = {We describe a new approach to implementing Domain-Specific Languages(DSLs), called Quoted DSLs (QDSLs), that is inspired by two old ideas:quasi-quotation, from McCarthy's Lisp of 1960, and the subformula principle of normal proofs, from Gentzen's natural deduction of 1935. QDSLs reuse facilities provided for the host language, since host and quoted terms share the same syntax, type system, and normalisation rules. QDSL terms are normalised to a canonical form, inspired by the subformula principle, which guarantees that one can use higher-order types in the source while guaranteeing first-order types in the target, and enables using types to guide fusion. We test our ideas by re-implementing Feldspar, which was originally implemented as an Embedded DSL (EDSL), as a QDSL; and we compare the QDSL and EDSL variants. The two variants produce identical code.},
archivePrefix = {arXiv},
arxivId = {1507.07264},
author = {Najd, Shayan and Lindley, Sam and Svenningsson, Josef and Wadler, Philip},
doi = {10.1145/2847538.2847541},
eprint = {1507.07264},
isbn = {9781450340977},
journal = {Proceedings of the 2016 ACM SIGPLAN Workshop on Partial Evaluation and Program Manipulation - PEPM 2016},
keywords = {DSL,EDSL,QDSL,domain-specific language,embedded language,normalisation,quotation,subformula principle},
pages = {25--36},
title = {{Everything old is new again: quoted domain-specific languages}},
url = {http://dl.acm.org/citation.cfm?id=2847538.2847541},
year = {2016}
}
@article{OHearn1999,
abstract = {We introduce a logic BI in which a multiplicative (or linear) and an additive (or intuitionistic) implication live side-by-side. The propositional version of BI arises from an analysis of the proof-theoretic relationship between conjunction and implication; it can be viewed as a merging of intuitionistic logic and multiplicative intuitionistic linear logic. The naturality of BI can be seen categorically: models of propositional BI's proofs are given by bicartesian doubly closed categories, i.e., categories which freely combine the semantics of propositional intuitionistic logic and propositional multiplicative intuitionistic linear logic. The predicate version of BI includes, in addition to standard additive quantifiers, multiplicative (or intensional) quantifiers ∀ new and ∃ new which arise from observing restrictions on structural rules on the level of terms as well as propositions. We discuss computational interpretations, based on sharing, at both the propositional and predicate levels.},
author = {O'Hearn, Peter W. and Pym, David J.},
doi = {10.2307/421090},
isbn = {9788578110796},
issn = {1079-8986},
journal = {Bulletin of Symbolic Logic},
number = {02},
pages = {215--244},
title = {{The Logic of Bunched Implications}},
url = {https://www.jstor.org/stable/421090},
volume = {5},
year = {1999}
}
@article{Sider2010,
author = {Sider, Theodore},
journal = {Logic For Philosophy},
pages = {1--10},
title = {{Propositional Modal Logic}},
year = {2010}
}
@article{Hallgren2005,
abstract = {We describe a monadic interface to low-level hardware features that is a suitable basis for building operating systems in Haskell. The interface includes primitives for controlling memory management hardware, user-mode process execution, and low-level device I/O. The interface enforces memory safety in nearly all circumstances. Its behavior is specified in part by formal assertions written in a programming logic called P-Logic. The interface has been implemented on bare IA32 hardware using the Glasgow Haskell Compiler (GHC) runtime system. We show how a variety of simple O/S kernels can be constructed on top of the interface, including a simple separation kernel and a demonstration system in which the kernel, window system, and all device drivers are written in Haskell.},
author = {Hallgren, Thomas and Jones, Mark P. and Leslie, Rebekah and Tolmach, Andrew},
doi = {10.1145/1090189.1086380},
isbn = {1595930647},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
keywords = {hardware interface,haskell,monads,operating systems,programming logic,verification},
number = {9},
pages = {116},
title = {{A principled approach to operating system construction in Haskell}},
url = {http://portal.acm.org/citation.cfm?doid=1090189.1086380},
volume = {40},
year = {2005}
}
@article{Kovacs2016,
abstract = {The theory of finite term algebras provides a natural framework to describe the semantics of functional languages. The ability to efficiently reason about term algebras is essential to automate program analysis and verification for functional or imperative programs over algebraic data types such as lists and trees. However, as the theory of finite term algebras is not finitely axiomatizable, reasoning about quantified properties over term algebras is challenging. In this paper we address full first-order reasoning about properties of programs manipulating term algebras, and describe two approaches for doing so by using first-order theorem proving. Our first method is a conservative extension of the theory of term algebras using a finite number of statements, while our second method relies on extending the superposition calculus of first-order theorem provers with additional inference rules. We implemented our work in the first-order theorem prover Vampire and evaluated it on a large number of algebraic data type benchmarks, as well as game theory constraints. Our experimental results show that our methods are able to find proofs for many hard problems previously unsolved by state-of-the-art methods. We also show that Vampire implementing our methods outperforms existing SMT solvers able to deal with algebraic data types.},
archivePrefix = {arXiv},
arxivId = {1611.02908},
author = {Kovacs, Laura and Robillard, Simon and Voronkov, Andrei},
eprint = {1611.02908},
keywords = {algebraic data types,automated reasoning,first-order theorem proving,program analysis and verification,superposition},
title = {{Coming to Terms with Quantified Reasoning}},
url = {http://arxiv.org/abs/1611.02908},
year = {2016}
}
@article{Lu,
author = {Lu, Shan},
isbn = {9781450344449},
keywords = {abstraction refinement,automated debugging,bug isolation,field failures,statistical},
title = {{Low-Overhead and Fully Automated Statistical Debugging with Abstraction Refinement}}
}
@article{Angiuli2016,
abstract = {Formal constructive type theory has proved to be an effective language for mechanized proof. By avoiding non-constructive principles, such as the law of the excluded middle, type theory admits sharper proofs and broader interpretations of results. From a computer science perspective, interest in type theory arises from its applications to programming languages. Standard constructive type theories used in mechanization admit computational interpretations based on meta-mathematical normalization theorems. These proofs are notoriously brittle; any change to the theory potentially invalidates its computational meaning. As a case in point, Voevodsky's univalence axiom raises questions about the computational meaning of proofs. We consider the question: Can higher-dimensional type theory be construed as a programming language? We answer this question affirmatively by providing a direct, deterministic operational interpretation for a representative higher-dimensional dependent type theory with higher inductive types and an instance of univalence. Rather than being a formal type theory defined by rules, it is instead a computational type theory in the sense of Martin-L{\"{o}}f's meaning explanations and of the NuPRL semantics. The definition of the type theory starts with programs, and defines types as specifications of program behavior. The main result is a canonicity theorem, the first of its kind, stating that closed programs of boolean type evaluate to true or false.},
author = {Angiuli, Carlo and Harper, Robert and Wilson, Todd},
keywords = {homotopy type theory,logical relations},
title = {{Computational Higher-Dimensional Type Theory}},
year = {2016}
}
@article{Scherer2016,
abstract = {The logical technique of focusing can be applied to the $\lambda$-calculus; in a simple type system with atomic types and negative type formers (functions, products, the unit type), its normal forms coincide with $\beta\eta$-normal forms. Introducing a saturation phase gives a notion of quasi-normal forms in presence of positive types (sum types and the empty type). This rich structure lets us prove the decidability of $\beta\eta$-equivalence in presence of the empty type, the fact that it coincides with contextual equivalence, and a finite model property.},
archivePrefix = {arXiv},
arxivId = {1610.01213},
author = {Scherer, Gabriel},
eprint = {1610.01213},
title = {{Deciding equivalence with sums and the empty type}},
url = {http://arxiv.org/abs/1610.01213},
year = {2016}
}
@article{Marlow2009,
abstract = {Purely functional programs should run well on parallel hardware because of the absence of side effects, but it has proved hard to realise this potential in practice. Plenty of papers describe promising ideas, but vastly fewer describe real implementations with good wall-clock performance. We describe just such an implementation, and quantitatively explore some of the complex design tradeoffs that make such implementations hard to build. Our measurements are necessarily detailed and specific, but they are reproducible, and we believe that they offer some general insights.},
author = {Marlow, Simon and {Peyton Jones}, Simon and Singh, Satnam},
doi = {10.1145/1631687.1596563},
isbn = {9781605583327},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
number = {9},
pages = {65},
title = {{Runtime support for multicore Haskell}},
url = {http://portal.acm.org/citation.cfm?doid=1631687.1596563},
volume = {44},
year = {2009}
}
@article{Barthe2016,
abstract = {Couplings are a powerful mathematical tool for reasoning about pairs of probabilistic processes. Recent developments in formal verification identify a close connection between couplings and pRHL, a relational program logic motivated by applications to provable security, enabling formal construction of couplings from the probability theory literature. However, existing work using pRHL merely shows existence of a coupling and does not give a way to prove quantitative properties about the coupling, which are needed to reason about mixing and convergence of probabilistic processes. Furthermore, pRHL is inherently incomplete, and is not able to capture some advanced forms of couplings such as shift couplings. We address both problems as follows. First, we define an extension of pRHL, called xpRHL, which explicitly constructs the coupling in a pRHL derivation in the form of a probabilistic product program that simulates two correlated runs of the original program. Existing verification tools for probabilistic programs can then be directly applied to the probabilistic product to prove quantitative properties of the coupling. Second, we equip pRHL with a new rule for while loops, where reasoning can freely mix synchronized and unsynchronized loop iterations. Our proof rule can capture examples of shift couplings, and the logic is relatively complete for deterministic programs. We show soundness of xpRHL and use it to analyze two classes of examples. First, we verify rapid mixing using different tools from coupling: standard coupling, shift coupling, and path coupling, a compositional principle for combining local couplings into a global coupling. Second, we verify (approximate) equivalence between a source and an optimized program for several instances of loop optimizations from the literature.},
archivePrefix = {arXiv},
arxivId = {1607.03455},
author = {Barthe, Gilles and Gr{\'{e}}goire, Benjamin and Hsu, Justin and Strub, Pierre-Yves},
eprint = {1607.03455},
keywords = {formal verification,probabilistic algorithms,probabilistic couplings,product programs,relational hoare logic},
title = {{Coupling proofs are probabilistic product programs}},
url = {http://arxiv.org/abs/1607.03455},
year = {2016}
}
@article{Ahman2016,
abstract = {Dijkstra monads are a means by which a dependent type theory can be enhanced with support for reasoning about effectful code. These specification-level monads computing weakest preconditions, and their closely related counterparts, Hoare monads, provide the basis on which verification tools like F*, Hoare Type Theory (HTT), and Ynot are built. In this paper we show that Dijkstra monads can be derived "for free" by applying a continuation-passing style (CPS) translation to the standard monadic definitions of the underlying computational effects. Automatically deriving Dijkstra monads provides a correct-by-construction and efficient way of reasoning about user-defined effects in dependent type theories. We demonstrate these ideas in EMF*, a new dependently typed calculus, validating it both by formal proof and via a prototype implementation within F*. Besides equipping F* with a more uniform and extensible effect system, EMF* enables within F* a mixture of intrinsic and extrinsic proofs that was previously impossible.},
archivePrefix = {arXiv},
arxivId = {1608.06499},
author = {Ahman, Danel and Hritcu, Catalin and Martinez, Guido and Plotkin, Gordon and Protzenko, Jonathan and Rastogi, Aseem and Swamy, Nikhil},
eprint = {1608.06499},
keywords = {effectful programming,proof assistants,verification},
title = {{Dijkstra Monads for Free}},
url = {http://arxiv.org/abs/1608.06499},
year = {2016}
}
@article{Martins2016,
author = {Martins, Ruben and Wang, Yuepeng and Reps, Thomas W},
keywords = {component-based,petri-net,program,type-directed},
pages = {1--16},
title = {{Component-Based Synthesis for Complex APIs}},
year = {2016}
}
@article{Levy2017,
author = {Levy, Paul Blain},
keywords = {call-by-push-value,computational,contextual equivalence,effects,isomorphism},
title = {{Contextual Isomorphisms}},
year = {2017}
}
@article{Zhang2016b,
author = {Zhang, Yang and Feng, Xinyu},
doi = {10.1007/s11704-015-4492-4},
issn = {20952236},
journal = {Frontiers of Computer Science},
keywords = {DRF-Guarantee,JMM,happens-before,operational semantics,relaxed memory model},
number = {1},
pages = {54--81},
title = {{An operational happens-before memory model}},
volume = {10},
year = {2016}
}
@article{Shan2016,
abstract = {Bayesian inference, of posterior knowledge from prior knowledge and observed evidence, is typically defined by Bayes's rule. But the observation of a continuous quantity usually has probability zero, in which case Bayes's rule says only that the unknown times zero is zero. To infer a posterior distribution from a zero-probability observation, the statistical notion of disintegration tells us to specify the observation as an expression rather than a predicate, but does not tell us how to compute the posterior. We present the first method of computing a disintegration from a probabilistic program and an observable expression, solving the problem of drawing inferences from zero-probability observations. Because the method produces an exact posterior term, it composes with other inference methods in a modular way without sacrificing accuracy or performance.},
author = {Shan, Chung-chieh and Ramsey, Norman},
keywords = {conditional measures,continuous distributions,probabilistic programs},
pages = {1--15},
title = {{Exact Bayesian Inference by Symbolic Disintegration}},
year = {2016}
}
@article{Lehmann2017,
author = {Lehmann, Nico and Tanter, {\'{E}}ric},
isbn = {9781450346603},
keywords = {abstract interpretation,gradual typing,refinement types},
pages = {1543--1556},
title = {{Gradual Refinement Types}},
year = {2017}
}
@article{Bornat,
author = {Bornat, Richard and Calcagno, Cristiano and O'Hearn, Peter and Parkinson, Matthew},
isbn = {158113830X},
keywords = {concurrency,permissions,separation logic},
title = {{Permission Accounting in Separation Logic}},
year = {2005}
}
@article{Danvy1992,
author = {Danvy, Olivier and Filinski, Andrzej},
doi = {10.1017/S0960129500001535},
issn = {0960-1295},
journal = {Mathematical Structures in Computer Science},
month = {dec},
number = {04},
pages = {361},
title = {{Representing Control: a Study of the CPS Transformation}},
url = {http://www.journals.cambridge.org/abstract{\_}S0960129500001535},
volume = {2},
year = {1992}
}
@article{Brutschy,
author = {Brutschy, Lucas and M{\"{u}}ller, Peter and Vechev, Martin},
title = {{Effective Serializability for Eventual Consistency}}
}
@article{Omar2016,
abstract = {Programs are rich inductive structures, but human programmers typically construct and manipulate them only indirectly, through flat textual representations. This indirection comes at a cost – programmers must comprehend the various subtleties of parsing, and it can require many text editor actions to make a single syntactically and semantically well-defined change. During these sequences of text editor actions, or when the programmer makes a mistake, programmers and programming tools must contend with malformed or semantically ill-defined program text, complicating the programming process. Structure editors promise to alleviate these burdens by exposing only edit actions that produce sensible changes to the program structure. Existing designs for structure editors, however, are complex and somewhat ad hoc. They also focus primarily on syntactic well-formedness, so programs can still be left semantically ill-defined as they are being constructed. In this paper, we report on our ongoing efforts to develop Hazelnut, a minimal structure editor defined in a principled type-theoretic style where all edit actions leave the program in both a syntactically and semantically well-defined state. Uniquely, Hazelnut does not force the programmer to construct the program in a strictly "outside-in" fashion. Formally, Hazelnut is a bidirectionally typed lambda calculus extended with 1) holes (which mark subterms that are being constructed from the inside out); 2) a focus model; and 3) a bidirectional action model equipped with a useful action sensibility theorem.},
archivePrefix = {arXiv},
arxivId = {arXiv:1607.04180v1},
author = {Omar, Cyrus and Voysey, Ian and Hilton, Michael and Aldrich, Jonathan and Hammer, Matthew A},
eprint = {arXiv:1607.04180v1},
title = {{Hazelnut: A Bidirectionally Typed Structure Editor Calculus}},
year = {2016}
}
@article{Lindley2016,
abstract = {We explore the design and implementation of Frank, a strict functional programming language with a bidirectional effect type system designed from the ground up around a novel variant of Plotkin and Pretnar's effect handler abstraction. Effect handlers provide an abstraction for modular effectful programming: a handler acts as an interpreter for a collection of commands whose interfaces are statically tracked by the type system. However, Frank eliminates the need for an additional effect handling construct by generalising the basic mechanism of functional abstraction itself. A function is simply the special case of a Frank operator that interprets no commands. Moreover, Frank's operators can be multihandlers which simultaneously interpret commands from several sources at once, without disturbing the direct style of functional programming with values. Effect typing in Frank employs a novel form of effect polymorphism which avoid mentioning effect variables in source code. This is achieved by propagating an ambient ability inwards, rather than accumulating unions of potential effects outwards. We introduce Frank by example, and then give a formal account of the Frank type system and its semantics. We introduce Core Frank by elaborating Frank operators into functions, case expressions, and unary handlers, and then give a sound small-step operational semantics for Core Frank. Programming with effects and handlers is in its infancy. We contribute an exploration of future possibilities, particularly in combination with other forms of rich type system.},
archivePrefix = {arXiv},
arxivId = {1611.09259},
author = {Lindley, Sam and McBride, Conor and McLaughlin, Craig},
eprint = {1611.09259},
keywords = {algebraic effects,bidirectional typing,call-by-push-value,continuations,effect handlers,effect polymorphism,pattern matching},
title = {{Do be do be do}},
url = {http://arxiv.org/abs/1611.09259},
year = {2016}
}
@article{Markus2017,
author = {Singh, Gagandeep and P{\"{u}}schel, Markus and Vechev, Martin},
isbn = {9781450346603},
keywords = {abstract interpretation,numerical program analysis,partitions,performance optimization,polyhedra decomposition},
title = {{Fast Polyhedra Abstract Domain}},
year = {2017}
}
@article{Germane2016,
author = {Germane, Kimball and Might, Matthew},
keywords = {environment analysis,static analysis},
pages = {1--13},
title = {{A Posteriori Environment Analysis with Pushdown Delta CFA}},
year = {2016}
}
@article{Lange2016,
abstract = {Go is a production-level statically typed programming language whose design features explicit message-passing primitives and lightweight threads, enabling (and encouraging) programmers to develop concurrent systems where components interact through communication more so than by lock-based shared memory concurrency. Go can only detect global deadlocks at runtime, but provides no compile-time protection against all too common communication mis-matches or partial deadlocks. This work develops a static verification framework for liveness and safety in Go programs, able to detect communication errors and partial deadlocks in a general class of realistic concurrent programs, including those with dynamic channel creation, unbounded thread creation and recursion. Our approach infers from a Go program a faithful representation of its communication patterns as a behavioural type. By checking a syntactic restriction on channel usage, dubbed fencing, we ensure that programs are made up of finitely many different communication patterns that may be repeated infinitely many times. This restriction allows us to implement a decision procedure for liveness and safety in types which in turn statically ensures liveness and safety in Go programs. We have implemented a type inference and decision procedures in a tool-chain and tested it against publicly available Go programs.},
archivePrefix = {arXiv},
arxivId = {1610.08843},
author = {Lange, Julien and Ng, Nicholas and Toninho, Bernardo and Yoshida, Nobuko},
eprint = {1610.08843},
keywords = {channel-based programming,compile-time,deadlock detection,message-passing,process calculus,programming,safety and liveness,static,types},
title = {{Fencing off Go: Liveness and Safety for Channel-based Programming (extended version)}},
url = {http://arxiv.org/abs/1610.08843},
year = {2016}
}
@inproceedings{Sousa2016,
author = {Sousa, Marcelo and Dillig, Isil},
booktitle = {PLDI '16 Proceedings of the 37th ACM SIGPLAN Conference on Programming Language Design and Implementation},
doi = {10.1145/2980983.2908092},
isbn = {8750152009},
issn = {03621340},
keywords = {automated verification,product programs,relational hoare logic,safety hyper-properties},
month = {jun},
number = {6},
pages = {57--69},
title = {{Cartesian hoare logic for verifying k-safety properties}},
url = {http://dl.acm.org/citation.cfm?doid=2980983.2908092},
volume = {51},
year = {2016}
}
@article{Richards,
author = {Richards, Gregor and Lebresne, Sylvain and Burg, Brian and Vitek, Jan},
keywords = {dynamic behavior,dynamic metrics,execution tracing,javascript,program analysis},
title = {{An Analysis of the Dynamic Behavior of JavaScript Programs}}
}
@article{Shapiro2011,
author = {Shapiro, Marc and Pregui{\c{c}}a, Nuno and Baquero, Carlos and Zawirski, Marek},
number = {2011},
title = {{A comprehensive study of Convergent and Commutative Replicated Data Types}},
year = {2011}
}
@article{Cimini2016a,
author = {Cimini, Matteo and Siek, Jeremy G},
keywords = {gradual typing,operational semantics,type systems},
pages = {1--15},
title = {{Automatically Generating the Dynamic Semantics of Gradually Typed Languages}},
year = {2016}
}
@article{Introduction,
author = {Jhala, Ranjit},
title = {{Programming with Refinement Types: An Introduction to LiquidHaskell}}
}
@article{Rojas1997,
abstract = {This paper is a short and painless introduction to the $\lambda$ calculus. Originally developed in order to study some mathematical properties of effectively computable functions, this formalism has provided a strong theoretical foundation for the family of functional programming languages. We show how to perform some arithmetical computations using the $\lambda$ calculus and how to define recursive functions, even though functions in $\lambda$ calculus are not given names and thus cannot refer explicitly to themselves.},
archivePrefix = {arXiv},
arxivId = {arXiv:1503.09060v1},
author = {Rojas, Ra{\'{u}}l},
eprint = {arXiv:1503.09060v1},
journal = {FU Berlin},
pages = {1--9},
title = {{A Tutorial Introduction to the Lambda Calculus}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.136.4173{\&}rep=rep1{\&}type=pdf},
year = {1997}
}
@article{Mandelbaum2003,
abstract = {We develop an explicit two level system that allows programmers to reason about the behavior of effectful programs. The first level is an ordinary ML-style type system, which confers standard properties on program behavior. The second level is a conservative extension of the first that uses a logic of type refinements to check more precise properties of program behavior. Our logic is a fragment of intuitionistic linear logic, which gives programmers the ability to reason locally about changes of program state. We provide a generic resource semantics for our logic as well as a sound, decidable, syntactic refinement-checking system. We also prove that refinements give rise to an optimization principle for programs. Finally, we illustrate the power of our system through a number of examples.},
author = {Mandelbaum, Yitzhak and Walker, David and Harper, Robert},
doi = {10.1145/944746.944725},
isbn = {1-58113-756-7},
issn = {03621340},
journal = {ACM SIGPLAN Notices},
pages = {213--225},
title = {{An effective theory of type refinements}},
volume = {38},
year = {2003}
}
@article{li2017static,
  title={Static analysis of android apps: A systematic literature review},
  author={Li, Li and Bissyand{\'e}, Tegawend{\'e} F and Papadakis, Mike and Rasthofer, Siegfried and Bartel, Alexandre and Octeau, Damien and Klein, Jacques and {Le Traon}, Yves},
  journal={Information and Software Technology},
  volume={88},
  pages={67--95},
  year={2017},
  publisher={Elsevier}
}