@article{Majfds.2021.1.064,
  author       = {Ma, Yu chien (Calvin) and Wang, Zoe and Fleiss, Alexander},
  title        = {Deep {Q-Learning} for Trading Cryptocurrency},
  journal      = {The Journal of Financial Data Science},
  elocation-id = {jfds.2021.1.064},
  year         = {2021},
  doi          = {10.3905/jfds.2021.1.064},
  issn         = {2640-3943},
  url          = {https://jfds.pm-research.com/content/early/2021/06/08/jfds.2021.1.064},
  eprint       = {https://jfds.pm-research.com/content/early/2021/06/08/jfds.2021.1.064.full.pdf},
  publisher    = {Institutional Investor Journals Umbrella},
  abstract     = {This article sets forth a framework for deep reinforcement learning as applied to trading cryptocurrencies. Specifically, the authors adopt Q-Learning, which is a model-free reinforcement learning algorithm, to implement a deep neural network to approximate the best possible states and actions to take in the cryptocurrency market. Bitcoin, Ethereum, and Litecoin were selected as representatives to test the model. The Deep Q trading agent generated an average portfolio return of 65.98\%, although it showed extreme volatility over the 2,000 runs. Despite the high volatility of deep reinforcement learning, the experiment demonstrates that it has exceptionally high potential to be employed and provides a solid foundation on which to build further research. TOPICS: Currency, big data/machine learning, performance measurement. Key Findings: (1) The authors use deep neural networks to create a Deep Q-Learning trading agent that approximates the best actions to take based on rewards to maximize returns from trading the three cryptocurrencies with the largest market capitalization. (2) The Deep Q-Learning agent generates a return of 65.98\% on average over the course of 2,000 episodes; however, the returns do exhibit a large standard deviation given the highly volatile nature of the cryptocurrencies. (3) The authors introduce a framework on which future deep reinforcement learning and rewards-based trading agents can be built and improved.},
}