@inproceedings{1fd9e6ec3dc340f8a2c11aca37bf4410,
title = "A Flexible DNN Accelerator Design with Layer Pipeline for FPGAs",
abstract = "Deep Neural Networks (DNNs) are very effective for image recognition, object detection and tracking. To meet the deployment needs of DNNs, accelerator designs have been proposed, especially for implementation on FPGAs. Layer pipelining is a new technique that improves computation efficiency by reducing the latency between layers. In this paper, we propose a group-based DNN pipeline accelerator design for FPGAs. An entire DNN is partitioned into layer groups, with pipelined computation within every group. Configurable logic is used to switch between groups instantaneously, so our design can handle DNNs of any size on a single FPGA chip. We evaluate our accelerator for AlexNet and VGG16 on a Xilinx ZC706 board. Our experimental results show that we increase the throughput by 19% and 42% compared with the layer-based accelerators in [1] and [16].",
keywords = "Deep Neural Network, FPGA, Layer Pipeline",
author = "Weijie You and Deming Chen and Chang Wu",
note = "Publisher Copyright: {\textcopyright} 2019 IEEE; 6th International Conference on Information Science and Control Engineering, ICISCE 2019; Conference date: 20-12-2019 through 22-12-2019",
year = "2019",
month = dec,
doi = "10.1109/ICISCE48695.2019.00192",
language = "English (US)",
series = "Proceedings - 2019 6th International Conference on Information Science and Control Engineering, ICISCE 2019",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "959--962",
editor = "Shaozi Li and Yun Cheng and Ying Dai and Jianwei Ma",
booktitle = "Proceedings - 2019 6th International Conference on Information Science and Control Engineering, ICISCE 2019",
address = "United States",
}