@inproceedings{2c9473e93689409983a253b218ff15b7,
title = "Design flow of accelerating hybrid extremely low bit-width neural network in embedded FPGA",
abstract = "Neural network accelerators with low latency and low energy consumption are desirable for edge computing. To create such accelerators, we propose a design flow for accelerating the extremely low bit-width neural network (ELB-NN) in embedded FPGAs with hybrid quantization schemes. This flow covers both network training and FPGA-based network deployment, which facilitates the design space exploration and simplifies the tradeoff between network accuracy and computation efficiency. Using this flow helps hardware designers to deliver a network accelerator in edge devices under strict resource and power constraints. We present the proposed flow by supporting hybrid ELB settings within a neural network. Results show that our design can deliver very high performance peaking at 10.3 TOPS and classify up to 325.3 image/s/watt while running large-scale neural networks for less than 5W using embedded FPGA. To the best of our knowledge, it is the most energy efficient solution in comparison to GPU or other FPGA implementations reported so far in the literature.",
keywords = "DNN, ELB NN, FPGA, hybrid quantization",
author = "Junsong Wang and Qiuwen Lou and Xiaofan Zhang and Chao Zhu and Yonghua Lin and Deming Chen",
note = "Publisher Copyright: {\textcopyright} 2018 IEEE.; 28th International Conference on Field-Programmable Logic and Applications, FPL 2018 ; Conference date: 26-08-2018 Through 30-08-2018",
year = "2018",
month = nov,
day = "9",
doi = "10.1109/FPL.2018.00035",
language = "English (US)",
series = "Proceedings - 2018 International Conference on Field-Programmable Logic and Applications, FPL 2018",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "163--169",
booktitle = "Proceedings - 2018 International Conference on Field-Programmable Logic and Applications, FPL 2018",
address = "United States",
}
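
The entry above refers to hybrid quantization of an extremely low bit-width network. The Python sketch below is purely illustrative and is not the authors' code or design flow: the layer names, the bit-width assignments, and the symmetric uniform quantizer are assumptions, intended only to suggest what mixing per-layer precisions (binary and 2-bit weights in the middle layers, higher precision at the boundaries) might look like.

    # Illustrative sketch only: per-layer hybrid low-bit quantization.
    import numpy as np

    def quantize(w, bits):
        """Quantize a weight tensor to `bits` bits (1 bit = binarization)."""
        if bits == 1:
            # Binary weights: sign times the mean absolute value as a scale.
            return np.sign(w) * np.mean(np.abs(w))
        levels = 2 ** (bits - 1) - 1          # symmetric signed range
        scale = np.max(np.abs(w)) / levels
        return np.round(w / scale) * scale    # uniform quantize, then dequantize

    # Hypothetical hybrid scheme: keep the first and last layers at 8 bits,
    # push the middle layers to extremely low bit-widths (1-2 bits).
    layer_bits = {"conv1": 8, "conv2": 2, "conv3": 1, "fc": 8}
    weights = {name: np.random.randn(64, 64) for name in layer_bits}
    quantized = {name: quantize(w, layer_bits[name]) for name, w in weights.items()}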