diff --git a/INSTALL.md b/INSTALL.md
index bd713cd1..8d0ba4aa 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -1,3 +1,4 @@
+
 # Installation
 
 See http://caffe.berkeleyvision.org/installation.html for the latest
@@ -14,27 +15,109 @@
 $ git clone https://github.com/NVIDIA/caffe
 ```
 
-2.
+2. Edit `~/.bashrc`
 
-```bash
-$ cd caffe
-$ mkdir build
+```diff
++ export LIBRARY_PATH=${GFLAGS_DIR}/lib:${LIBRARY_PATH}
++ export LD_LIBRARY_PATH=${GFLAGS_DIR}/lib:${LD_LIBRARY_PATH}
 ```
 
-modify cmake/Cuda.cmake
-modify Makefile
-modify cmake/Modules/FindAtlas.cmake
+3. Edit the following CMake files
 
-```bash
-$ cd build
-$ CMAKE_PREFIX_PATH=$NV_CAFFE_HOME/share \
-  cmake \
-  -DCMAKE_INSTALL_PREFIX=$NV_CAFFE_HOME \
-  -DCUDA_TOOLKIT_ROOT_DIR=$CUDA_HOME\ #required >=8.0
-  -DBLAS=$Atlas_LAPACK_LIBRARY \
-  -DCUDNN_LIBRARY=$CUDNN_LIBRARY \ #required >=6.0
-  -DCUDNN_INCLUDE=$CUDNN_INCLUDE \ #required >=6.0
-  ..
-
-$ make all -j 8
-$
+```diff
+--- a/cmake/Cuda.cmake
++++ b/cmake/Cuda.cmake
+@@ -251,6 +251,9 @@ if(USE_CUDNN)
+     endif()
+   endif()
+ 
++list(APPEND CUDA_NVCC_FLAGS "--pre-include $ENV{CUDNN6_HOME}/include/cudnn.h")
++include_directories(BEFORE "$ENV{CUDNN6_HOME}/include")
++
+ if(UNIX OR APPLE)
+   list(APPEND CUDA_NVCC_FLAGS -std=c++11;-Xcompiler;-fPIC)
+ endif()
+```
+
+```diff
+diff --git a/cmake/Modules/FindAtlas.cmake b/cmake/Modules/FindAtlas.cmake
+index 6e15643..e83fbeb 100644
+--- a/cmake/Modules/FindAtlas.cmake
++++ b/cmake/Modules/FindAtlas.cmake
+@@ -28,7 +28,7 @@ find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH
+ 
+ find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS})
+ find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
+-find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
++find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r lapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
+ 
+ set(LOOKED_FOR
+   Atlas_CBLAS_INCLUDE_DIR
+diff --git a/cmake/Modules/FindNCCL.cmake b/cmake/Modules/FindNCCL.cmake
+index 1f7d97c..73f816c 100644
+--- a/cmake/Modules/FindNCCL.cmake
++++ b/cmake/Modules/FindNCCL.cmake
+@@ -20,6 +20,7 @@ find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIR NCCL_LIBRARY
+ 
+ if(NCCL_FOUND)
+   message(STATUS "Found NCCL (include: ${NCCL_INCLUDE_DIR}, library: ${NCCL_LIBRARY})")
++  include_directories(${NCCL_INCLUDE_DIR})
+   mark_as_advanced(NCCL_INCLUDE_DIR NCCL_LIBRARY)
+ endif()
+```
+
+```diff
+diff --git a/cmake/Modules/FindNVML.cmake b/cmake/Modules/FindNVML.cmake
+index 8747ab3..2a8b3e7 100644
+--- a/cmake/Modules/FindNVML.cmake
++++ b/cmake/Modules/FindNVML.cmake
+@@ -14,8 +14,16 @@ find_path(NVML_INCLUDE_DIR NAMES nvml.h
+   PATHS ${CUDA_INCLUDE_DIRS} ${NVML_ROOT_DIR}/include
+ )
+ 
++set(MLPATH "/usr/lib64/nvidia")
++
+ find_library(NVML_LIBRARY nvidia-ml PATHS ${MLPATH} ${NVML_ROOT_DIR}/lib ${NVML_ROOT_DIR}/lib64)
+ 
++
++message(STATUS "CUDA_INCLUDE_DIRS: ${CUDA_INCLUDE_DIRS}")
++message(STATUS "NVML_INCLUDE_DIR: ${NVML_INCLUDE_DIR}")
++message(STATUS "NVML_LIBRARY: ${NVML_LIBRARY}")
++
++
+ include(FindPackageHandleStandardArgs)
+ find_package_handle_standard_args(NVML DEFAULT_MSG NVML_INCLUDE_DIR NVML_LIBRARY)
+```
+
+4. Run CMake
+
+```bash
+$ cmake -DCMAKE_INSTALL_PREFIX=${CAFFE_0_16_HOME} -DCUDA_TOOLKIT_ROOT_DIR=${CUDA8_HOME} ..
+```
+
+The console output should contain a line like:
+
+```
+-- Found gflags (include: /usr/include, library: /usr/lib64/libgflags.so)
+```
+
+5. After that, run make:
+
+```bash
+$ make all -j 64
+$ make install -j 64
+```
+
+6. However, if you check the library dependencies with `ldd`, you will see that
+the binary actually resolves libgflags from ${GFLAGS_DIR} (the exports from
+step 2 take precedence at load time), not the system copy that CMake reported:
+
+```
+$ ldd ${CAFFE_HOME}/build/tools/caffe
+libgflags.so.2.2 => ${GFLAGS_DIR}/lib/libgflags.so.2.2 (0x0000XXXXXXXXXX)
+```
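+
+Putting the pieces together: a minimal environment sketch for the variables the
+steps above assume (the prefixes below are placeholders -- substitute your own
+install locations):
+
+```bash
+# Hypothetical install prefixes -- adjust to your machine.
+export GFLAGS_DIR=$HOME/env/local/gflags    # used by the step 2 exports
+export CUDNN6_HOME=$HOME/env/local/cudnn6   # read by cmake/Cuda.cmake via $ENV{CUDNN6_HOME}
+
+export LIBRARY_PATH=${GFLAGS_DIR}/lib:${LIBRARY_PATH}
+export LD_LIBRARY_PATH=${GFLAGS_DIR}/lib:${LD_LIBRARY_PATH}
+```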
diff --git a/examples/imagenet/create_imagenet.sh b/examples/imagenet/create_imagenet.sh
index e912ac43..1c55c5bb 100755
--- a/examples/imagenet/create_imagenet.sh
+++ b/examples/imagenet/create_imagenet.sh
@@ -2,12 +2,12 @@
 # Create the imagenet lmdb inputs
 # N.B. set the path to the imagenet train + val data dirs
 
-EXAMPLE=examples/imagenet
-DATA=data/ilsvrc12
-TOOLS=build/tools
+EXAMPLE=/mnt/nas100/share/data/imagenet/caffe_tool_dir
+DATA=/mnt/nas100/share/data/imagenet/caffe_tool_dir/txt_dir
+TOOLS=/home/hiroki11x/dl/NV-Caffe/build/tools
 
-TRAIN_DATA_ROOT=/path/to/imagenet/train/
-VAL_DATA_ROOT=/path/to/imagenet/val/
+TRAIN_DATA_ROOT=/mnt/nas100/share/data/imagenet/train/
+VAL_DATA_ROOT=/mnt/nas100/share/data/imagenet/val/
 
 # Set RESIZE=true to resize the images to 256x256. Leave as false if images have
 # already been resized using another tool.
diff --git a/install.sh b/install.sh
new file mode 100755
index 00000000..8083fd87
--- /dev/null
+++ b/install.sh
@@ -0,0 +1,3 @@
+make all -j 64
+make test -j 64
+make runtest -j 64
diff --git a/models/bvlc_alexnet/train_val_fp16.prototxt b/models/bvlc_alexnet/train_val_fp16.prototxt
index d2383884..3eb6bf8f 100644
--- a/models/bvlc_alexnet/train_val_fp16.prototxt
+++ b/models/bvlc_alexnet/train_val_fp16.prototxt
@@ -13,10 +13,10 @@ layer {
   transform_param {
     mirror: true
     crop_size: 227
-    mean_file: "/home/hiroki11x/ImageNet/imagenet_mean.binaryproto"
+    mean_file: "/mnt/nas101/hiroki11x/imagenet_mean.binaryproto"
   }
   data_param {
-    source: "/home/hiroki11x/ImageNet/ilsvrc12_train_lmdb"
+    source: "/mnt/nas101/hiroki11x/ilsvrc12_train_lmdb"
     batch_size: 256
 #    batch_size: 1024 # DGX1
     backend: LMDB
@@ -31,11 +31,11 @@ layer {
   transform_param {
     mirror: false
     crop_size: 227
-    mean_file: "/home/hiroki11x/ImageNet/imagenet_mean.binaryproto"
+    mean_file: "/mnt/nas101/hiroki11x/imagenet_mean.binaryproto"
   }
   data_param {
-    source: "/home/hiroki11x/ImageNet/ilsvrc12_val_lmdb"
-    batch_size: 10
+    source: "/mnt/nas101/hiroki11x/ilsvrc12_val_lmdb"
+    batch_size: 32
 #    batch_size: 256 # DGx1
     backend: LMDB
 }
diff --git a/run.sh b/run.sh
index ca9c633d..52b2daf1 100755
--- a/run.sh
+++ b/run.sh
@@ -27,11 +27,17 @@
 PREFIX_PATH=$PREFIX_PATH:$LOCAL/$s
 done
 
+for file in `git grep -l '"/home/hiroki11x/env/local/cuda/include/cudnn.h"'`; do
+sed -i -e \
+'s@"/home/hiroki11x/env/local/cuda/include/cudnn.h"@@g' \
+"$file"
+done
 
 CMAKE_PREFIX_PATH=$PYTHON_INCLUDE:$PYTHON_LIB:$HDF5_HL_LIBRARIES:$PREFIX_PATH cmake \
 -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda \
--DCMAKE_INSTALL_PREFIX=/home/hiroki11x/dl/nvcaffe \
+-DCMAKE_INSTALL_PREFIX=/home/hiroki11x/dl/nvcaffe/local \
 -DUSE_NCCL=ON \
+-DAtlas_LAPACK_LIBRARY=/home/hiroki11x/env/local/ATLAS \
 -DUSE_LEVELDB=OFF \
 .. | tee configure.log
diff --git a/share/Caffe/CaffeConfig.cmake b/share/Caffe/CaffeConfig.cmake
deleted file mode 100644
index ed7f711f..00000000
--- a/share/Caffe/CaffeConfig.cmake
+++ /dev/null
@@ -1,65 +0,0 @@
-# Config file for the Caffe package.
-# -# Note: -# Caffe and this config file depends on opencv, -# so put `find_package(OpenCV)` before searching Caffe -# via `find_package(Caffe)`. All other lib/includes -# dependencies are hard coded in the file -# -# After successful configuration the following variables -# will be defined: -# -# Caffe_INCLUDE_DIRS - Caffe include directories -# Caffe_LIBRARIES - libraries to link against -# Caffe_DEFINITIONS - a list of definitions to pass to compiler -# -# Caffe_HAVE_CUDA - signals about CUDA support -# Caffe_HAVE_CUDNN - signals about cuDNN support -# -# -# -# OpenCV dependency - -if(ON) - if(NOT OpenCV_FOUND) - set(Caffe_OpenCV_CONFIG_PATH "/home/hiroki11/env/local/opencv_2.4.13/share/OpenCV") - if(Caffe_OpenCV_CONFIG_PATH) - get_filename_component(Caffe_OpenCV_CONFIG_PATH ${Caffe_OpenCV_CONFIG_PATH} ABSOLUTE) - - if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core) - message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}") - include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake) - endif() - - else() - find_package(OpenCV REQUIRED) - endif() - unset(Caffe_OpenCV_CONFIG_PATH) - endif() -endif() - -# Compute paths -get_filename_component(Caffe_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) -set(Caffe_INCLUDE_DIRS "/home/hiroki11/env/local/cuda/include;/home/hiroki11/env/local/boost_1.63.0/include;/home/hiroki11/env/local/glog_0.3.4/include;/usr/include;/home/hiroki11/env/local/hdf5_1.10.0/include;/home/hiroki11/env/local/lmdb_0.9.18/include;/home/hiroki11/env/local/leveldb/include;/home/hiroki11/env/local/snappy_1.1.4/include;/usr/local/cuda-8.0/include;/home/hiroki11/env/local/opencv_2.4.13/include/opencv;/home/hiroki11/env/local/opencv_2.4.13/include;/home/hiroki11/env/local/atlas_3.10.3/include;/home/hiroki11/env/local/nccl_1.3.4/include") - -get_filename_component(__caffe_include "${Caffe_CMAKE_DIR}/../../include" ABSOLUTE) -list(APPEND Caffe_INCLUDE_DIRS ${__caffe_include}) -unset(__caffe_include) - - -# Our library dependencies -if(NOT TARGET caffe AND NOT caffe_BINARY_DIR) - include("${Caffe_CMAKE_DIR}/CaffeTargets.cmake") -endif() - -# List of IMPORTED libs created by CaffeTargets.cmake -set(Caffe_LIBRARIES caffe) - -# Definitions -set(Caffe_DEFINITIONS "-DUSE_OPENCV;-DUSE_LMDB;-DUSE_LEVELDB;-DUSE_NCCL") - -# Cuda support variables -set(Caffe_CPU_ONLY OFF) -set(Caffe_HAVE_CUDA TRUE) -set(Caffe_HAVE_CUDNN TRUE) - diff --git a/share/Caffe/CaffeTargets-release.cmake b/share/Caffe/CaffeTargets-release.cmake deleted file mode 100644 index 2241bb62..00000000 --- a/share/Caffe/CaffeTargets-release.cmake +++ /dev/null @@ -1,30 +0,0 @@ -#---------------------------------------------------------------- -# Generated CMake target import file for configuration "Release". -#---------------------------------------------------------------- - -# Commands may need to know the format version. 
-set(CMAKE_IMPORT_FILE_VERSION 1) - -# Import target "caffe" for configuration "Release" -set_property(TARGET caffe APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) -set_target_properties(caffe PROPERTIES - IMPORTED_LINK_INTERFACE_LIBRARIES_RELEASE "proto;proto;/home/hiroki11/env/local/boost_1.63.0/lib/libboost_system.so;/home/hiroki11/env/local/boost_1.63.0/lib/libboost_thread.so;/home/hiroki11/env/local/boost_1.63.0/lib/libboost_filesystem.so;-lpthread;/home/hiroki11/env/local/glog_0.3.4/lib/libglog.so;/usr/lib64/libgflags.so;/usr/lib64/libprotobuf.so;-lpthread;/home/hiroki11/env/local/hdf5_1.10.0/lib/libhdf5_hl.so;/home/hiroki11/env/local/hdf5_1.10.0/lib/libhdf5.so;/home/hiroki11/env/local/hdf5_1.10.0/lib/libhdf5_hl.so;/home/hiroki11/env/local/hdf5_1.10.0/lib/libhdf5.so;/home/hiroki11/env/local/lmdb_0.9.18/lib/liblmdb.so;/home/hiroki11/env/local/leveldb/lib/libleveldb.so;/home/hiroki11/env/local/snappy_1.1.4/lib/libsnappy.so;/usr/local/cuda-8.0/lib64/libcudart.so;/usr/local/cuda-8.0/lib64/libcurand.so;/usr/local/cuda-8.0/lib64/libcublas.so;/home/hiroki11/env/local/cuda/lib64/libcudnn.so;opencv_core;opencv_highgui;opencv_imgproc;/home/hiroki11/env/local/atlas_3.10.3/lib/liblapack.a;/home/hiroki11/env/local/atlas_3.10.3/lib/libptcblas.a;/home/hiroki11/env/local/atlas_3.10.3/lib/libatlas.a;/usr/lib64/libpython2.7.so;/home/hiroki11/env/local/boost_1.63.0/lib/libboost_python.so;/home/hiroki11/env/local/nccl_1.3.4/lib/libnccl.so;/usr/lib64/nvidia/libnvidia-ml.so" - IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libcaffe-nv.so.0.16.1" - IMPORTED_SONAME_RELEASE "libcaffe-nv.so.0.16" - ) - -list(APPEND _IMPORT_CHECK_TARGETS caffe ) -list(APPEND _IMPORT_CHECK_FILES_FOR_caffe "${_IMPORT_PREFIX}/lib/libcaffe-nv.so.0.16.1" ) - -# Import target "proto" for configuration "Release" -set_property(TARGET proto APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) -set_target_properties(proto PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" - IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libproto.a" - ) - -list(APPEND _IMPORT_CHECK_TARGETS proto ) -list(APPEND _IMPORT_CHECK_FILES_FOR_proto "${_IMPORT_PREFIX}/lib/libproto.a" ) - -# Commands beyond this point should not need to know the version. -set(CMAKE_IMPORT_FILE_VERSION) diff --git a/share/Caffe/CaffeTargets.cmake b/share/Caffe/CaffeTargets.cmake deleted file mode 100644 index 4723a92a..00000000 --- a/share/Caffe/CaffeTargets.cmake +++ /dev/null @@ -1,86 +0,0 @@ -# Generated by CMake 2.8.12.2 - -if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5) - message(FATAL_ERROR "CMake >= 2.6.0 required") -endif() -cmake_policy(PUSH) -cmake_policy(VERSION 2.6) -#---------------------------------------------------------------- -# Generated CMake target import file. -#---------------------------------------------------------------- - -# Commands may need to know the format version. -set(CMAKE_IMPORT_FILE_VERSION 1) - -# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
-set(_targetsDefined)
-set(_targetsNotDefined)
-set(_expectedTargets)
-foreach(_expectedTarget caffe proto)
-  list(APPEND _expectedTargets ${_expectedTarget})
-  if(NOT TARGET ${_expectedTarget})
-    list(APPEND _targetsNotDefined ${_expectedTarget})
-  endif()
-  if(TARGET ${_expectedTarget})
-    list(APPEND _targetsDefined ${_expectedTarget})
-  endif()
-endforeach()
-if("${_targetsDefined}" STREQUAL "${_expectedTargets}")
-  set(CMAKE_IMPORT_FILE_VERSION)
-  cmake_policy(POP)
-  return()
-endif()
-if(NOT "${_targetsDefined}" STREQUAL "")
-  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n")
-endif()
-unset(_targetsDefined)
-unset(_targetsNotDefined)
-unset(_expectedTargets)
-
-
-# Compute the installation prefix relative to this file.
-get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
-get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
-get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
-
-# Create imported target caffe
-add_library(caffe SHARED IMPORTED)
-
-# Create imported target proto
-add_library(proto STATIC IMPORTED)
-
-# Load information for each installed configuration.
-get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
-file(GLOB CONFIG_FILES "${_DIR}/CaffeTargets-*.cmake")
-foreach(f ${CONFIG_FILES})
-  include(${f})
-endforeach()
-
-# Cleanup temporary variables.
-set(_IMPORT_PREFIX)
-
-# Loop over all imported files and verify that they actually exist
-foreach(target ${_IMPORT_CHECK_TARGETS} )
-  foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} )
-    if(NOT EXISTS "${file}" )
-      message(FATAL_ERROR "The imported target \"${target}\" references the file
-   \"${file}\"
-but this file does not exist. Possible reasons include:
-* The file was deleted, renamed, or moved to another location.
-* An install or uninstall procedure did not complete successfully.
-* The installation package was faulty and contained
-   \"${CMAKE_CURRENT_LIST_FILE}\"
-but not all the files it references.
-")
-    endif()
-  endforeach()
-  unset(_IMPORT_CHECK_FILES_FOR_${target})
-endforeach()
-unset(_IMPORT_CHECK_TARGETS)
-
-# This file does not depend on other imported targets which have
-# been exported from the same project but in a separate export set.
-
-# Commands beyond this point should not need to know the version.
-set(CMAKE_IMPORT_FILE_VERSION)
-cmake_policy(POP)
diff --git a/tools/extra/README.md b/tools/extra/README.md
new file mode 100644
index 00000000..83e314b8
--- /dev/null
+++ b/tools/extra/README.md
@@ -0,0 +1,12 @@
+gnuplot test_plot
+-> creates the test accuracy vs. training time graph
+
+gnuplot train_plot
+-> creates the training loss vs. training iterations graph
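+
+For example, for an AlexNet FP16 run (the log name below is a placeholder;
+use whatever log your training run produced):
+
+    ./parse_log.sh alexnet_fp16.log   # writes alexnet_fp16.log.train / .log.test
+    gnuplot alexnet_fp16_test_plot    # writes alexnet_fp16_test.png
+    gnuplot alexnet_fp16_train_plot   # writes alexnet_fp16_train.png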
diff --git a/tools/extra/alexnet_fp16_test_plot b/tools/extra/alexnet_fp16_test_plot
new file mode 100755
index 00000000..05b894d1
--- /dev/null
+++ b/tools/extra/alexnet_fp16_test_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "alexnet_fp16_test.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Test accuracy vs. training time
+set title "Test accuracy vs. training time"
+set xlabel "Training time"
+set ylabel "Test accuracy"
+plot "alexnet_fp16.log.test" using 2:3 title "alexnet_fp16"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/alexnet_fp16_train_plot b/tools/extra/alexnet_fp16_train_plot
new file mode 100755
index 00000000..dc4d45b0
--- /dev/null
+++ b/tools/extra/alexnet_fp16_train_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "alexnet_fp16_train.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Training loss vs. training iterations
+set title "Training loss vs. training iterations"
+set xlabel "Training iterations"
+set ylabel "Training loss"
+plot "alexnet_fp16.log.train" using 1:3 title "alexnet_fp16"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/alexnet_test_plot b/tools/extra/alexnet_test_plot
new file mode 100755
index 00000000..1402ef26
--- /dev/null
+++ b/tools/extra/alexnet_test_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "alexnet_test.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Test accuracy vs. training time
+set title "Test accuracy vs. training time"
+set xlabel "Training time"
+set ylabel "Test accuracy"
+plot "alexnet.log.test" using 2:3 title "alexnet"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/alexnet_train_plot b/tools/extra/alexnet_train_plot
new file mode 100755
index 00000000..759d9e06
--- /dev/null
+++ b/tools/extra/alexnet_train_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "alexnet_train.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Training loss vs. training iterations
+set title "Training loss vs. training iterations"
+set xlabel "Training iterations"
+set ylabel "Training loss"
+plot "alexnet.log.train" using 1:3 title "alexnet"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/basic_plot/test_plot b/tools/extra/basic_plot/test_plot
new file mode 100755
index 00000000..6d0298a7
--- /dev/null
+++ b/tools/extra/basic_plot/test_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "alexnet_fp16_test.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Test accuracy vs. training time
+set title "Test accuracy vs. training time"
+set xlabel "Training time"
+set ylabel "Test accuracy"
+plot "train_alexnet_fp16.out.test" using 2:3 title "alexnet_fp16"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/basic_plot/train_plot b/tools/extra/basic_plot/train_plot
new file mode 100755
index 00000000..187f7563
--- /dev/null
+++ b/tools/extra/basic_plot/train_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "alexnet_fp16_training.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Training loss vs. training iterations
+set title "Training loss vs. training iterations"
+set xlabel "Training iterations"
+set ylabel "Training loss"
+plot "train_alexnet_fp16.out.train" using 1:3 title "alexnet_fp16"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/googlenet_fp16_test_plot b/tools/extra/googlenet_fp16_test_plot
new file mode 100755
index 00000000..7c78009a
--- /dev/null
+++ b/tools/extra/googlenet_fp16_test_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "googlenet_fp16_test.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Test accuracy vs. training time
+set title "Test accuracy vs. training time"
+set xlabel "Training time"
+set ylabel "Test accuracy"
+plot "googlenet_fp16.log.test" using 2:3 title "googlenet_fp16"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/googlenet_fp16_train_plot b/tools/extra/googlenet_fp16_train_plot
new file mode 100755
index 00000000..c21eed6b
--- /dev/null
+++ b/tools/extra/googlenet_fp16_train_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "googlenet_fp16_train.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Training loss vs. training iterations
+set title "Training loss vs. training iterations"
training iterations" +set xlabel "Training iterations" +set ylabel "Training loss" +plot "googlenet_fp16.log.train" using 1:3 title "googlenet_fp16" + +# Training loss vs. training time +# plot "mnist.log.train" using 2:3 title "mnist" + +# Learning rate vs. training iterations; +# plot "mnist.log.train" using 1:4 title "mnist" + +# Learning rate vs. training time; +# plot "mnist.log.train" using 2:4 title "mnist" + + +###### Fields in the data file your_log_name.log.test are +###### Iters Seconds TestAccuracy TestLoss + +# Test loss vs. training iterations +# plot "mnist.log.test" using 1:4 title "mnist" + +# Test accuracy vs. training iterations +# plot "mnist.log.test" using 1:3 title "mnist" + +# Test loss vs. training time +# plot "mnist.log.test" using 2:4 title "mnist" + +# Test accuracy vs. training time +# plot "mnist.log.test" using 2:3 title "mnist" diff --git a/tools/extra/googlenet_test_plot b/tools/extra/googlenet_test_plot new file mode 100755 index 00000000..f6655ebf --- /dev/null +++ b/tools/extra/googlenet_test_plot @@ -0,0 +1,69 @@ +# These snippets serve only as basic examples. +# Customization is a must. +# You can copy, paste, edit them in whatever way you want. +# Be warned that the fields in the training log may change in the future. +# You had better check the data files before designing your own plots. + +# Please generate the neccessary data files with +# /path/to/caffe/tools/extra/parse_log.sh before plotting. +# Example usage: +# ./parse_log.sh mnist.log +# Now you have mnist.log.train and mnist.log.test. +# gnuplot mnist.gnuplot + +# The fields present in the data files that are usually proper to plot along +# the y axis are test accuracy, test loss, training loss, and learning rate. +# Those should plot along the x axis are training iterations and seconds. +# Possible combinations: +# 1. Test accuracy (test score 0) vs. training iterations / time; +# 2. Test loss (test score 1) time; +# 3. Training loss vs. training iterations / time; +# 4. Learning rate vs. training iterations / time; +# A rarer one: Training time vs. iterations. + +# What is the difference between plotting against iterations and time? +# If the overhead in one iteration is too high, one algorithm might appear +# to be faster in terms of progress per iteration and slower when measured +# against time. And the reverse case is not entirely impossible. Thus, some +# papers chose to only publish the more favorable type. It is your freedom +# to decide what to plot. + +reset +set terminal png +set output "googlenet_test.png" +set style data lines +set key right + +###### Fields in the data file your_log_name.log.train are +###### Iters Seconds TrainingLoss LearningRate + +# Training loss vs. training iterations +set title "Test accuracy vs. training time" +set xlabel "Training time" +set ylabel "Test accuracy" +plot "googlenet.log.test" using 1:3 title "googlenet" + +# Training loss vs. training time +# plot "mnist.log.train" using 2:3 title "mnist" + +# Learning rate vs. training iterations; +# plot "mnist.log.train" using 1:4 title "mnist" + +# Learning rate vs. training time; +# plot "mnist.log.train" using 2:4 title "mnist" + + +###### Fields in the data file your_log_name.log.test are +###### Iters Seconds TestAccuracy TestLoss + +# Test loss vs. training iterations +# plot "mnist.log.test" using 1:4 title "mnist" + +# Test accuracy vs. training iterations +# plot "mnist.log.test" using 1:3 title "mnist" + +# Test loss vs. 
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/googlenet_train_plot b/tools/extra/googlenet_train_plot
new file mode 100755
index 00000000..ec60fa81
--- /dev/null
+++ b/tools/extra/googlenet_train_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "googlenet_train.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Training loss vs. training iterations
+set title "Training loss vs. training iterations"
+set xlabel "Training iterations"
+set ylabel "Training loss"
+plot "googlenet.log.train" using 1:3 title "googlenet"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/img_12h/alexnet_fp16_test.png b/tools/extra/img_12h/alexnet_fp16_test.png
new file mode 100644
index 00000000..6645851d
Binary files /dev/null and b/tools/extra/img_12h/alexnet_fp16_test.png differ
diff --git a/tools/extra/img_12h/alexnet_fp16_train.png b/tools/extra/img_12h/alexnet_fp16_train.png
new file mode 100644
index 00000000..d76ccb16
Binary files /dev/null and b/tools/extra/img_12h/alexnet_fp16_train.png differ
diff --git a/tools/extra/img_12h/alexnet_test.png b/tools/extra/img_12h/alexnet_test.png
new file mode 100644
index 00000000..6743e79b
Binary files /dev/null and b/tools/extra/img_12h/alexnet_test.png differ
diff --git a/tools/extra/img_12h/alexnet_train.png b/tools/extra/img_12h/alexnet_train.png
new file mode 100644
index 00000000..45489490
Binary files /dev/null and b/tools/extra/img_12h/alexnet_train.png differ
diff --git a/tools/extra/img_12h/googlenet_fp16_test.png b/tools/extra/img_12h/googlenet_fp16_test.png
new file mode 100644
index 00000000..06c45c27
Binary files /dev/null and b/tools/extra/img_12h/googlenet_fp16_test.png differ
diff --git a/tools/extra/img_12h/googlenet_fp16_train.png b/tools/extra/img_12h/googlenet_fp16_train.png
new file mode 100644
index 00000000..65c1f958
Binary files /dev/null and b/tools/extra/img_12h/googlenet_fp16_train.png differ
diff --git a/tools/extra/img_12h/googlenet_test.png b/tools/extra/img_12h/googlenet_test.png
new file mode 100644
index 00000000..5b05cfa5
Binary files /dev/null and b/tools/extra/img_12h/googlenet_test.png differ
diff --git a/tools/extra/img_12h/googlenet_train.png b/tools/extra/img_12h/googlenet_train.png
new file mode 100644
index 00000000..e11326cd
Binary files /dev/null and b/tools/extra/img_12h/googlenet_train.png differ
diff --git a/tools/extra/img_12h/resnet50_fp16_test.png b/tools/extra/img_12h/resnet50_fp16_test.png
new file mode 100644
index 00000000..adb43df9
Binary files /dev/null and b/tools/extra/img_12h/resnet50_fp16_test.png differ
diff --git a/tools/extra/img_12h/resnet50_fp16_train.png b/tools/extra/img_12h/resnet50_fp16_train.png
new file mode 100644
index 00000000..abacdb0a
Binary files /dev/null and b/tools/extra/img_12h/resnet50_fp16_train.png differ
diff --git a/tools/extra/img_12h/resnet50_test.png b/tools/extra/img_12h/resnet50_test.png
new file mode 100644
index 00000000..424d60d5
Binary files /dev/null and b/tools/extra/img_12h/resnet50_test.png differ
diff --git a/tools/extra/img_12h/resnet50_train.png b/tools/extra/img_12h/resnet50_train.png
new file mode 100644
index 00000000..75cf8062
Binary files /dev/null and b/tools/extra/img_12h/resnet50_train.png differ
diff --git a/tools/extra/img_48h/alexnet_fp16_test.png b/tools/extra/img_48h/alexnet_fp16_test.png
new file mode 100644
index 00000000..4042750f
Binary files /dev/null and b/tools/extra/img_48h/alexnet_fp16_test.png differ
diff --git a/tools/extra/img_48h/alexnet_fp16_train.png b/tools/extra/img_48h/alexnet_fp16_train.png
new file mode 100644
index 00000000..e977c187
Binary files /dev/null and b/tools/extra/img_48h/alexnet_fp16_train.png differ
diff --git a/tools/extra/img_48h/alexnet_test.png b/tools/extra/img_48h/alexnet_test.png
new file mode 100644
index 00000000..01fc5fc4
Binary files /dev/null and b/tools/extra/img_48h/alexnet_test.png differ
diff --git a/tools/extra/img_48h/alexnet_train.png b/tools/extra/img_48h/alexnet_train.png
new file mode 100644
index 00000000..b5363c57
Binary files /dev/null and b/tools/extra/img_48h/alexnet_train.png differ
diff --git a/tools/extra/img_48h/googlenet_test.png b/tools/extra/img_48h/googlenet_test.png
new file mode 100644
index 00000000..012ff247
Binary files /dev/null and b/tools/extra/img_48h/googlenet_test.png differ
diff --git a/tools/extra/img_48h/googlenet_train.png b/tools/extra/img_48h/googlenet_train.png
new file mode 100644
index 00000000..7c0d99f7
Binary files /dev/null and b/tools/extra/img_48h/googlenet_train.png differ
diff --git a/tools/extra/img_48h/resnet50_fp16_test.png b/tools/extra/img_48h/resnet50_fp16_test.png
new file mode 100644
index 00000000..3ac77557
Binary files /dev/null and b/tools/extra/img_48h/resnet50_fp16_test.png differ
diff --git a/tools/extra/img_48h/resnet50_fp16_train.png b/tools/extra/img_48h/resnet50_fp16_train.png
new file mode 100644
index 00000000..af4d80ad
Binary files /dev/null and b/tools/extra/img_48h/resnet50_fp16_train.png differ
diff --git a/tools/extra/img_48h/resnet50_test.png b/tools/extra/img_48h/resnet50_test.png
new file mode 100644
index 00000000..1c679d9f
Binary files /dev/null and b/tools/extra/img_48h/resnet50_test.png differ
diff --git a/tools/extra/img_48h/resnet50_train.png b/tools/extra/img_48h/resnet50_train.png
new file mode 100644
index 00000000..df7d301b
Binary files /dev/null and b/tools/extra/img_48h/resnet50_train.png differ
diff --git a/tools/extra/resnet50_fp16_test_plot b/tools/extra/resnet50_fp16_test_plot
new file mode 100755
index 00000000..b24d005a
--- /dev/null
+++ b/tools/extra/resnet50_fp16_test_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "resnet50_fp16_test.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Test accuracy vs. training time
+set title "Test accuracy vs. training time"
+set xlabel "Training time"
+set ylabel "Test accuracy"
+plot "resnet50_fp16.log.test" using 2:3 title "resnet50_fp16"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/resnet50_fp16_train_plot b/tools/extra/resnet50_fp16_train_plot
new file mode 100755
index 00000000..ce85c260
--- /dev/null
+++ b/tools/extra/resnet50_fp16_train_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "resnet50_fp16_train.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Training loss vs. training iterations
+set title "Training loss vs. training iterations"
+set xlabel "Training iterations"
+set ylabel "Training loss"
+plot "resnet50_fp16.log.train" using 1:3 title "resnet50_fp16"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/resnet50_test_plot b/tools/extra/resnet50_test_plot
new file mode 100755
index 00000000..b8cc998d
--- /dev/null
+++ b/tools/extra/resnet50_test_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "resnet50_test.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Test accuracy vs. training time
+set title "Test accuracy vs. training time"
+set xlabel "Training time"
+set ylabel "Test accuracy"
+plot "resnet50.log.test" using 2:3 title "resnet50"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"
diff --git a/tools/extra/resnet50_train_plot b/tools/extra/resnet50_train_plot
new file mode 100755
index 00000000..096dfb6f
--- /dev/null
+++ b/tools/extra/resnet50_train_plot
@@ -0,0 +1,69 @@
+# These snippets serve only as basic examples.
+# Customization is a must.
+# You can copy, paste, edit them in whatever way you want.
+# Be warned that the fields in the training log may change in the future.
+# You had better check the data files before designing your own plots.
+
+# Please generate the necessary data files with
+# /path/to/caffe/tools/extra/parse_log.sh before plotting.
+# Example usage:
+# ./parse_log.sh mnist.log
+# Now you have mnist.log.train and mnist.log.test.
+# gnuplot mnist.gnuplot
+
+# The fields present in the data files that are usually proper to plot along
+# the y axis are test accuracy, test loss, training loss, and learning rate.
+# Those that should be plotted along the x axis are training iterations and seconds.
+# Possible combinations:
+# 1. Test accuracy (test score 0) vs. training iterations / time;
+# 2. Test loss (test score 1) vs. training iterations / time;
+# 3. Training loss vs. training iterations / time;
+# 4. Learning rate vs. training iterations / time;
+# A rarer one: Training time vs. iterations.
+
+# What is the difference between plotting against iterations and time?
+# If the overhead in one iteration is too high, one algorithm might appear
+# to be faster in terms of progress per iteration and slower when measured
+# against time. And the reverse case is not entirely impossible. Thus, some
+# papers chose to publish only the more favorable type. It is up to you
+# to decide what to plot.
+
+reset
+set terminal png
+set output "resnet50_train.png"
+set style data lines
+set key right
+
+###### Fields in the data file your_log_name.log.train are
+###### Iters Seconds TrainingLoss LearningRate
+
+# Training loss vs. training iterations
+set title "Training loss vs. training iterations"
+set xlabel "Training iterations"
+set ylabel "Training loss"
+plot "resnet50.log.train" using 1:3 title "resnet50"
+
+# Training loss vs. training time
+# plot "mnist.log.train" using 2:3 title "mnist"
+
+# Learning rate vs. training iterations;
+# plot "mnist.log.train" using 1:4 title "mnist"
+
+# Learning rate vs. training time;
+# plot "mnist.log.train" using 2:4 title "mnist"
+
+
+###### Fields in the data file your_log_name.log.test are
+###### Iters Seconds TestAccuracy TestLoss
+
+# Test loss vs. training iterations
+# plot "mnist.log.test" using 1:4 title "mnist"
+
+# Test accuracy vs. training iterations
+# plot "mnist.log.test" using 1:3 title "mnist"
+
+# Test loss vs. training time
+# plot "mnist.log.test" using 2:4 title "mnist"
+
+# Test accuracy vs. training time
+# plot "mnist.log.test" using 2:3 title "mnist"