diff --git a/Bing/LICENSE b/Bing/LICENSE
new file mode 100644
index 000000000..7f8edefa4
--- /dev/null
+++ b/Bing/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014, Ming-Ming Cheng & Shuai Zheng
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Bing/README.md b/Bing/README.md
new file mode 100644
index 000000000..7118cf0e6
--- /dev/null
+++ b/Bing/README.md
@@ -0,0 +1,63 @@
+Objectness Proposal Generator with BING
+==========
+A Linux (Ubuntu 14.04) / Mac OS X Yosemite / iOS 8.1 implementation of the BING
+objectness proposal estimator; it runs at about 1000 FPS on a Dell T7600
+workstation under Ubuntu 14.04.
+## INSTALL
+To make this program run fast, build it in release mode:
+> cmake -DCMAKE_BUILD_TYPE=Release ../
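+
+For example, building in a separate directory (adjust the relative source path
+to wherever Src/CMakeLists.txt lives):
+> mkdir build && cd build
+> cmake -DCMAKE_BUILD_TYPE=Release ../
+> make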
+
+## DEMO
+If you run this on Ubuntu 14.04 or a similar Linux system, adjust the dataset
+path in main.cpp:
+> DataSetVOC voc2007("YOUR_PATH_TO_THE_VOC2007DATA");
+
+Note that WinRecall.m is generated by this code; you do not need to provide it.
+If you want to use this code with other datasets, one simple solution is to
+arrange their folders like those of VOC 2007.
+
+## Introduction
+This is the Linux version of the 1000 FPS BING library for efficient objectness
+proposal estimation. We would appreciate it if you cite the papers below when
+you use this code.
+```
+@inproceedings{BingObj2014,
+ title={{BING}: Binarized Normed Gradients for Objectness Estimation at 300fps},
+ author={Ming-Ming Cheng and Ziming Zhang and Wen-Yan Lin and Philip H. S. Torr},
+ booktitle={IEEE CVPR},
+ year={2014},
+}
+```
+```
+@inproceedings{depthobjectproposals_GCPR2015,
+author = {Shuai Zheng and Victor Adrian Prisacariu and Melinos Averkiou and Ming-Ming Cheng and Niloy J. Mitra and Jamie Shotton and Philip H. S. Torr and Carsten Rother},
+title = {Object Proposal Estimation in Depth Images using Compact 3D Shape Manifolds},
+booktitle = {German Conference on Pattern Recognition (GCPR)},
+year = {2015}
+}
+```
+The original author, Ming-Ming Cheng, has already released the source code for
+the 64-bit Windows platform. This library provides the code for Linux/Mac/iOS
+users. You can maintain the code with the Qt Creator IDE.
+
+Please find the original Windows code / FAQ / paper at this link:
+http://mmcheng.net/bing/
+
+## FAQ
+To make the code run like the original Windows version, you need to download
+the PASCAL VOC 2007 images/annotations from the website:
+(http://pascallin.ecs.soton.ac.uk/challenges/VOC/voc2007/#testdata)
+
+We have tested the code: it produces the same accuracy results as the original
+Windows version, and it runs at 1111 FPS (frames per second) on Ubuntu 12.04
+with a Dell T7600 workstation, which has two Intel Xeon E5-2687W CPUs (3.1 GHz,
+1600 MHz) and 64 GB of 1600 MHz DDR3 memory.
+
+## Author Info
+Author: Ming-Ming Cheng removethisifyouarehuman-cmm.thu@gmail.com
+Linux Author: Shuai Zheng (Kyle) removethisifyouarehuman-szhengcvpr@gmail.com
+Please find more information at http://kylezheng.org/objectproposal/
+Date: 19 February
+
+## License
+BSD license.
+
diff --git a/Bing/Src/CMakeLists.txt b/Bing/Src/CMakeLists.txt
new file mode 100644
index 000000000..9c1af52de
--- /dev/null
+++ b/Bing/Src/CMakeLists.txt
@@ -0,0 +1,30 @@
+project(BING_linux)
+cmake_minimum_required(VERSION 2.8)
+
+find_package(OpenMP REQUIRED)
+
+# compile LibLinear
+include_directories("LibLinear")
+file(GLOB SOURCES "LibLinear/*.cpp" "LibLinear/blas/*.c")
+add_library(LibLinear STATIC ${SOURCES})
+
+#OPENCV
+#include_directories(/usr/local/include)
+#link_directories(/usr/local/lib)
+##if this does not work, then try to uncomment the things below.
+find_package( OpenCV REQUIRED )
+if(OpenCV_FOUND)
+ include_directories( ${OpenCV_INCLUDE_DIRS} )
+endif( OpenCV_FOUND )
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -fopenmp -ftree-vectorize")
+#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -fopenmp -ftest-coverage -fprofile-arcs")
+
+# compile BING
+file(GLOB SOURCES "*.cpp")
+add_library(BING STATIC ${SOURCES})
+
+add_executable(${PROJECT_NAME} main.cpp)
+
+set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wall")
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wall")
+target_link_libraries(${PROJECT_NAME} opencv_core opencv_imgproc opencv_highgui opencv_imgcodecs ${EXTERNAL_LIBS} BING LibLinear)
diff --git a/Bing/Src/CmFile.cpp b/Bing/Src/CmFile.cpp
new file mode 100644
index 000000000..804c2ec3b
--- /dev/null
+++ b/Bing/Src/CmFile.cpp
@@ -0,0 +1,173 @@
+#include "kyheader.h"
+
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+
+// Get image names from a wildcard. Eg: GetNames("D:\\*.jpg", imgNames);
+int CmFile::GetNames(CStr &_nameW, vecS &_names)
+{
+    string _dir;
+    return GetNames(_nameW, _names, _dir); // delegate to the 3-argument overload below
+}
+int CmFile::GetNames(CStr &_nameW, vecS &_names, string &_dir)
+{
+ _dir = GetFolder(_nameW);
+ _names.clear();
+
+ DIR *dir;
+ struct dirent *ent;
+ if((dir = opendir(_dir.c_str()))!=NULL){
+ //print all the files and directories within directory
+ while((ent = readdir(dir))!=NULL){
+ if(ent->d_name[0] == '.')
+ continue;
+            if(ent->d_type == DT_DIR) // skip sub-directories
+                continue;
+ _names.push_back(ent->d_name);
+ }
+ closedir(dir);
+ } else {
+ perror("");
+ return EXIT_FAILURE;
+ }
+ return (int)_names.size();
+}
+int CmFile::GetSubFolders(CStr &folder, vecS &subFolders)
+{
+ subFolders.clear();
+ string nameWC = GetFolder(folder);//folder + "/*";
+
+ DIR *dir;
+ struct dirent *ent;
+ if((dir = opendir(nameWC.c_str()))!=NULL){
+ while((ent = readdir(dir))!=NULL){
+ if(ent->d_name[0] == '.')
+ continue;
+            if(ent->d_type == DT_DIR){ // keep only sub-directories
+ subFolders.push_back(ent->d_name);
+ }
+ }
+ closedir(dir);
+ } else {
+ perror("");
+ return EXIT_FAILURE;
+ }
+ return (int)subFolders.size();
+}
+int CmFile::GetNames(CStr& rootFolder, CStr &fileW, vecS &names)
+{
+ GetNames(rootFolder + fileW, names);
+ vecS subFolders, tmpNames;
+    int subNum = CmFile::GetSubFolders(rootFolder, subFolders);
+ for (int i = 0; i < subNum; i++){
+ subFolders[i] += "/";
+ int subNum = GetNames(rootFolder + subFolders[i], fileW, tmpNames);
+ for (int j = 0; j < subNum; j++)
+ names.push_back(subFolders[i] + tmpNames[j]);
+ }
+ return (int)names.size();
+}
+int CmFile::GetNamesNE(CStr& nameWC, vecS &names)
+{
+ string dir = string();
+ string ext = string();
+ int fNum = GetNames(nameWC, names, dir);
+ ext = GetExtention(nameWC);
+ for (int i = 0; i < fNum; i++)
+ names[i] = GetNameNE(names[i]);
+ return fNum;
+}
+int CmFile::GetNamesNE(CStr& nameWC, vecS &names, string &dir, string &ext)
+{
+ int fNum = GetNames(nameWC, names, dir);
+ ext = GetExtention(nameWC);
+ for (int i = 0; i < fNum; i++)
+ names[i] = GetNameNE(names[i]);
+ return fNum;
+}
+int CmFile::GetNamesNE(CStr& rootFolder, CStr &fileW, vecS &names)
+{
+ int fNum = GetNames(rootFolder, fileW, names);
+ int extS = GetExtention(fileW).size();
+ for (int i = 0; i < fNum; i++)
+ names[i].resize(names[i].size() - extS);
+ return fNum;
+}
+bool CmFile::MkDir(CStr &_path)
+{
+ if(_path.size() == 0)
+ return false;
+ static char buffer[1024];
+ strcpy(buffer, _S(_path));
+#ifdef _WIN32
+ for (int i = 0; buffer[i] != 0; i ++) {
+ if (buffer[i] == '\\' || buffer[i] == '/') {
+ buffer[i] = '\0';
+ CreateDirectoryA(buffer, 0);
+ buffer[i] = '/';
+ }
+ }
+ return CreateDirectoryA(_S(_path), 0);
+#else
+ for (int i = 0; buffer[i] != 0; i ++) {
+ if (buffer[i] == '\\' || buffer[i] == '/') {
+ buffer[i] = '\0';
+ mkdir(buffer, 0755);
+ buffer[i] = '/';
+ }
+ }
+ return mkdir(_S(_path), 0755);
+#endif
+}
+vecS CmFile::loadStrList(CStr &fName)
+{
+ ifstream fIn(fName);
+ string line;
+ vecS strs;
+ while(getline(fIn, line) && line.size()){
+        unsigned sz = line.size();
+        line.resize(sz - 1); // Drops the trailing character (e.g. '\r'); use the script to convert the VOC format data into the OpenCV format data first
+        //line.resize(sz);
+ strs.push_back(line);
+ }
+ return strs;
+}
+bool CmFile::writeStrList(CStr &fName, const vecS &strs)
+{
+ FILE *f = fopen(_S(fName), "w");
+ if (f == NULL)
+ return false;
+ for (size_t i = 0; i < strs.size(); i++)
+ fprintf(f, "%s\n", _S(strs[i]));
+ fclose(f);
+ return true;
+}
diff --git a/Bing/Src/CmFile.h b/Bing/Src/CmFile.h
new file mode 100644
index 000000000..ca6330fd2
--- /dev/null
+++ b/Bing/Src/CmFile.h
@@ -0,0 +1,75 @@
+#pragma once
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+struct CmFile
+{
+ static inline string GetFolder(CStr& path);
+ static inline string GetName(CStr& path);
+ static inline string GetNameNE(CStr& path);
+ static inline string GetPathNE(CStr& path);
+
+ // Get file names from a wildcard. Eg: GetNames("D:\\*.jpg", imgNames);
+ static int GetNames(CStr &nameW, vecS &names, string &_dir);
+ static int GetNames(CStr &nameW, vecS &names);
+ static int GetNames(CStr& rootFolder, CStr &fileW, vecS &names);
+ static int GetNamesNE(CStr& nameWC, vecS &names, string &dir, string &ext);
+ static int GetNamesNE(CStr& nameWC, vecS &names);
+ static int GetNamesNE(CStr& rootFolder, CStr &fileW, vecS &names);
+ static inline string GetExtention(CStr name);
+
+ static int GetSubFolders(CStr& folder, vecS& subFolders);
+
+ static inline string GetWkDir();
+
+ static bool MkDir(CStr& path);
+ static vecS loadStrList(CStr &fName);
+ static bool writeStrList(CStr &fName, const vecS &strs);
+};
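+
+// A minimal usage sketch (the path is hypothetical):
+//
+//     vecS names;
+//     int n = CmFile::GetNamesNE("/data/VOC2007/JPEGImages/*.jpg", names);
+//     // names now holds the n file names without directory or extension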
+
+/************************************************************************/
+/* Implementation of inline functions */
+/************************************************************************/
+string CmFile::GetFolder(CStr& path)
+{
+ return path.substr(0, path.find_last_of("\\/")+1);
+}
+
+string CmFile::GetName(CStr& path)
+{
+ int start = path.find_last_of("\\/")+1;
+ int end = path.find_last_not_of(' ')+1;
+ return path.substr(start, end - start);
+}
+
+string CmFile::GetNameNE(CStr& path)
+{
+ int start = path.find_last_of("\\/")+1;
+ int end = path.find_last_of('.');
+ if (end >= 0)
+ return path.substr(start, end - start);
+ else
+ return path.substr(start, path.find_last_not_of(' ')+1 - start);
+}
+
+string CmFile::GetPathNE(CStr& path)
+{
+ int end = path.find_last_of('.');
+ if (end >= 0)
+ return path.substr(0, end);
+ else
+ return path.substr(0, path.find_last_not_of(' ') + 1);
+}
+
+string CmFile::GetExtention(CStr name)
+{
+ return name.substr(name.find_last_of('.'));
+}
+/************************************************************************/
+/* Implementations */
+/************************************************************************/
diff --git a/Bing/Src/CmShow.cpp b/Bing/Src/CmShow.cpp
new file mode 100644
index 000000000..7c7c593b0
--- /dev/null
+++ b/Bing/Src/CmShow.cpp
@@ -0,0 +1,82 @@
+#include "kyheader.h"
+#include "CmShow.h"
+
+
+
+typedef pair<int, int> CostiIdx; // (cost, index)
+Mat CmShow::HistBins(CMat& color3f, CMat& val, CStr& title, bool descendShow, CMat &with)
+{
+ // Prepare data
+ int H = 300, spaceH = 6, barH = 10, n = color3f.cols;
+ CV_Assert(color3f.size() == val.size() && color3f.rows == 1);
+ Mat binVal1i, binColor3b, width1i;
+ if (with.size() == val.size())
+ with.convertTo(width1i, CV_32S, 400/sum(with).val[0]); // Default shown width
+ else
+ width1i = Mat(1, n, CV_32S, Scalar(10)); // Default bin width = 10
+ int W = cvRound(sum(width1i).val[0]);
+ color3f.convertTo(binColor3b, CV_8UC3, 255);
+ double maxVal, minVal;
+ minMaxLoc(val, &minVal, &maxVal);
+    printf("HistBins display scale: %g\n", H/max(maxVal, -minVal));
+ val.convertTo(binVal1i, CV_32S, 20000);
+ Size szShow(W, H + spaceH + barH);
+ szShow.height += minVal < 0 && !descendShow ? H + spaceH : 0;
+ Mat showImg3b(szShow, CV_8UC3, Scalar(255, 255, 255));
+ int* binH = (int*)(binVal1i.data);
+ Vec3b* binColor = (Vec3b*)(binColor3b.data);
+ int* binW = (int*)(width1i.data);
+    vector<CostiIdx> costIdx(n);
+ if (descendShow){
+ for (int i = 0; i < n; i++)
+ costIdx[i] = make_pair(binH[i], i);
+        sort(costIdx.begin(), costIdx.end(), std::greater<CostiIdx>());
+ }
+
+ // Show image
+ for (int i = 0, x = 0; i < n; i++){
+ int idx = descendShow ? costIdx[i].second : i;
+ int h = descendShow ? abs(binH[idx]) : binH[idx];
+ Scalar color(binColor[idx]);
+ Rect reg(x, H + spaceH, binW[idx], barH);
+ showImg3b(reg) = color; // Draw bar
+ rectangle(showImg3b, reg, Scalar(0));
+
+ reg.height = abs(h);
+ reg.y = h >= 0 ? H - h : H + 2 * spaceH + barH;
+ showImg3b(reg) = color;
+ rectangle(showImg3b, reg, Scalar(0));
+
+ x += binW[idx];
+ }
+ imshow(title, showImg3b);
+ return showImg3b;
+}
+
+void CmShow::showTinyMat(CStr &title, CMat &m)
+{
+ int scale = 50, sz = m.rows * m.cols;
+ while (sz > 200){
+ scale /= 2;
+ sz /= 4;
+ }
+
+ Mat img;
+ resize(m, img, Size(), scale, scale, CV_INTER_NN);
+ if (img.channels() == 3)
+ cvtColor(img, img, CV_RGB2BGR);
+ SaveShow(img, title);
+}
+
+void CmShow::SaveShow(CMat& img, CStr& title)
+{
+ if (title.size() == 0)
+ return;
+
+ int mDepth = CV_MAT_DEPTH(img.type());
+ double scale = (mDepth == CV_32F || mDepth == CV_64F ? 255 : 1);
+ if (title.size() > 4 && title[title.size() - 4] == '.')
+ imwrite(title, img*scale);
+ else if (title.size())
+ imshow(title, img);
+}
diff --git a/Bing/Src/CmShow.h b/Bing/Src/CmShow.h
new file mode 100644
index 000000000..11d99a046
--- /dev/null
+++ b/Bing/Src/CmShow.h
@@ -0,0 +1,9 @@
+#pragma once
+class CmShow
+{
+public:
+ static Mat HistBins(CMat& color3f, CMat& val, CStr& title, bool descendShow = false, CMat &with = Mat());
+ static void showTinyMat(CStr &title, CMat &m);
+    static void SaveShow(CMat& img, CStr& title); // defined in CmShow.cpp, so not inline
+};
+
diff --git a/Bing/Src/CmTimer.h b/Bing/Src/CmTimer.h
new file mode 100644
index 000000000..7883567f9
--- /dev/null
+++ b/Bing/Src/CmTimer.h
@@ -0,0 +1,89 @@
+#pragma once
+#include <sys/time.h>
+class CmTimer
+{
+public:
+ CmTimer(CStr t):title(t) { is_started = false; gettimeofday(&start_clock,NULL); gettimeofday(&end_clock,NULL); n_starts = 0; }
+
+ ~CmTimer(){ if (is_started) printf("CmTimer '%s' is started and is being destroyed.\n", title.c_str()); }
+
+ inline void Start();
+ inline void Stop();
+ inline void Reset();
+
+ inline bool Report();
+ inline bool StopAndReport() { Stop(); return Report(); }
+ inline float TimeInSeconds();
+
+private:
+ CStr title;
+
+ bool is_started;
+ struct timeval start_clock, end_clock;
+ //clock_t start_clock;
+ //clock_t cumulative_clock;
+ unsigned int n_starts;
+};
+
+/************************************************************************/
+/* Implementations */
+/************************************************************************/
+
+void CmTimer::Start()
+{
+ if (is_started){
+ printf("CmTimer '%s' is already started. Nothing done.\n", title.c_str());
+ return;
+ }
+
+ is_started = true;
+ n_starts++;
+ //start_clock = clock();
+ gettimeofday(&start_clock,NULL);
+}
+
+void CmTimer::Stop()
+{
+ if (!is_started){
+        printf("CmTimer '%s' is not started. Nothing done.\n", title.c_str());
+ return;
+ }
+ gettimeofday(&end_clock,NULL);
+ //cumulative_clock += clock() - start_clock;
+ is_started = false;
+}
+
+void CmTimer::Reset()
+{
+ if (is_started) {
+        printf("CmTimer '%s' is started during the reset request. Only the cumulative time is reset.\n", title.c_str());
+ return;
+ }
+ gettimeofday(&start_clock,NULL);
+ gettimeofday(&end_clock,NULL);
+ //cumulative_clock = 0;
+}
+
+bool CmTimer::Report()
+{
+ if (is_started){
+        printf("CmTimer '%s' is still running. Cannot provide a time report.\n", title.c_str());
+ return false;
+ }
+
+ float timeUsed = TimeInSeconds();
+ printf("[%s] CumuTime: %gs, #run: %d, AvgTime: %gs\n", title.c_str(), timeUsed, n_starts, timeUsed/n_starts);
+ return true;
+}
+
+float CmTimer::TimeInSeconds()
+{
+ if (is_started){
+        printf("CmTimer '%s' is still running. Nothing done.\n", title.c_str());
+ return 0;
+ }
+ return double((end_clock.tv_sec - start_clock.tv_sec) * 1000000u +
+ end_clock.tv_usec - start_clock.tv_usec) / 1.e6;
+ //return float(cumulative_clock) / CLOCKS_PER_SEC;
+}
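+
+// A minimal usage sketch (hypothetical calling code, not part of this header):
+//
+//     CmTimer tm("MatchTemplate");
+//     tm.Start();
+//     // ... code being timed ...
+//     tm.StopAndReport();
+//
+// Start()/Stop() may be paired repeatedly; Report() divides the time measured
+// between the most recent Start()/Stop() pair by the total number of starts.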
+
diff --git a/Bing/Src/DataSetVOC.cpp b/Bing/Src/DataSetVOC.cpp
new file mode 100644
index 000000000..bb99a7de0
--- /dev/null
+++ b/Bing/Src/DataSetVOC.cpp
@@ -0,0 +1,224 @@
+#include "kyheader.h"
+#include "DataSetVOC.h"
+
+
+DataSetVOC::DataSetVOC(CStr &_wkDir)
+{
+ wkDir = _wkDir;
+ resDir = wkDir + "Results/";
+ localDir = wkDir + "Local/";
+ imgPathW = wkDir + "JPEGImages/%s.jpg";
+ annoPathW = wkDir + "Annotations/%s.yml";
+ CmFile::MkDir(resDir);
+ CmFile::MkDir(localDir);
+
+ trainSet = CmFile::loadStrList(wkDir + "ImageSets/Main/train.txt");
+ testSet = CmFile::loadStrList(wkDir + "ImageSets/Main/test.txt");
+ classNames = CmFile::loadStrList(wkDir + "ImageSets/Main/class.txt");
+
+ // testSet.insert(testSet.end(), trainSet.begin(), trainSet.end());
+ // testSet.resize(min(1000, (int)testSet.size()));
+
+ trainNum = trainSet.size();
+ testNum = testSet.size();
+}
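+
+// The constructor above assumes a VOC 2007 style layout under wkDir:
+//   JPEGImages/%s.jpg        - images
+//   Annotations/%s.yml       - annotations in OpenCV YAML format (see cvt2OpenCVYml)
+//   ImageSets/Main/train.txt - training image names, one per line, no extension
+//   ImageSets/Main/test.txt  - testing image names
+//   ImageSets/Main/class.txt - object class names
+// Results/ and Local/ are created under wkDir for outputs and cached data.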
+
+
+Vec4i getMaskRange(CMat &mask1u, int ext = 0)
+{
+ int maxX = INT_MIN, maxY = INT_MIN, minX = INT_MAX, minY = INT_MAX, rows = mask1u.rows, cols = mask1u.cols;
+ for (int r = 0; r < rows; r++) {
+ const byte* data = mask1u.ptr(r);
+ for (int c = 0; c < cols; c++)
+ if (data[c] > 10) {
+ maxX = max(maxX, c);
+ minX = min(minX, c);
+ maxY = max(maxY, r);
+ minY = min(minY, r);
+ }
+ }
+
+ maxX = maxX + ext + 1 < cols ? maxX + ext + 1 : cols;
+ maxY = maxY + ext + 1 < rows ? maxY + ext + 1 : rows;
+ minX = minX - ext > 0 ? minX - ext : 0;
+ minY = minY - ext > 0 ? minY - ext : 0;
+
+ return Vec4i(minX + 1, minY + 1, maxX, maxY); // Rect(minX, minY, maxX - minX, maxY - minY);
+}
+
+
+DataSetVOC::~DataSetVOC(void)
+{
+}
+
+void DataSetVOC::loadAnnotations()
+{
+ gtTrainBoxes.resize(trainNum);
+ gtTrainClsIdx.resize(trainNum);
+ for (int i = 0; i < trainNum; i++)
+ if (!loadBBoxes(trainSet[i], gtTrainBoxes[i], gtTrainClsIdx[i]))
+ return;
+
+ gtTestBoxes.resize(testNum);
+ gtTestClsIdx.resize(testNum);
+ for (int i = 0; i < testNum; i++)
+ if(!loadBBoxes(testSet[i], gtTestBoxes[i], gtTestClsIdx[i]))
+ return;
+ printf("Load annotations finished\n");
+}
+
+void DataSetVOC::loadDataGenericOverCls()
+{
+ vecS allSet = trainSet;
+ allSet.insert(allSet.end(), testSet.begin(), testSet.end());
+ int imgN = (int)allSet.size();
+ trainSet.clear(), testSet.clear();
+ trainSet.reserve(imgN), testSet.reserve(imgN);
+    vector<vector<Vec4i>> gtBoxes(imgN);
+    vector<vecI> gtClsIdx(imgN);
+ for (int i = 0; i < imgN; i++){
+ if (!loadBBoxes(allSet[i], gtBoxes[i], gtClsIdx[i]))
+ return;
+ vector trainBoxes, testBoxes;
+ vecI trainIdx, testIdx;
+ for (size_t j = 0; j < gtBoxes[i].size(); j++)
+ if (gtClsIdx[i][j] < 6){
+ trainBoxes.push_back(gtBoxes[i][j]);
+ trainIdx.push_back(gtClsIdx[i][j]);
+ }
+ else{
+ testBoxes.push_back(gtBoxes[i][j]);
+ testIdx.push_back(gtClsIdx[i][j]);
+ }
+ if (trainBoxes.size()){
+ trainSet.push_back(allSet[i]);
+ gtTrainBoxes.push_back(trainBoxes);
+ gtTrainClsIdx.push_back(trainIdx);
+ }
+ else{
+ testSet.push_back(allSet[i]);
+ gtTestBoxes.push_back(testBoxes);
+ gtTestClsIdx.push_back(testIdx);
+ }
+ }
+ trainNum = trainSet.size();
+ testNum = testSet.size();
+ printf("Load annotations (generic over classes) finished\n");
+}
+
+void DataSetVOC::loadBox(const FileNode &fn, vector<Vec4i> &boxes, vecI &clsIdx){
+ string isDifficult;
+ fn["difficult"]>>isDifficult;
+ if (isDifficult == "1")
+ return;
+
+ string strXmin, strYmin, strXmax, strYmax;
+ fn["bndbox"]["xmin"] >> strXmin;
+ fn["bndbox"]["ymin"] >> strYmin;
+ fn["bndbox"]["xmax"] >> strXmax;
+ fn["bndbox"]["ymax"] >> strYmax;
+ boxes.push_back(Vec4i(atoi(_S(strXmin)), atoi(_S(strYmin)), atoi(_S(strXmax)), atoi(_S(strYmax))));
+
+ string clsName;
+ fn["name"]>>clsName;
+ clsIdx.push_back(findFromList(clsName, classNames));
+    CV_Assert_(clsIdx[clsIdx.size() - 1] >= 0, ("Invalid class name\n"));
+}
+
+bool DataSetVOC::loadBBoxes(CStr &nameNE, vector<Vec4i> &boxes, vecI &clsIdx)
+{
+ string fName = format(_S(annoPathW), _S(nameNE));
+ FileStorage fs(fName, FileStorage::READ);
+ FileNode fn = fs["annotation"]["object"];
+ boxes.clear();
+ clsIdx.clear();
+ if (fn.isSeq()){
+ for (FileNodeIterator it = fn.begin(), it_end = fn.end(); it != it_end; it++){
+ loadBox(*it, boxes, clsIdx);
+ }
+ }
+ else
+ loadBox(fn, boxes, clsIdx);
+ return true;
+}
+
+// Needs to call yml.m in this solution before running this function.
+bool DataSetVOC::cvt2OpenCVYml(CStr &annoDir)
+{
+ vecS namesNE;
+ int imgNum = CmFile::GetNamesNE(annoDir + "*.yaml", namesNE);
+ printf("Converting annotations to OpenCV yml format:\n");
+ for (int i = 0; i < imgNum; i++){
+ printf("%d/%d %s.yaml\r", i, imgNum, _S(namesNE[i]));
+ string fPath = annoDir + namesNE[i];
+ cvt2OpenCVYml(fPath + ".yaml", fPath + ".yml");
+ }
+ return true;
+}
+
+// Needs to call yml.m in this solution before running this function.
+bool DataSetVOC::cvt2OpenCVYml(CStr &yamlName, CStr &ymlName)
+{
+ ifstream f(yamlName);
+ FILE *fO = fopen(_S(ymlName), "w");
+    if (!f.is_open() || fO == NULL) // bail out if either file failed to open
+ return false;
+ fprintf(fO, "%s\n", "%YAML:1.0\n");
+ string line;
+
+ int addIdent = 0;
+ while(getline(f, line)){
+ if (line.substr(0, 12) == " filename: ")
+ line = " filename: \"" + line.substr(12) + "\"";
+        size_t tmp = line.find_first_of('-');
+        if (tmp != string::npos){
+            bool allSpace = true;
+            for (size_t k = 0; k < tmp; k++)
+                if (line[k] != ' ')
+                    allSpace = false;
+            if (allSpace)
+                addIdent = (int)tmp;
+ }
+ for (int k = 0; k < addIdent; k++)
+ fprintf(fO, " ");
+ fprintf(fO, "%s\n", _S(line));
+ }
+ fclose(fO);
+
+ FileStorage fs(ymlName, FileStorage::READ);
+ string tmp;
+ fs["annotation"]["folder"]>>tmp;
+ return true;
+}
+
+
+// Get training and testing sets for demonstrating that objectness generalizes over classes
+void DataSetVOC::getTrainTest()
+{
+ const int TRAIN_CLS_NUM = 6;
+ string trainCls[TRAIN_CLS_NUM] = {"bird", "car", "cat", "cow", "dog", "sheep"};
+    // (currently a stub; loadDataGenericOverCls performs the class-based split)
+}
+
+void DataSetVOC::getXmlStrVOC(CStr &fName, string &buf)
+{
+ ifstream fin(fName);
+ string strLine;
+ buf.clear();
+ buf.reserve(100000);
+ buf += "\n\n";
+ while (getline(fin, strLine) && strLine.size()) {
+ int startP = strLine.find_first_of(">") + 1;
+ int endP = strLine.find_last_of("<");
+ if (endP > startP){
+ string val = keepXmlChar(strLine.substr(startP, endP - startP));
+ if (val.size() < endP - startP)
+ strLine = strLine.substr(0, startP) + val + strLine.substr(endP);
+ }
+ buf += strLine + "\n";
+ }
+ buf += "\n";
+ //FileStorage fs(buf, FileStorage::READ + FileStorage::MEMORY);
+    // Debug dump left over from the Windows version; disabled in the Linux port:
+    //ofstream fout("D:/t.xml");
+    //fout << buf;
+}
diff --git a/Bing/Src/DataSetVOC.h b/Bing/Src/DataSetVOC.h
new file mode 100644
index 000000000..4c1ec38af
--- /dev/null
+++ b/Bing/Src/DataSetVOC.h
@@ -0,0 +1,71 @@
+#pragma once
+
+struct DataSetVOC
+{
+ DataSetVOC(CStr &wkDir);
+ ~DataSetVOC(void);
+
+ // Organization structure data for the dataset
+ string wkDir; // Root working directory, all other directories are relative to this one
+ string resDir, localDir; // Directory for saving results and local data
+ string imgPathW, annoPathW; // Image and annotation path
+
+ // Information for training and testing
+ int trainNum, testNum;
+ vecS trainSet, testSet; // File names (NE) for training and testing images
+ vecS classNames; // Object class names
+    vector<vector<Vec4i>> gtTrainBoxes, gtTestBoxes; // Ground truth bounding boxes for training and testing images
+    vector<vecI> gtTrainClsIdx, gtTestClsIdx; // Object class indexes
+
+
+ // Load annotations
+ void loadAnnotations();
+
+ static bool cvt2OpenCVYml(CStr &annoDir); // Needs to call yml.m in this solution before running this function.
+
+ static inline double interUnio(const Vec4i &box1, const Vec4i &box2);
+
+    // Get training and testing sets for demonstrating that objectness generalizes over classes
+ void getTrainTest();
+
+public: // Used for testing generalization over classes
+ void loadDataGenericOverCls();
+
+private:
+    void loadBox(const FileNode &fn, vector<Vec4i> &boxes, vecI &clsIdx);
+    bool loadBBoxes(CStr &nameNE, vector<Vec4i> &boxes, vecI &clsIdx);
+ static void getXmlStrVOC(CStr &fName, string &buf);
+ static inline string keepXmlChar(CStr &str);
+ static bool cvt2OpenCVYml(CStr &yamlName, CStr &ymlName); // Needs to call yml.m in this solution before running this function.
+};
+
+string DataSetVOC::keepXmlChar(CStr &_str)
+{
+ string str = _str;
+ int sz = (int)str.size(), count = 0;
+ for (int i = 0; i < sz; i++){
+ char c = str[i];
+ if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == ' ' || c == '.')
+ str[count++] = str[i];
+ }
+ str.resize(count);
+ return str;
+}
+
+double DataSetVOC::interUnio(const Vec4i &bb, const Vec4i &bbgt)
+{
+ int bi[4];
+ bi[0] = max(bb[0], bbgt[0]);
+ bi[1] = max(bb[1], bbgt[1]);
+ bi[2] = min(bb[2], bbgt[2]);
+ bi[3] = min(bb[3], bbgt[3]);
+
+ double iw = bi[2] - bi[0] + 1;
+ double ih = bi[3] - bi[1] + 1;
+ double ov = 0;
+ if (iw>0 && ih>0){
+ double ua = (bb[2]-bb[0]+1)*(bb[3]-bb[1]+1)+(bbgt[2]-bbgt[0]+1)*(bbgt[3]-bbgt[1]+1)-iw*ih;
+ ov = iw*ih/ua;
+ }
+ return ov;
+}
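+
+// interUnio above implements the PASCAL VOC overlap criterion: intersection
+// area over union area, with the "+1" terms treating boxes as inclusive pixel
+// ranges. Identical boxes score 1.0; disjoint boxes score 0.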
diff --git a/Bing/Src/FilterTIG.cpp b/Bing/Src/FilterTIG.cpp
new file mode 100644
index 000000000..8643011e7
--- /dev/null
+++ b/Bing/Src/FilterTIG.cpp
@@ -0,0 +1,78 @@
+#include "kyheader.h"
+#include "FilterTIG.h"
+#include "CmShow.h"
+
+
+void FilterTIG::update(CMat &w1f){
+ CV_Assert(w1f.cols * w1f.rows == D && w1f.type() == CV_32F && w1f.isContinuous());
+ float b[D], residuals[D];
+ memcpy(residuals, w1f.data, sizeof(float)*D);
+ for (int i = 0; i < NUM_COMP; i++){
+ float avg = 0;
+ for (int j = 0; j < D; j++){
+ b[j] = residuals[j] >= 0.0f ? 1.0f : -1.0f;
+ avg += residuals[j] * b[j];
+ }
+ avg /= D;
+ _coeffs1[i] = avg, _coeffs2[i] = avg*2, _coeffs4[i] = avg*4, _coeffs8[i] = avg*8;
+ for (int j = 0; j < D; j++)
+ residuals[j] -= avg*b[j];
+ UINT64 tig = 0;
+ for (int j = 0; j < D; j++)
+ tig = (tig << 1) | (b[j] > 0 ? 1 : 0);
+ _bTIGs[i] = tig;
+ }
+}
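+
+// update() above greedily approximates the 8x8 filter w with NUM_COMP binary
+// bases: each b_j = sign(residual), its coefficient is the mean projection
+// <residual, b_j>/D, and the residual is reduced accordingly, so that
+// w ~= sum_j _coeffs1[j] * b_j with each b_j packed into one 64-bit word.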
+
+void FilterTIG::reconstruct(Mat &w1f){
+ w1f = Mat::zeros(8, 8, CV_32F);
+ float *weight = (float*)w1f.data;
+ for (int i = 0; i < NUM_COMP; i++){
+ UINT64 tig = _bTIGs[i];
+ for (int j = 0; j < D; j++)
+ weight[j] += _coeffs1[i] * (((tig >> (63-j)) & 1) ? 1 : -1);
+ }
+}
+
+// For a W by H gradient magnitude map, find a W-7 by H-7 CV_32F matching score map
+// Please refer to my paper for definition of the variables used in this function
+Mat FilterTIG::matchTemplate(const Mat &mag1u){
+ const int H = mag1u.rows, W = mag1u.cols;
+ const Size sz(W+1, H+1); // Expand original size to avoid dealing with boundary conditions
+    Mat_<INT64> Tig1 = Mat_<INT64>::zeros(sz), Tig2 = Mat_<INT64>::zeros(sz);
+    Mat_<INT64> Tig4 = Mat_<INT64>::zeros(sz), Tig8 = Mat_<INT64>::zeros(sz);
+    Mat_<byte> Row1 = Mat_<byte>::zeros(sz), Row2 = Mat_<byte>::zeros(sz);
+    Mat_<byte> Row4 = Mat_<byte>::zeros(sz), Row8 = Mat_<byte>::zeros(sz);
+    Mat_<float> scores(sz);
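+    // Each gradient byte contributes its top 4 bits to 4 binary planes. R* keeps
+    // the last 8 pixels of one plane in a byte (one bit shifted in per column);
+    // T* stacks the last 8 row bytes into a 64-bit word, so T*[x] holds the 8x8
+    // binary window ending at (x, y), which dot() scores against the filter.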
+ for(int y = 1; y <= H; y++){
+        const byte* G = mag1u.ptr<byte>(y-1);
+        INT64* T1 = Tig1.ptr<INT64>(y); // Binary TIG of current row
+        INT64* T2 = Tig2.ptr<INT64>(y);
+        INT64* T4 = Tig4.ptr<INT64>(y);
+        INT64* T8 = Tig8.ptr<INT64>(y);
+        INT64* Tu1 = Tig1.ptr<INT64>(y-1); // Binary TIG of upper row
+        INT64* Tu2 = Tig2.ptr<INT64>(y-1);
+        INT64* Tu4 = Tig4.ptr<INT64>(y-1);
+        INT64* Tu8 = Tig8.ptr<INT64>(y-1);
+        byte* R1 = Row1.ptr<byte>(y);
+        byte* R2 = Row2.ptr<byte>(y);
+        byte* R4 = Row4.ptr<byte>(y);
+        byte* R8 = Row8.ptr<byte>(y);
+        float *s = scores.ptr<float>(y);
+ for (int x = 1; x <= W; x++) {
+ byte g = G[x-1];
+ R1[x] = (R1[x-1] << 1) | ((g >> 4) & 1);
+ R2[x] = (R2[x-1] << 1) | ((g >> 5) & 1);
+ R4[x] = (R4[x-1] << 1) | ((g >> 6) & 1);
+ R8[x] = (R8[x-1] << 1) | ((g >> 7) & 1);
+ T1[x] = (Tu1[x] << 8) | R1[x];
+ T2[x] = (Tu2[x] << 8) | R2[x];
+ T4[x] = (Tu4[x] << 8) | R4[x];
+ T8[x] = (Tu8[x] << 8) | R8[x];
+ s[x] = dot(T1[x], T2[x], T4[x], T8[x]);
+ }
+ }
+ Mat matchCost1f;
+ scores(Rect(8, 8, W-7, H-7)).copyTo(matchCost1f);
+ return matchCost1f;
+}
diff --git a/Bing/Src/FilterTIG.h b/Bing/Src/FilterTIG.h
new file mode 100644
index 000000000..d8b3a2a7f
--- /dev/null
+++ b/Bing/Src/FilterTIG.h
@@ -0,0 +1,45 @@
+#pragma once
+
+class FilterTIG
+{
+public:
+ void update(CMat &w);
+
+ // For a W by H gradient magnitude map, find a W-7 by H-7 CV_32F matching score map
+ Mat matchTemplate(const Mat &mag1u);
+
+ inline float dot(const INT64 tig1, const INT64 tig2, const INT64 tig4, const INT64 tig8);
+
+public:
+ void reconstruct(Mat &w); // For illustration purpose
+
+private:
+ static const int NUM_COMP = 2; // Number of components
+ static const int D = 64; // Dimension of TIG
+ INT64 _bTIGs[NUM_COMP]; // Binary TIG features
+ float _coeffs1[NUM_COMP]; // Coefficients of binary TIG features
+
+    // For efficiently dealing with the different bits of the CV_8U gradient map
+ float _coeffs2[NUM_COMP], _coeffs4[NUM_COMP], _coeffs8[NUM_COMP];
+};
+
+
+inline float FilterTIG::dot(const INT64 tig1, const INT64 tig2, const INT64 tig4, const INT64 tig8)
+{
+ INT64 bcT1 = __builtin_popcountll(tig1);
+ INT64 bcT2 = __builtin_popcountll(tig2);
+ INT64 bcT4 = __builtin_popcountll(tig4);
+ INT64 bcT8 = __builtin_popcountll(tig8);
+
+ INT64 bc01 = (__builtin_popcountll(_bTIGs[0] & tig1) << 1) - bcT1;
+ INT64 bc02 = ((__builtin_popcountll(_bTIGs[0] & tig2) << 1) - bcT2) << 1;
+ INT64 bc04 = ((__builtin_popcountll(_bTIGs[0] & tig4) << 1) - bcT4) << 2;
+ INT64 bc08 = ((__builtin_popcountll(_bTIGs[0] & tig8) << 1) - bcT8) << 3;
+
+ INT64 bc11 = (__builtin_popcountll(_bTIGs[1] & tig1) << 1) - bcT1;
+ INT64 bc12 = ((__builtin_popcountll(_bTIGs[1] & tig2) << 1) - bcT2) << 1;
+ INT64 bc14 = ((__builtin_popcountll(_bTIGs[1] & tig4) << 1) - bcT4) << 2;
+ INT64 bc18 = ((__builtin_popcountll(_bTIGs[1] & tig8) << 1) - bcT8) << 3;
+
+ return _coeffs1[0] * (bc01 + bc02 + bc04 + bc08) + _coeffs1[1] * (bc11 + bc12 + bc14 + bc18);
+}
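+
+// dot() above scores one 8x8 binary window: for basis b_j and bit-plane g_k,
+// 2*popcount(b_j & g_k) - popcount(g_k) equals the +/-1 inner product
+// <b_j, g_k>; the planes are weighted 1, 2, 4, 8 (the top four bits of each
+// gradient byte) and combined with the coefficients learned in update().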
diff --git a/Bing/Src/ImgContrastBB.h b/Bing/Src/ImgContrastBB.h
new file mode 100644
index 000000000..71c5fb5e9
--- /dev/null
+++ b/Bing/Src/ImgContrastBB.h
@@ -0,0 +1,62 @@
+#pragma once
+
+struct ImgContrastBB
+{
+ ImgContrastBB(CStr &imgPath);
+ ImgContrastBB(CMat &img3u);
+
+ inline float contrastVal(Vec4i ¢er);
+ inline int regSum(Vec4i &box, Vec3i &sumColor); // Return region size and sum color
+
+private:
+ Mat iImg;
+ int _w, _h;
+ inline void assertBBox(Vec4i ¢er, CStr &name);
+};
+
+ImgContrastBB::ImgContrastBB(CStr &imgPath)
+{
+ Mat img3u = imread(imgPath);
+ integral(img3u, iImg, CV_32SC3);
+ _w = img3u.cols;
+ _h = img3u.rows;
+}
+
+ImgContrastBB::ImgContrastBB(CMat &img3u)
+{
+ integral(img3u, iImg, CV_32SC3);
+ _w = img3u.cols;
+ _h = img3u.rows;
+}
+
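+// iImg is the 3-channel integral image of the input: iImg(y, x) is the sum of
+// pixel values over the rectangle [0, x) x [0, y), so regSum() below can fetch
+// any box sum with four lookups.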
+int ImgContrastBB::regSum(Vec4i &box, Vec3i &sumColor)
+{
+ int x1 = box[0] - 1, y1 = box[1] - 1, x2 = box[2] - 1, y2 = box[3] - 1;
+    sumColor = iImg.at<Vec3i>(y2, x2) + iImg.at<Vec3i>(y1, x1) - iImg.at<Vec3i>(y1, x2) - iImg.at<Vec3i>(y2, x1);
+ return (x2 - x1)*(y2 - y1);
+}
+
+
+float ImgContrastBB::contrastVal(Vec4i ¢er)
+{
+ int wd = (center[2] - center[0])/2, hd = (center[3] - center[1])/2;
+ Vec4i surround(max(center[0] - wd, 1), max(center[1] - hd, 1), min(center[2] + wd, _w), min(center[3] + hd, _h));
+ Vec3i cColr, sColr;
+
+ assertBBox(center, "Center");
+    assertBBox(surround, "Surround");
+ int cSz = regSum(center, cColr);
+ int sSz = regSum(surround, sColr);
+
+ sColr -= cColr;
+ sSz -= cSz;
+ sColr /= sSz;
+ cColr /= cSz;
+ return sqrtf((float)(sqr(sColr[0] - cColr[0]) + sqr(sColr[1] - cColr[1]) + sqr(sColr[2] - cColr[2])))/100.0f;
+}
+
+void ImgContrastBB::assertBBox(Vec4i ¢er, CStr &name)
+{
+ if (center[0] < 1 || center[1] < 1 || center[2] > _w || center[3] > _h)
+ printf("%s: (%d, %d, %d, %d), (%d, %d)\n", _S(name), center[0], center[1], center[2], center[3], _w, _h);
+}
\ No newline at end of file
diff --git a/Bing/Src/LibLinear/README.1.93.txt b/Bing/Src/LibLinear/README.1.93.txt
new file mode 100644
index 000000000..3a659e080
--- /dev/null
+++ b/Bing/Src/LibLinear/README.1.93.txt
@@ -0,0 +1,531 @@
+LIBLINEAR is a simple package for solving large-scale regularized linear
+classification and regression. It currently supports
+- L2-regularized logistic regression/L2-loss support vector classification/L1-loss support vector classification
+- L1-regularized L2-loss support vector classification/L1-regularized logistic regression
+- L2-regularized L2-loss support vector regression/L1-loss support vector regression.
+This document explains the usage of LIBLINEAR.
+
+To get started, please read the ``Quick Start'' section first.
+For developers, please check the ``Library Usage'' section to learn
+how to integrate LIBLINEAR in your software.
+
+Table of Contents
+=================
+
+- When to use LIBLINEAR but not LIBSVM
+- Quick Start
+- Installation
+- `train' Usage
+- `predict' Usage
+- Examples
+- Library Usage
+- Building Windows Binaries
+- Additional Information
+- MATLAB/OCTAVE interface
+- PYTHON interface
+
+When to use LIBLINEAR but not LIBSVM
+====================================
+
+There are some large data for which with/without nonlinear mappings
+gives similar performances. Without using kernels, one can
+efficiently train a much larger set via linear classification/regression.
+These data usually have a large number of features. Document classification
+is an example.
+
+Warning: While generally liblinear is very fast, its default solver
+may be slow under certain situations (e.g., data not scaled or C is
+large). See Appendix B of our SVM guide about how to handle such
+cases.
+http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
+
+Warning: If you are a beginner and your data sets are not large, you
+should consider LIBSVM first.
+
+LIBSVM page:
+http://www.csie.ntu.edu.tw/~cjlin/libsvm
+
+
+Quick Start
+===========
+
+See the section ``Installation'' for installing LIBLINEAR.
+
+After installation, there are programs `train' and `predict' for
+training and testing, respectively.
+
+About the data format, please check the README file of LIBSVM. Note
+that feature index must start from 1 (but not 0).
+
+A sample classification data included in this package is `heart_scale'.
+
+Type `train heart_scale', and the program will read the training
+data and output the model file `heart_scale.model'. If you have a test
+set called heart_scale.t, then type `predict heart_scale.t
+heart_scale.model output' to see the prediction accuracy. The `output'
+file contains the predicted class labels.
+
+For more information about `train' and `predict', see the sections
+`train' Usage and `predict' Usage.
+
+To obtain good performances, sometimes one needs to scale the
+data. Please check the program `svm-scale' of LIBSVM. For large and
+sparse data, use `-l 0' to keep the sparsity.
+
+Installation
+============
+
+On Unix systems, type `make' to build the `train' and `predict'
+programs. Run them without arguments to show the usages.
+
+On other systems, consult `Makefile' to build them (e.g., see
+'Building Windows binaries' in this file) or use the pre-built
+binaries (Windows binaries are in the directory `windows').
+
+This software uses some level-1 BLAS subroutines. The needed functions are
+included in this package. If a BLAS library is available on your
+machine, you may use it by modifying the Makefile: Unmark the following line
+
+ #LIBS ?= -lblas
+
+and mark
+
+ LIBS ?= blas/blas.a
+
+`train' Usage
+=============
+
+Usage: train [options] training_set_file [model_file]
+options:
+-s type : set type of solver (default 1)
+ for multi-class classification
+ 0 -- L2-regularized logistic regression (primal)
+ 1 -- L2-regularized L2-loss support vector classification (dual)
+ 2 -- L2-regularized L2-loss support vector classification (primal)
+ 3 -- L2-regularized L1-loss support vector classification (dual)
+ 4 -- support vector classification by Crammer and Singer
+ 5 -- L1-regularized L2-loss support vector classification
+ 6 -- L1-regularized logistic regression
+ 7 -- L2-regularized logistic regression (dual)
+ for regression
+ 11 -- L2-regularized L2-loss support vector regression (primal)
+ 12 -- L2-regularized L2-loss support vector regression (dual)
+ 13 -- L2-regularized L1-loss support vector regression (dual)
+-c cost : set the parameter C (default 1)
+-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
+-e epsilon : set tolerance of termination criterion
+ -s 0 and 2
+ |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
+ where f is the primal function and pos/neg are # of
+ positive/negative data (default 0.01)
+ -s 11
+ |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001)
+ -s 1, 3, 4 and 7
+ Dual maximal violation <= eps; similar to libsvm (default 0.1)
+ -s 5 and 6
+ |f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,
+ where f is the primal function (default 0.01)
+	-s 12 and 13
+ |f'(alpha)|_1 <= eps |f'(alpha0)|,
+ where f is the dual function (default 0.1)
+-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)
+-wi weight: weights adjust the parameter C of different classes (see README for details)
+-v n: n-fold cross validation mode
+-q : quiet mode (no outputs)
+
+Option -v randomly splits the data into n parts and calculates cross
+validation accuracy on them.
+
+Formulations:
+
+For L2-regularized logistic regression (-s 0), we solve
+
+min_w w^Tw/2 + C \sum log(1 + exp(-y_i w^Tx_i))
+
+For L2-regularized L2-loss SVC dual (-s 1), we solve
+
+min_alpha 0.5(alpha^T (Q + I/2/C) alpha) - e^T alpha
+ s.t. 0 <= alpha_i,
+
+For L2-regularized L2-loss SVC (-s 2), we solve
+
+min_w w^Tw/2 + C \sum max(0, 1- y_i w^Tx_i)^2
+
+For L2-regularized L1-loss SVC dual (-s 3), we solve
+
+min_alpha 0.5(alpha^T Q alpha) - e^T alpha
+ s.t. 0 <= alpha_i <= C,
+
+For L1-regularized L2-loss SVC (-s 5), we solve
+
+min_w \sum |w_j| + C \sum max(0, 1- y_i w^Tx_i)^2
+
+For L1-regularized logistic regression (-s 6), we solve
+
+min_w \sum |w_j| + C \sum log(1 + exp(-y_i w^Tx_i))
+
+For L2-regularized logistic regression (-s 7), we solve
+
+min_alpha 0.5(alpha^T Q alpha) + \sum alpha_i*log(alpha_i) + \sum (C-alpha_i)*log(C-alpha_i) - a constant
+ s.t. 0 <= alpha_i <= C,
+
+where
+
+Q is a matrix with Q_ij = y_i y_j x_i^T x_j.
+
+For L2-regularized L2-loss SVR (-s 11), we solve
+
+min_w w^Tw/2 + C \sum max(0, |y_i-w^Tx_i|-epsilon)^2
+
+For L2-regularized L2-loss SVR dual (-s 12), we solve
+
+min_beta 0.5(beta^T (Q + lambda I/2/C) beta) - y^T beta + \sum |beta_i|
+
+For L2-regularized L1-loss SVR dual (-s 13), we solve
+
+min_beta 0.5(beta^T Q beta) - y^T beta + \sum |beta_i|
+ s.t. -C <= beta_i <= C,
+
+where
+
+Q is a matrix with Q_ij = x_i^T x_j.
+
+If bias >= 0, w becomes [w; w_{n+1}] and x becomes [x; bias].
+
+The primal-dual relationship implies that -s 1 and -s 2 give the same
+model, -s 0 and -s 7 give the same, and -s 11 and -s 12 give the same.
+
+We implement 1-vs-the rest multi-class strategy for classification.
+In training i vs. non_i, their C parameters are (weight from -wi)*C
+and C, respectively. If there are only two classes, we train only one
+model. Thus weight1*C vs. weight2*C is used. See examples below.
+
+We also implement multi-class SVM by Crammer and Singer (-s 4):
+
+min_{w_m, \xi_i} 0.5 \sum_m ||w_m||^2 + C \sum_i \xi_i
+ s.t. w^T_{y_i} x_i - w^T_m x_i >= \e^m_i - \xi_i \forall m,i
+
+where e^m_i = 0 if y_i = m,
+ e^m_i = 1 if y_i != m,
+
+Here we solve the dual problem:
+
+min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
+ s.t. \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i
+
+where w_m(\alpha) = \sum_i \alpha^m_i x_i,
+and C^m_i = C if m = y_i,
+ C^m_i = 0 if m != y_i.
+
+`predict' Usage
+===============
+
+Usage: predict [options] test_file model_file output_file
+options:
+-b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only
+-q : quiet mode (no outputs)
+
+Note that -b is only needed in the prediction phase. This is different
+from the setting of LIBSVM.
+
+Examples
+========
+
+> train data_file
+
+Train linear SVM with L2-loss function.
+
+> train -s 0 data_file
+
+Train a logistic regression model.
+
+> train -v 5 -e 0.001 data_file
+
+Do five-fold cross-validation using L2-loss svm.
+Use a smaller stopping tolerance 0.001 than the default
+0.1 if you want more accurate solutions.
+
+> train -c 10 -w1 2 -w2 5 -w3 2 four_class_data_file
+
+Train four classifiers:
+positive negative Cp Cn
+class 1 class 2,3,4. 20 10
+class 2 class 1,3,4. 50 10
+class 3 class 1,2,4. 20 10
+class 4 class 1,2,3. 10 10
+
+> train -c 10 -w3 1 -w2 5 two_class_data_file
+
+If there are only two classes, we train ONE model.
+The C values for the two classes are 10 and 50.
+
+> predict -b 1 test_file data_file.model output_file
+
+Output probability estimates (for logistic regression only).
+
+Library Usage
+=============
+
+- Function: model* train(const struct problem *prob,
+ const struct parameter *param);
+
+ This function constructs and returns a linear classification
+ or regression model according to the given training data and
+ parameters.
+
+ struct problem describes the problem:
+
+ struct problem
+ {
+ int l, n;
+ int *y;
+ struct feature_node **x;
+ double bias;
+ };
+
+ where `l' is the number of training data. If bias >= 0, we assume
+ that one additional feature is added to the end of each data
+ instance. `n' is the number of feature (including the bias feature
+ if bias >= 0). `y' is an array containing the target values. (integers
+ in classification, real numbers in regression) And `x' is an array
+ of pointers, each of which points to a sparse representation (array
+ of feature_node) of one training vector.
+
+ For example, if we have the following training data:
+
+ LABEL ATTR1 ATTR2 ATTR3 ATTR4 ATTR5
+ ----- ----- ----- ----- ----- -----
+ 1 0 0.1 0.2 0 0
+ 2 0 0.1 0.3 -1.2 0
+ 1 0.4 0 0 0 0
+ 2 0 0.1 0 1.4 0.5
+ 3 -0.1 -0.2 0.1 1.1 0.1
+
+ and bias = 1, then the components of problem are:
+
+ l = 5
+ n = 6
+
+ y -> 1 2 1 2 3
+
+ x -> [ ] -> (2,0.1) (3,0.2) (6,1) (-1,?)
+ [ ] -> (2,0.1) (3,0.3) (4,-1.2) (6,1) (-1,?)
+ [ ] -> (1,0.4) (6,1) (-1,?)
+ [ ] -> (2,0.1) (4,1.4) (5,0.5) (6,1) (-1,?)
+ [ ] -> (1,-0.1) (2,-0.2) (3,0.1) (4,1.1) (5,0.1) (6,1) (-1,?)
+
+ struct parameter describes the parameters of a linear classification
+ or regression model:
+
+ struct parameter
+ {
+ int solver_type;
+
+ /* these are for training only */
+ double eps; /* stopping criteria */
+ double C;
+ int nr_weight;
+ int *weight_label;
+ double* weight;
+ double p;
+ };
+
+ solver_type can be one of L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL.
+ for classification
+ L2R_LR L2-regularized logistic regression (primal)
+ L2R_L2LOSS_SVC_DUAL L2-regularized L2-loss support vector classification (dual)
+ L2R_L2LOSS_SVC L2-regularized L2-loss support vector classification (primal)
+ L2R_L1LOSS_SVC_DUAL L2-regularized L1-loss support vector classification (dual)
+ MCSVM_CS support vector classification by Crammer and Singer
+ L1R_L2LOSS_SVC L1-regularized L2-loss support vector classification
+ L1R_LR L1-regularized logistic regression
+ L2R_LR_DUAL L2-regularized logistic regression (dual)
+ for regression
+ L2R_L2LOSS_SVR L2-regularized L2-loss support vector regression (primal)
+ L2R_L2LOSS_SVR_DUAL L2-regularized L2-loss support vector regression (dual)
+ L2R_L1LOSS_SVR_DUAL L2-regularized L1-loss support vector regression (dual)
+
+ C is the cost of constraints violation.
+ p is the sensitiveness of loss of support vector regression.
+ eps is the stopping criterion.
+
+ nr_weight, weight_label, and weight are used to change the penalty
+ for some classes (If the weight for a class is not changed, it is
+ set to 1). This is useful for training classifier using unbalanced
+ input data or with asymmetric misclassification cost.
+
+ nr_weight is the number of elements in the array weight_label and
+ weight. Each weight[i] corresponds to weight_label[i], meaning that
+ the penalty of class weight_label[i] is scaled by a factor of weight[i].
+
+ If you do not want to change penalty for any of the classes,
+ just set nr_weight to 0.
+
+ *NOTE* To avoid wrong parameters, check_parameter() should be
+ called before train().
+
+ struct model stores the model obtained from the training procedure:
+
+ struct model
+ {
+ struct parameter param;
+ int nr_class; /* number of classes */
+ int nr_feature;
+ double *w;
+ int *label; /* label of each class */
+ double bias;
+ };
+
+ param describes the parameters used to obtain the model.
+
+ nr_class and nr_feature are the number of classes and features,
+ respectively. nr_class = 2 for regression.
+
+ The nr_feature*nr_class array w gives feature weights. We use one
+ against the rest for multi-class classification, so each feature
+ index corresponds to nr_class weight values. Weights are
+ organized in the following way
+
+ +------------------+------------------+------------+
+ | nr_class weights | nr_class weights | ...
+ | for 1st feature | for 2nd feature |
+ +------------------+------------------+------------+
+
+ If bias >= 0, x becomes [x; bias]. The number of features is
+ increased by one, so w is a (nr_feature+1)*nr_class array. The
+ value of bias is stored in the variable bias.
+
+ The array label stores class labels.
+
+- Function: void cross_validation(const problem *prob, const parameter *param, int nr_fold, double *target);
+
+ This function conducts cross validation. Data are separated to
+ nr_fold folds. Under given parameters, sequentially each fold is
+ validated using the model from training the remaining. Predicted
+ labels in the validation process are stored in the array called
+ target.
+
+ The format of prob is same as that for train().
+
+- Function: double predict(const model *model_, const feature_node *x);
+
+ For a classification model, the predicted class for x is returned.
+ For a regression model, the function value of x calculated using
+ the model is returned.
+
+- Function: double predict_values(const struct model *model_,
+ const struct feature_node *x, double* dec_values);
+
+ This function gives nr_w decision values in the array dec_values.
+ nr_w=1 if regression is applied or the number of classes is two. An exception is
+ multi-class svm by Crammer and Singer (-s 4), where nr_w = 2 if there are two classes. For all other situations, nr_w is the
+ number of classes.
+
+ We implement one-vs-the rest multi-class strategy (-s 0,1,2,3,5,6,7)
+ and multi-class svm by Crammer and Singer (-s 4) for multi-class SVM.
+ The class with the highest decision value is returned.
+
+- Function: double predict_probability(const struct model *model_,
+ const struct feature_node *x, double* prob_estimates);
+
+ This function gives nr_class probability estimates in the array
+ prob_estimates. nr_class can be obtained from the function
+ get_nr_class. The class with the highest probability is
+ returned. Currently, we support only the probability outputs of
+ logistic regression.
+
+- Function: int get_nr_feature(const model *model_);
+
+ The function gives the number of attributes of the model.
+
+- Function: int get_nr_class(const model *model_);
+
+ The function gives the number of classes of the model.
+ For a regression model, 2 is returned.
+
+- Function: void get_labels(const model *model_, int* label);
+
+ This function outputs the name of labels into an array called label.
+ For a regression model, label is unchanged.
+
+- Function: const char *check_parameter(const struct problem *prob,
+ const struct parameter *param);
+
+ This function checks whether the parameters are within the feasible
+ range of the problem. This function should be called before calling
+ train() and cross_validation(). It returns NULL if the
+ parameters are feasible, otherwise an error message is returned.
+
+- Function: int save_model(const char *model_file_name,
+ const struct model *model_);
+
+ This function saves a model to a file; returns 0 on success, or -1
+ if an error occurs.
+
+- Function: struct model *load_model(const char *model_file_name);
+
+ This function returns a pointer to the model read from the file,
+ or a null pointer if the model could not be loaded.
+
+- Function: void free_model_content(struct model *model_ptr);
+
+ This function frees the memory used by the entries in a model structure.
+
+- Function: void free_and_destroy_model(struct model **model_ptr_ptr);
+
+ This function frees the memory used by a model and destroys the model
+ structure.
+
+- Function: void destroy_param(struct parameter *param);
+
+ This function frees the memory used by a parameter set.
+
+- Function: void set_print_string_function(void (*print_func)(const char *));
+
+ Users can specify their output format by a function. Use
+ set_print_string_function(NULL);
+ for default printing to stdout.
+
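+A minimal end-to-end sketch of the C API described above (the feature values
+and solver choice are made up for illustration; error handling is omitted):
+
+    struct feature_node x1[] = {{1, 0.5}, {2, 1.0}, {-1, 0}}; /* index -1 ends a vector */
+    struct feature_node x2[] = {{1, -0.5}, {2, -1.0}, {-1, 0}};
+    struct feature_node *x[] = {x1, x2};
+    int y[] = {1, 2};
+    struct problem prob = {2, 2, y, x, -1};  /* l=2, n=2, bias < 0: no bias term */
+    struct parameter param = {L2R_L2LOSS_SVC_DUAL, 0.1, 1, 0, NULL, NULL, 0.1};
+    if (check_parameter(&prob, &param) == NULL) {
+        struct model *m = train(&prob, &param);
+        double label = predict(m, x1);
+        free_and_destroy_model(&m);
+    }
+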
+Building Windows Binaries
+=========================
+
+Windows binaries are in the directory `windows'. To build them via
+Visual C++, use the following steps:
+
+1. Open a dos command box and change to liblinear directory. If
+environment variables of VC++ have not been set, type
+
+"C:\Program Files\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat"
+
+You may have to modify the above command according which version of
+VC++ or where it is installed.
+
+2. Type
+
+nmake -f Makefile.win clean all
+
+
+MATLAB/OCTAVE Interface
+=======================
+
+Please check the file README in the directory `matlab'.
+
+PYTHON Interface
+================
+
+Please check the file README in the directory `python'.
+
+Additional Information
+======================
+
+If you find LIBLINEAR helpful, please cite it as
+
+R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
+LIBLINEAR: A Library for Large Linear Classification, Journal of
+Machine Learning Research 9(2008), 1871-1874. Software available at
+http://www.csie.ntu.edu.tw/~cjlin/liblinear
+
+For any questions and comments, please send your email to
+cjlin@csie.ntu.edu.tw
+
+
diff --git a/Bing/Src/LibLinear/blas/Makefile b/Bing/Src/LibLinear/blas/Makefile
new file mode 100644
index 000000000..895fd244c
--- /dev/null
+++ b/Bing/Src/LibLinear/blas/Makefile
@@ -0,0 +1,22 @@
+AR = ar rcv
+RANLIB = ranlib
+
+HEADERS = blas.h blasp.h
+FILES = dnrm2.o daxpy.o ddot.o dscal.o
+
+CFLAGS = $(OPTFLAGS)
+FFLAGS = $(OPTFLAGS)
+
+blas: $(FILES) $(HEADERS)
+ $(AR) blas.a $(FILES)
+ $(RANLIB) blas.a
+
+clean:
+ - rm -f *.o
+ - rm -f *.a
+ - rm -f *~
+
+.c.o:
+ $(CC) $(CFLAGS) -c $*.c
+
+
diff --git a/Bing/Src/LibLinear/blas/blas.h b/Bing/Src/LibLinear/blas/blas.h
new file mode 100644
index 000000000..558893a04
--- /dev/null
+++ b/Bing/Src/LibLinear/blas/blas.h
@@ -0,0 +1,25 @@
+/* blas.h -- C header file for BLAS Ver 1.0 */
+/* Jesse Bennett March 23, 2000 */
+
+/** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed."
+
+ - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */
+
+#ifndef BLAS_INCLUDE
+#define BLAS_INCLUDE
+
+/* Data types specific to BLAS implementation */
+typedef struct { float r, i; } fcomplex;
+typedef struct { double r, i; } dcomplex;
+typedef int blasbool;
+
+#include "blasp.h" /* Prototypes for all BLAS functions */
+
+#define FALSE 0
+#define TRUE 1
+
+/* Macro functions */
+#define MIN(a,b) ((a) <= (b) ? (a) : (b))
+#define MAX(a,b) ((a) >= (b) ? (a) : (b))
+
+#endif
diff --git a/Bing/Src/LibLinear/blas/blasp.h b/Bing/Src/LibLinear/blas/blasp.h
new file mode 100644
index 000000000..745836db8
--- /dev/null
+++ b/Bing/Src/LibLinear/blas/blasp.h
@@ -0,0 +1,430 @@
+/* blasp.h -- C prototypes for BLAS Ver 1.0 */
+/* Jesse Bennett March 23, 2000 */
+
+/* Functions listed in alphabetical order */
+
+#ifdef F2C_COMPAT
+
+void cdotc_(fcomplex *dotval, int *n, fcomplex *cx, int *incx,
+ fcomplex *cy, int *incy);
+
+void cdotu_(fcomplex *dotval, int *n, fcomplex *cx, int *incx,
+ fcomplex *cy, int *incy);
+
+double sasum_(int *n, float *sx, int *incx);
+
+double scasum_(int *n, fcomplex *cx, int *incx);
+
+double scnrm2_(int *n, fcomplex *x, int *incx);
+
+double sdot_(int *n, float *sx, int *incx, float *sy, int *incy);
+
+double snrm2_(int *n, float *x, int *incx);
+
+void zdotc_(dcomplex *dotval, int *n, dcomplex *cx, int *incx,
+ dcomplex *cy, int *incy);
+
+void zdotu_(dcomplex *dotval, int *n, dcomplex *cx, int *incx,
+ dcomplex *cy, int *incy);
+
+#else
+
+fcomplex cdotc_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
+
+fcomplex cdotu_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
+
+float sasum_(int *n, float *sx, int *incx);
+
+float scasum_(int *n, fcomplex *cx, int *incx);
+
+float scnrm2_(int *n, fcomplex *x, int *incx);
+
+float sdot_(int *n, float *sx, int *incx, float *sy, int *incy);
+
+float snrm2_(int *n, float *x, int *incx);
+
+dcomplex zdotc_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
+
+dcomplex zdotu_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
+
+#endif
+
+/* Remaining functions listed in alphabetical order */
+
+int caxpy_(int *n, fcomplex *ca, fcomplex *cx, int *incx, fcomplex *cy,
+ int *incy);
+
+int ccopy_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
+
+int cgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
+ fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx,
+ fcomplex *beta, fcomplex *y, int *incy);
+
+int cgemm_(char *transa, char *transb, int *m, int *n, int *k,
+ fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb,
+ fcomplex *beta, fcomplex *c, int *ldc);
+
+int cgemv_(char *trans, int *m, int *n, fcomplex *alpha, fcomplex *a,
+ int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y,
+ int *incy);
+
+int cgerc_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx,
+ fcomplex *y, int *incy, fcomplex *a, int *lda);
+
+int cgeru_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx,
+ fcomplex *y, int *incy, fcomplex *a, int *lda);
+
+int chbmv_(char *uplo, int *n, int *k, fcomplex *alpha, fcomplex *a,
+ int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y,
+ int *incy);
+
+int chemm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha,
+ fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta,
+ fcomplex *c, int *ldc);
+
+int chemv_(char *uplo, int *n, fcomplex *alpha, fcomplex *a, int *lda,
+ fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy);
+
+int cher_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx,
+ fcomplex *a, int *lda);
+
+int cher2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx,
+ fcomplex *y, int *incy, fcomplex *a, int *lda);
+
+int cher2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha,
+ fcomplex *a, int *lda, fcomplex *b, int *ldb, float *beta,
+ fcomplex *c, int *ldc);
+
+int cherk_(char *uplo, char *trans, int *n, int *k, float *alpha,
+ fcomplex *a, int *lda, float *beta, fcomplex *c, int *ldc);
+
+int chpmv_(char *uplo, int *n, fcomplex *alpha, fcomplex *ap, fcomplex *x,
+ int *incx, fcomplex *beta, fcomplex *y, int *incy);
+
+int chpr_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx,
+ fcomplex *ap);
+
+int chpr2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx,
+ fcomplex *y, int *incy, fcomplex *ap);
+
+int crotg_(fcomplex *ca, fcomplex *cb, float *c, fcomplex *s);
+
+int cscal_(int *n, fcomplex *ca, fcomplex *cx, int *incx);
+
+int csscal_(int *n, float *sa, fcomplex *cx, int *incx);
+
+int cswap_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
+
+int csymm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha,
+ fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta,
+ fcomplex *c, int *ldc);
+
+int csyr2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha,
+ fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta,
+ fcomplex *c, int *ldc);
+
+int csyrk_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha,
+ fcomplex *a, int *lda, fcomplex *beta, fcomplex *c, int *ldc);
+
+int ctbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ fcomplex *a, int *lda, fcomplex *x, int *incx);
+
+int ctbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ fcomplex *a, int *lda, fcomplex *x, int *incx);
+
+int ctpmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap,
+ fcomplex *x, int *incx);
+
+int ctpsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap,
+ fcomplex *x, int *incx);
+
+int ctrmm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b,
+ int *ldb);
+
+int ctrmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a,
+ int *lda, fcomplex *x, int *incx);
+
+int ctrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b,
+ int *ldb);
+
+int ctrsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a,
+ int *lda, fcomplex *x, int *incx);
+
+int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
+ int *incy);
+
+int dcopy_(int *n, double *sx, int *incx, double *sy, int *incy);
+
+int dgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
+ double *alpha, double *a, int *lda, double *x, int *incx,
+ double *beta, double *y, int *incy);
+
+int dgemm_(char *transa, char *transb, int *m, int *n, int *k,
+ double *alpha, double *a, int *lda, double *b, int *ldb,
+ double *beta, double *c, int *ldc);
+
+int dgemv_(char *trans, int *m, int *n, double *alpha, double *a,
+ int *lda, double *x, int *incx, double *beta, double *y,
+ int *incy);
+
+int dger_(int *m, int *n, double *alpha, double *x, int *incx,
+ double *y, int *incy, double *a, int *lda);
+
+int drot_(int *n, double *sx, int *incx, double *sy, int *incy,
+ double *c, double *s);
+
+int drotg_(double *sa, double *sb, double *c, double *s);
+
+int dsbmv_(char *uplo, int *n, int *k, double *alpha, double *a,
+ int *lda, double *x, int *incx, double *beta, double *y,
+ int *incy);
+
+int dscal_(int *n, double *sa, double *sx, int *incx);
+
+int dspmv_(char *uplo, int *n, double *alpha, double *ap, double *x,
+ int *incx, double *beta, double *y, int *incy);
+
+int dspr_(char *uplo, int *n, double *alpha, double *x, int *incx,
+ double *ap);
+
+int dspr2_(char *uplo, int *n, double *alpha, double *x, int *incx,
+ double *y, int *incy, double *ap);
+
+int dswap_(int *n, double *sx, int *incx, double *sy, int *incy);
+
+int dsymm_(char *side, char *uplo, int *m, int *n, double *alpha,
+ double *a, int *lda, double *b, int *ldb, double *beta,
+ double *c, int *ldc);
+
+int dsymv_(char *uplo, int *n, double *alpha, double *a, int *lda,
+ double *x, int *incx, double *beta, double *y, int *incy);
+
+int dsyr_(char *uplo, int *n, double *alpha, double *x, int *incx,
+ double *a, int *lda);
+
+int dsyr2_(char *uplo, int *n, double *alpha, double *x, int *incx,
+ double *y, int *incy, double *a, int *lda);
+
+int dsyr2k_(char *uplo, char *trans, int *n, int *k, double *alpha,
+ double *a, int *lda, double *b, int *ldb, double *beta,
+ double *c, int *ldc);
+
+int dsyrk_(char *uplo, char *trans, int *n, int *k, double *alpha,
+ double *a, int *lda, double *beta, double *c, int *ldc);
+
+int dtbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ double *a, int *lda, double *x, int *incx);
+
+int dtbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ double *a, int *lda, double *x, int *incx);
+
+int dtpmv_(char *uplo, char *trans, char *diag, int *n, double *ap,
+ double *x, int *incx);
+
+int dtpsv_(char *uplo, char *trans, char *diag, int *n, double *ap,
+ double *x, int *incx);
+
+int dtrmm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, double *alpha, double *a, int *lda, double *b,
+ int *ldb);
+
+int dtrmv_(char *uplo, char *trans, char *diag, int *n, double *a,
+ int *lda, double *x, int *incx);
+
+int dtrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, double *alpha, double *a, int *lda, double *b,
+ int *ldb);
+
+int dtrsv_(char *uplo, char *trans, char *diag, int *n, double *a,
+ int *lda, double *x, int *incx);
+
+
+int saxpy_(int *n, float *sa, float *sx, int *incx, float *sy, int *incy);
+
+int scopy_(int *n, float *sx, int *incx, float *sy, int *incy);
+
+int sgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
+ float *alpha, float *a, int *lda, float *x, int *incx,
+ float *beta, float *y, int *incy);
+
+int sgemm_(char *transa, char *transb, int *m, int *n, int *k,
+ float *alpha, float *a, int *lda, float *b, int *ldb,
+ float *beta, float *c, int *ldc);
+
+int sgemv_(char *trans, int *m, int *n, float *alpha, float *a,
+ int *lda, float *x, int *incx, float *beta, float *y,
+ int *incy);
+
+int sger_(int *m, int *n, float *alpha, float *x, int *incx,
+ float *y, int *incy, float *a, int *lda);
+
+int srot_(int *n, float *sx, int *incx, float *sy, int *incy,
+ float *c, float *s);
+
+int srotg_(float *sa, float *sb, float *c, float *s);
+
+int ssbmv_(char *uplo, int *n, int *k, float *alpha, float *a,
+ int *lda, float *x, int *incx, float *beta, float *y,
+ int *incy);
+
+int sscal_(int *n, float *sa, float *sx, int *incx);
+
+int sspmv_(char *uplo, int *n, float *alpha, float *ap, float *x,
+ int *incx, float *beta, float *y, int *incy);
+
+int sspr_(char *uplo, int *n, float *alpha, float *x, int *incx,
+ float *ap);
+
+int sspr2_(char *uplo, int *n, float *alpha, float *x, int *incx,
+ float *y, int *incy, float *ap);
+
+int sswap_(int *n, float *sx, int *incx, float *sy, int *incy);
+
+int ssymm_(char *side, char *uplo, int *m, int *n, float *alpha,
+ float *a, int *lda, float *b, int *ldb, float *beta,
+ float *c, int *ldc);
+
+int ssymv_(char *uplo, int *n, float *alpha, float *a, int *lda,
+ float *x, int *incx, float *beta, float *y, int *incy);
+
+int ssyr_(char *uplo, int *n, float *alpha, float *x, int *incx,
+ float *a, int *lda);
+
+int ssyr2_(char *uplo, int *n, float *alpha, float *x, int *incx,
+ float *y, int *incy, float *a, int *lda);
+
+int ssyr2k_(char *uplo, char *trans, int *n, int *k, float *alpha,
+ float *a, int *lda, float *b, int *ldb, float *beta,
+ float *c, int *ldc);
+
+int ssyrk_(char *uplo, char *trans, int *n, int *k, float *alpha,
+ float *a, int *lda, float *beta, float *c, int *ldc);
+
+int stbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ float *a, int *lda, float *x, int *incx);
+
+int stbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ float *a, int *lda, float *x, int *incx);
+
+int stpmv_(char *uplo, char *trans, char *diag, int *n, float *ap,
+ float *x, int *incx);
+
+int stpsv_(char *uplo, char *trans, char *diag, int *n, float *ap,
+ float *x, int *incx);
+
+int strmm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, float *alpha, float *a, int *lda, float *b,
+ int *ldb);
+
+int strmv_(char *uplo, char *trans, char *diag, int *n, float *a,
+ int *lda, float *x, int *incx);
+
+int strsm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, float *alpha, float *a, int *lda, float *b,
+ int *ldb);
+
+int strsv_(char *uplo, char *trans, char *diag, int *n, float *a,
+ int *lda, float *x, int *incx);
+
+int zaxpy_(int *n, dcomplex *ca, dcomplex *cx, int *incx, dcomplex *cy,
+ int *incy);
+
+int zcopy_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
+
+int zdscal_(int *n, double *sa, dcomplex *cx, int *incx);
+
+int zgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
+ dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx,
+ dcomplex *beta, dcomplex *y, int *incy);
+
+int zgemm_(char *transa, char *transb, int *m, int *n, int *k,
+ dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb,
+ dcomplex *beta, dcomplex *c, int *ldc);
+
+int zgemv_(char *trans, int *m, int *n, dcomplex *alpha, dcomplex *a,
+ int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y,
+ int *incy);
+
+int zgerc_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx,
+ dcomplex *y, int *incy, dcomplex *a, int *lda);
+
+int zgeru_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx,
+ dcomplex *y, int *incy, dcomplex *a, int *lda);
+
+int zhbmv_(char *uplo, int *n, int *k, dcomplex *alpha, dcomplex *a,
+ int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y,
+ int *incy);
+
+int zhemm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha,
+ dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta,
+ dcomplex *c, int *ldc);
+
+int zhemv_(char *uplo, int *n, dcomplex *alpha, dcomplex *a, int *lda,
+ dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy);
+
+int zher_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx,
+ dcomplex *a, int *lda);
+
+int zher2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx,
+ dcomplex *y, int *incy, dcomplex *a, int *lda);
+
+int zher2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha,
+ dcomplex *a, int *lda, dcomplex *b, int *ldb, double *beta,
+ dcomplex *c, int *ldc);
+
+int zherk_(char *uplo, char *trans, int *n, int *k, double *alpha,
+ dcomplex *a, int *lda, double *beta, dcomplex *c, int *ldc);
+
+int zhpmv_(char *uplo, int *n, dcomplex *alpha, dcomplex *ap, dcomplex *x,
+ int *incx, dcomplex *beta, dcomplex *y, int *incy);
+
+int zhpr_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx,
+ dcomplex *ap);
+
+int zhpr2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx,
+ dcomplex *y, int *incy, dcomplex *ap);
+
+int zrotg_(dcomplex *ca, dcomplex *cb, double *c, dcomplex *s);
+
+int zscal_(int *n, dcomplex *ca, dcomplex *cx, int *incx);
+
+int zswap_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
+
+int zsymm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha,
+ dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta,
+ dcomplex *c, int *ldc);
+
+int zsyr2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha,
+ dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta,
+ dcomplex *c, int *ldc);
+
+int zsyrk_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha,
+ dcomplex *a, int *lda, dcomplex *beta, dcomplex *c, int *ldc);
+
+int ztbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ dcomplex *a, int *lda, dcomplex *x, int *incx);
+
+int ztbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
+ dcomplex *a, int *lda, dcomplex *x, int *incx);
+
+int ztpmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap,
+ dcomplex *x, int *incx);
+
+int ztpsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap,
+ dcomplex *x, int *incx);
+
+int ztrmm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b,
+ int *ldb);
+
+int ztrmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a,
+ int *lda, dcomplex *x, int *incx);
+
+int ztrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
+ int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b,
+ int *ldb);
+
+int ztrsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a,
+ int *lda, dcomplex *x, int *incx);
diff --git a/Bing/Src/LibLinear/blas/daxpy.c b/Bing/Src/LibLinear/blas/daxpy.c
new file mode 100644
index 000000000..58f345a53
--- /dev/null
+++ b/Bing/Src/LibLinear/blas/daxpy.c
@@ -0,0 +1,49 @@
+#include "blas.h"
+
+int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
+ int *incy)
+{
+ long int i, m, ix, iy, nn, iincx, iincy;
+ register double ssa;
+
+ /* constant times a vector plus a vector.
+ uses unrolled loop for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*) */
+
+ /* Dereference inputs */
+ nn = *n;
+ ssa = *sa;
+ iincx = *incx;
+ iincy = *incy;
+
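+	/* Editorial note: the unrolled branch below processes four elements per
+	   iteration; the clean-up loop then handles the remaining nn%4 elements. */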
+ if( nn > 0 && ssa != 0.0 )
+ {
+ if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
+ {
+ m = nn-3;
+ for (i = 0; i < m; i += 4)
+ {
+ sy[i] += ssa * sx[i];
+ sy[i+1] += ssa * sx[i+1];
+ sy[i+2] += ssa * sx[i+2];
+ sy[i+3] += ssa * sx[i+3];
+ }
+ for ( ; i < nn; ++i) /* clean-up loop */
+ sy[i] += ssa * sx[i];
+ }
+ else /* code for unequal increments or equal increments not equal to 1 */
+ {
+ ix = iincx >= 0 ? 0 : (1 - nn) * iincx;
+ iy = iincy >= 0 ? 0 : (1 - nn) * iincy;
+ for (i = 0; i < nn; i++)
+ {
+ sy[iy] += ssa * sx[ix];
+ ix += iincx;
+ iy += iincy;
+ }
+ }
+ }
+
+ return 0;
+} /* daxpy_ */
diff --git a/Bing/Src/LibLinear/blas/ddot.c b/Bing/Src/LibLinear/blas/ddot.c
new file mode 100644
index 000000000..a64a2808f
--- /dev/null
+++ b/Bing/Src/LibLinear/blas/ddot.c
@@ -0,0 +1,50 @@
+#include "blas.h"
+
+double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
+{
+ long int i, m, nn, iincx, iincy;
+ double stemp;
+ long int ix, iy;
+
+ /* forms the dot product of two vectors.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*) */
+
+ /* Dereference inputs */
+ nn = *n;
+ iincx = *incx;
+ iincy = *incy;
+
+ stemp = 0.0;
+ if (nn > 0)
+ {
+ if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
+ {
+ m = nn-4;
+ for (i = 0; i < m; i += 5)
+ stemp += sx[i] * sy[i] + sx[i+1] * sy[i+1] + sx[i+2] * sy[i+2] +
+ sx[i+3] * sy[i+3] + sx[i+4] * sy[i+4];
+
+ for ( ; i < nn; i++) /* clean-up loop */
+ stemp += sx[i] * sy[i];
+ }
+ else /* code for unequal increments or equal increments not equal to 1 */
+ {
+ ix = 0;
+ iy = 0;
+ if (iincx < 0)
+ ix = (1 - nn) * iincx;
+ if (iincy < 0)
+ iy = (1 - nn) * iincy;
+ for (i = 0; i < nn; i++)
+ {
+ stemp += sx[ix] * sy[iy];
+ ix += iincx;
+ iy += iincy;
+ }
+ }
+ }
+
+ return stemp;
+} /* ddot_ */
diff --git a/Bing/Src/LibLinear/blas/dnrm2.c b/Bing/Src/LibLinear/blas/dnrm2.c
new file mode 100644
index 000000000..e50cdf777
--- /dev/null
+++ b/Bing/Src/LibLinear/blas/dnrm2.c
@@ -0,0 +1,62 @@
+#include <math.h> /* Needed for fabs() and sqrt() */
+#include "blas.h"
+
+double dnrm2_(int *n, double *x, int *incx)
+{
+ long int ix, nn, iincx;
+ double norm, scale, absxi, ssq, temp;
+
+/* DNRM2 returns the euclidean norm of a vector via the function
+ name, so that
+
+ DNRM2 := sqrt( x'*x )
+
+ -- This version written on 25-October-1982.
+ Modified on 14-October-1993 to inline the call to SLASSQ.
+ Sven Hammarling, Nag Ltd. */
+
+ /* Dereference inputs */
+ nn = *n;
+ iincx = *incx;
+
+ if( nn > 0 && iincx > 0 )
+ {
+ if (nn == 1)
+ {
+ norm = fabs(x[0]);
+ }
+ else
+ {
+ scale = 0.0;
+ ssq = 1.0;
+
+ /* The following loop is equivalent to this call to the LAPACK
+ auxiliary routine: CALL SLASSQ( N, X, INCX, SCALE, SSQ ) */
+
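+      /* Editorial note: loop invariant is scale*sqrt(ssq) == norm of the
+         elements seen so far; rescaling by the running maximum absolute
+         value avoids overflow and underflow in the squared sum. */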
+ for (ix=(nn-1)*iincx; ix>=0; ix-=iincx)
+ {
+ if (x[ix] != 0.0)
+ {
+ absxi = fabs(x[ix]);
+ if (scale < absxi)
+ {
+ temp = scale / absxi;
+ ssq = ssq * (temp * temp) + 1.0;
+ scale = absxi;
+ }
+ else
+ {
+ temp = absxi / scale;
+ ssq += temp * temp;
+ }
+ }
+ }
+ norm = scale * sqrt(ssq);
+ }
+ }
+ else
+ norm = 0.0;
+
+ return norm;
+
+} /* dnrm2_ */
diff --git a/Bing/Src/LibLinear/blas/dscal.c b/Bing/Src/LibLinear/blas/dscal.c
new file mode 100644
index 000000000..a0eca0c39
--- /dev/null
+++ b/Bing/Src/LibLinear/blas/dscal.c
@@ -0,0 +1,44 @@
+#include "blas.h"
+
+int dscal_(int *n, double *sa, double *sx, int *incx)
+{
+ long int i, m, nincx, nn, iincx;
+ double ssa;
+
+ /* scales a vector by a constant.
+ uses unrolled loops for increment equal to 1.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*) */
+
+ /* Dereference inputs */
+ nn = *n;
+ iincx = *incx;
+ ssa = *sa;
+
+ if (nn > 0 && iincx > 0)
+ {
+ if (iincx == 1) /* code for increment equal to 1 */
+ {
+ m = nn-4;
+ for (i = 0; i < m; i += 5)
+ {
+ sx[i] = ssa * sx[i];
+ sx[i+1] = ssa * sx[i+1];
+ sx[i+2] = ssa * sx[i+2];
+ sx[i+3] = ssa * sx[i+3];
+ sx[i+4] = ssa * sx[i+4];
+ }
+ for ( ; i < nn; ++i) /* clean-up loop */
+ sx[i] = ssa * sx[i];
+ }
+ else /* code for increment not equal to 1 */
+ {
+ nincx = nn * iincx;
+ for (i = 0; i < nincx; i += iincx)
+ sx[i] = ssa * sx[i];
+ }
+ }
+
+ return 0;
+} /* dscal_ */
diff --git a/Bing/Src/LibLinear/linear.cpp b/Bing/Src/LibLinear/linear.cpp
new file mode 100644
index 000000000..1dc8056f1
--- /dev/null
+++ b/Bing/Src/LibLinear/linear.cpp
@@ -0,0 +1,2811 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <locale.h>
+#include "linear.h"
+#include "tron.h"
+typedef signed char schar;
+template <class T> static inline void swap(T& x, T& y) { T t=x; x=y; y=t; }
+#ifndef min
+template <class T> static inline T min(T x,T y) { return (x<y)?x:y; }
+#endif
+#ifndef max
+template <class T> static inline T max(T x,T y) { return (x>y)?x:y; }
+#endif
+template <class T, class S> static inline void clone(T*& dst, S* src, int n)
+{
+ dst = new T[n];
+ memcpy((void *)dst,(void *)src,sizeof(T)*n);
+}
+#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
+#define INF HUGE_VAL
+
+static void print_string_stdout(const char *s)
+{
+ fputs(s,stdout);
+ fflush(stdout);
+}
+
+static void (*liblinear_print_string) (const char *) = &print_string_stdout;
+
+#if 1
+static void info(const char *fmt,...)
+{
+ char buf[BUFSIZ];
+ va_list ap;
+ va_start(ap,fmt);
+ vsprintf(buf,fmt,ap);
+ va_end(ap);
+ (*liblinear_print_string)(buf);
+}
+#else
+static void info(const char *fmt,...) {}
+#endif
+
+class l2r_lr_fun: public function
+{
+public:
+ l2r_lr_fun(const problem *prob, double *C);
+ ~l2r_lr_fun();
+
+ double fun(double *w);
+ void grad(double *w, double *g);
+ void Hv(double *s, double *Hs);
+
+ int get_nr_variable(void);
+
+private:
+ void Xv(double *v, double *Xv);
+ void XTv(double *v, double *XTv);
+
+ double *C;
+ double *z;
+ double *D;
+ const problem *prob;
+};
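+
+// Editorial note: this class implements the callback interface consumed by
+// the TRON trust-region Newton solver (tron.h): fun() and grad() evaluate the
+// regularized objective and its gradient, while Hv() supplies Hessian-vector
+// products so the inner conjugate-gradient iterations never need to form the
+// Hessian explicitly.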
+
+l2r_lr_fun::l2r_lr_fun(const problem *prob, double *C)
+{
+ int l=prob->l;
+
+ this->prob = prob;
+
+ z = new double[l];
+ D = new double[l];
+ this->C = C;
+}
+
+l2r_lr_fun::~l2r_lr_fun()
+{
+ delete[] z;
+ delete[] D;
+}
+
+
+double l2r_lr_fun::fun(double *w)
+{
+ int i;
+ double f=0;
+ double *y=prob->y;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+
+ Xv(w, z);
+
+ for(i=0;i<w_size;i++)
+ f += w[i]*w[i];
+ f /= 2.0;
+ for(i=0;i<l;i++)
+ {
+ double yz = y[i]*z[i];
+ if (yz >= 0)
+ f += C[i]*log(1 + exp(-yz));
+ else
+ f += C[i]*(-yz+log(1 + exp(yz)));
+ }
+
+ return(f);
+}
+
+void l2r_lr_fun::grad(double *w, double *g)
+{
+ int i;
+ double *y=prob->y;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+
+ for(i=0;i<l;i++)
+ {
+ z[i] = 1/(1 + exp(-y[i]*z[i]));
+ D[i] = z[i]*(1-z[i]);
+ z[i] = C[i]*(z[i]-1)*y[i];
+ }
+ XTv(z, g);
+
+ for(i=0;i<w_size;i++)
+ g[i] = w[i] + g[i];
+}
+
+int l2r_lr_fun::get_nr_variable(void)
+{
+ return prob->n;
+}
+
+void l2r_lr_fun::Hv(double *s, double *Hs)
+{
+ int i;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+ double *wa = new double[l];
+
+ Xv(s, wa);
+ for(i=0;i<l;i++)
+ wa[i] = C[i]*D[i]*wa[i];
+
+ XTv(wa, Hs);
+ for(i=0;i<w_size;i++)
+ Hs[i] = s[i] + Hs[i];
+ delete[] wa;
+}
+
+void l2r_lr_fun::Xv(double *v, double *Xv)
+{
+ int i;
+ int l=prob->l;
+ feature_node **x=prob->x;
+
+ for(i=0;i<l;i++)
+ {
+ feature_node *s=x[i];
+ Xv[i]=0;
+ while(s->index!=-1)
+ {
+ Xv[i]+=v[s->index-1]*s->value;
+ s++;
+ }
+ }
+}
+
+void l2r_lr_fun::XTv(double *v, double *XTv)
+{
+ int i;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+ feature_node **x=prob->x;
+
+ for(i=0;i<w_size;i++)
+ XTv[i]=0;
+ for(i=0;i<l;i++)
+ {
+ feature_node *s=x[i];
+ while(s->index!=-1)
+ {
+ XTv[s->index-1]+=v[i]*s->value;
+ s++;
+ }
+ }
+}
+
+class l2r_l2_svc_fun: public function
+{
+public:
+ l2r_l2_svc_fun(const problem *prob, double *C);
+ ~l2r_l2_svc_fun();
+
+ double fun(double *w);
+ void grad(double *w, double *g);
+ void Hv(double *s, double *Hs);
+
+ int get_nr_variable(void);
+
+protected:
+ void Xv(double *v, double *Xv);
+ void subXv(double *v, double *Xv);
+ void subXTv(double *v, double *XTv);
+
+ double *C;
+ double *z;
+ double *D;
+ int *I;
+ int sizeI;
+ const problem *prob;
+};
+
+l2r_l2_svc_fun::l2r_l2_svc_fun(const problem *prob, double *C)
+{
+ int l=prob->l;
+
+ this->prob = prob;
+
+ z = new double[l];
+ D = new double[l];
+ I = new int[l];
+ this->C = C;
+}
+
+l2r_l2_svc_fun::~l2r_l2_svc_fun()
+{
+ delete[] z;
+ delete[] D;
+ delete[] I;
+}
+
+double l2r_l2_svc_fun::fun(double *w)
+{
+ int i;
+ double f=0;
+ double *y=prob->y;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+
+ Xv(w, z);
+
+ for(i=0;i<w_size;i++)
+ f += w[i]*w[i];
+ f /= 2.0;
+ for(i=0;i<l;i++)
+ {
+ z[i] = y[i]*z[i];
+ double d = 1-z[i];
+ if (d > 0)
+ f += C[i]*d*d;
+ }
+
+ return(f);
+}
+
+void l2r_l2_svc_fun::grad(double *w, double *g)
+{
+ int i;
+ double *y=prob->y;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+
+ sizeI = 0;
+ for (i=0;i<l;i++)
+ if (z[i] < 1)
+ {
+ z[sizeI] = C[i]*y[i]*(z[i]-1);
+ I[sizeI] = i;
+ sizeI++;
+ }
+ subXTv(z, g);
+
+ for(i=0;i<w_size;i++)
+ g[i] = w[i] + 2*g[i];
+}
+
+int l2r_l2_svc_fun::get_nr_variable(void)
+{
+ return prob->n;
+}
+
+void l2r_l2_svc_fun::Hv(double *s, double *Hs)
+{
+ int i;
+ int w_size=get_nr_variable();
+ double *wa = new double[sizeI];
+
+ subXv(s, wa);
+ for(i=0;i<sizeI;i++)
+ wa[i] = C[I[i]]*wa[i];
+
+ subXTv(wa, Hs);
+ for(i=0;i<w_size;i++)
+ Hs[i] = s[i] + Hs[i];
+ delete[] wa;
+}
+
+void l2r_l2_svc_fun::Xv(double *v, double *Xv)
+{
+ int i;
+ int l=prob->l;
+ feature_node **x=prob->x;
+
+ for(i=0;i<l;i++)
+ {
+ feature_node *s=x[i];
+ Xv[i]=0;
+ while(s->index!=-1)
+ {
+ Xv[i]+=v[s->index-1]*s->value;
+ s++;
+ }
+ }
+}
+
+void l2r_l2_svc_fun::subXv(double *v, double *Xv)
+{
+ int i;
+ feature_node **x=prob->x;
+
+ for(i=0;i<sizeI;i++)
+ {
+ feature_node *s=x[I[i]];
+ Xv[i]=0;
+ while(s->index!=-1)
+ {
+ Xv[i]+=v[s->index-1]*s->value;
+ s++;
+ }
+ }
+}
+
+void l2r_l2_svc_fun::subXTv(double *v, double *XTv)
+{
+ int i;
+ int w_size=get_nr_variable();
+ feature_node **x=prob->x;
+
+ for(i=0;i<w_size;i++)
+ XTv[i]=0;
+ for(i=0;i<sizeI;i++)
+ {
+ feature_node *s=x[I[i]];
+ while(s->index!=-1)
+ {
+ XTv[s->index-1]+=v[i]*s->value;
+ s++;
+ }
+ }
+}
+
+class l2r_l2_svr_fun: public l2r_l2_svc_fun
+{
+public:
+ l2r_l2_svr_fun(const problem *prob, double *C, double p);
+
+ double fun(double *w);
+ void grad(double *w, double *g);
+
+private:
+ double p;
+};
+
+l2r_l2_svr_fun::l2r_l2_svr_fun(const problem *prob, double *C, double p):
+ l2r_l2_svc_fun(prob, C)
+{
+ this->p = p;
+}
+
+double l2r_l2_svr_fun::fun(double *w)
+{
+ int i;
+ double f=0;
+ double *y=prob->y;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+ double d;
+
+ Xv(w, z);
+
+ for(i=0;i<w_size;i++)
+ f += w[i]*w[i];
+ f /= 2;
+ for(i=0;i<l;i++)
+ {
+ d = z[i] - y[i];
+ if(d < -p)
+ f += C[i]*(d+p)*(d+p);
+ else if(d > p)
+ f += C[i]*(d-p)*(d-p);
+ }
+
+ return(f);
+}
+
+void l2r_l2_svr_fun::grad(double *w, double *g)
+{
+ int i;
+ double *y=prob->y;
+ int l=prob->l;
+ int w_size=get_nr_variable();
+ double d;
+
+ sizeI = 0;
+ for(i=0;i<l;i++)
+ {
+ d = z[i] - y[i];
+
+ // generate index set I
+ if(d < -p)
+ {
+ z[sizeI] = C[i]*(d+p);
+ I[sizeI] = i;
+ sizeI++;
+ }
+ else if(d > p)
+ {
+ z[sizeI] = C[i]*(d-p);
+ I[sizeI] = i;
+ sizeI++;
+ }
+
+ }
+ subXTv(z, g);
+
+ for(i=0;i<w_size;i++)
+ g[i] = w[i] + 2*g[i];
+}
+
+// A coordinate descent algorithm for
+// multi-class support vector machines by Crammer and Singer
+//
+// min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
+// s.t. \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i
+//
+// where e^m_i = 0 if y_i = m,
+// e^m_i = 1 if y_i != m,
+// C^m_i = C if m = y_i,
+// C^m_i = 0 if m != y_i,
+// and alpha^m_i is the alpha corresponding to weight vector w_m
+//
+// w_m(\alpha) = \sum_i \alpha^m_i x_i
+//
+// Given:
+// x, y, C
+// eps is the stopping tolerance
+//
+// solution will be put in w
+//
+// See Appendix of LIBLINEAR paper, Fan et al. (2008)
+
+#undef GETI
+#define GETI(i) ((int) prob->y[i])
+// To support weights for instances, use GETI(i) (i)
+
+class Solver_MCSVM_CS
+{
+ public:
+ Solver_MCSVM_CS(const problem *prob, int nr_class, double *C, double eps=0.1, int max_iter=100000);
+ ~Solver_MCSVM_CS();
+ void Solve(double *w);
+ private:
+ void solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new);
+ bool be_shrunk(int i, int m, int yi, double alpha_i, double minG);
+ double *B, *C, *G;
+ int w_size, l;
+ int nr_class;
+ int max_iter;
+ double eps;
+ const problem *prob;
+};
+
+Solver_MCSVM_CS::Solver_MCSVM_CS(const problem *prob, int nr_class, double *weighted_C, double eps, int max_iter)
+{
+ this->w_size = prob->n;
+ this->l = prob->l;
+ this->nr_class = nr_class;
+ this->eps = eps;
+ this->max_iter = max_iter;
+ this->prob = prob;
+ this->B = new double[nr_class];
+ this->G = new double[nr_class];
+ this->C = weighted_C;
+}
+
+Solver_MCSVM_CS::~Solver_MCSVM_CS()
+{
+ delete[] B;
+ delete[] G;
+}
+
+int compare_double(const void *a, const void *b)
+{
+ if(*(double *)a > *(double *)b)
+ return -1;
+ if(*(double *)a < *(double *)b)
+ return 1;
+ return 0;
+}
+
+void Solver_MCSVM_CS::solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new)
+{
+ int r;
+ double *D;
+
+ clone(D, B, active_i);
+ if(yi < active_i)
+ D[yi] += A_i*C_yi;
+ qsort(D, active_i, sizeof(double), compare_double);
+
+ double beta = D[0] - A_i*C_yi;
+ for(r=1;r<active_i && beta<r*D[r];r++)
+ beta += D[r];
+ beta /= r;
+
+ for(r=0;r<active_i;r++)
+ {
+ if(r == yi)
+ alpha_new[r] = min(C_yi, (beta-B[r])/A_i);
+ else
+ alpha_new[r] = min((double)0, (beta - B[r])/A_i);
+ }
+ delete[] D;
+}
+
+bool Solver_MCSVM_CS::be_shrunk(int i, int m, int yi, double alpha_i, double minG)
+{
+ double bound = 0;
+ if(m == yi)
+ bound = C[GETI(i)];
+ if(alpha_i == bound && G[m] < minG)
+ return true;
+ return false;
+}
+
+void Solver_MCSVM_CS::Solve(double *w)
+{
+ int i, m, s;
+ int iter = 0;
+ double *alpha = new double[l*nr_class];
+ double *alpha_new = new double[nr_class];
+ int *index = new int[l];
+ double *QD = new double[l];
+ int *d_ind = new int[nr_class];
+ double *d_val = new double[nr_class];
+ int *alpha_index = new int[nr_class*l];
+ int *y_index = new int[l];
+ int active_size = l;
+ int *active_size_i = new int[l];
+ double eps_shrink = max(10.0*eps, 1.0); // stopping tolerance for shrinking
+ bool start_from_all = true;
+
+ // Initial alpha can be set here. Note that
+ // 0 <= alpha[i*nr_class+m] <= C[GETI(i)] if prob->y[i] == m
+ // alpha[i*nr_class+m] <= 0 if prob->y[i] != m
+ // If initial alpha isn't zero, uncomment the for loop below to initialize w
+ for(i=0;i<l*nr_class;i++)
+ alpha[i] = 0;
+
+ for(i=0;i<w_size*nr_class;i++)
+ w[i] = 0;
+ for(i=0;i<l;i++)
+ {
+ for(m=0;m<nr_class;m++)
+ alpha_index[i*nr_class+m] = m;
+ feature_node *xi = prob->x[i];
+ QD[i] = 0;
+ while(xi->index != -1)
+ {
+ double val = xi->value;
+ QD[i] += val*val;
+
+ // Uncomment the for loop if initial alpha isn't zero
+ // for(m=0; mindex-1)*nr_class+m] += alpha[i*nr_class+m]*val;
+ xi++;
+ }
+ active_size_i[i] = nr_class;
+ y_index[i] = (int)prob->y[i];
+ index[i] = i;
+ }
+
+ while(iter < max_iter)
+ {
+ double stopping = -INF;
+ for(i=0;i<active_size;i++)
+ {
+ int j = i+rand()%(active_size-i);
+ swap(index[i], index[j]);
+ }
+ for(s=0;s<active_size;s++)
+ {
+ i = index[s];
+ double Ai = QD[i];
+ double *alpha_i = &alpha[i*nr_class];
+ int *alpha_index_i = &alpha_index[i*nr_class];
+
+ if(Ai > 0)
+ {
+ for(m=0;m<active_size_i[i];m++)
+ G[m] = 1;
+ if(y_index[i] < active_size_i[i])
+ G[y_index[i]] = 0;
+
+ feature_node *xi = prob->x[i];
+ while(xi->index!= -1)
+ {
+ double *w_i = &w[(xi->index-1)*nr_class];
+ for(m=0;m<active_size_i[i];m++)
+ G[m] += w_i[alpha_index_i[m]]*(xi->value);
+ xi++;
+ }
+
+ double minG = INF;
+ double maxG = -INF;
+ for(m=0;m<active_size_i[i];m++)
+ {
+ if(alpha_i[alpha_index_i[m]] < 0 && G[m] < minG)
+ minG = G[m];
+ if(G[m] > maxG)
+ maxG = G[m];
+ }
+ if(y_index[i] < active_size_i[i])
+ if(alpha_i[(int) prob->y[i]] < C[GETI(i)] && G[y_index[i]] < minG)
+ minG = G[y_index[i]];
+
+ for(m=0;m<active_size_i[i];m++)
+ {
+ if(be_shrunk(i, m, y_index[i], alpha_i[alpha_index_i[m]], minG))
+ {
+ active_size_i[i]--;
+ while(active_size_i[i]>m)
+ {
+ if(!be_shrunk(i, active_size_i[i], y_index[i],
+ alpha_i[alpha_index_i[active_size_i[i]]], minG))
+ {
+ swap(alpha_index_i[m], alpha_index_i[active_size_i[i]]);
+ swap(G[m], G[active_size_i[i]]);
+ if(y_index[i] == active_size_i[i])
+ y_index[i] = m;
+ else if(y_index[i] == m)
+ y_index[i] = active_size_i[i];
+ break;
+ }
+ active_size_i[i]--;
+ }
+ }
+ }
+
+ if(active_size_i[i] <= 1)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+
+ if(maxG-minG <= 1e-12)
+ continue;
+ else
+ stopping = max(maxG - minG, stopping);
+
+ for(m=0;m<active_size_i[i];m++)
+ B[m] = G[m] - Ai*alpha_i[alpha_index_i[m]];
+
+ solve_sub_problem(Ai, y_index[i], C[GETI(i)], active_size_i[i], alpha_new);
+ int nz_d = 0;
+ for(m=0;m<active_size_i[i];m++)
+ {
+ double d = alpha_new[m] - alpha_i[alpha_index_i[m]];
+ alpha_i[alpha_index_i[m]] = alpha_new[m];
+ if(fabs(d) >= 1e-12)
+ {
+ d_ind[nz_d] = alpha_index_i[m];
+ d_val[nz_d] = d;
+ nz_d++;
+ }
+ }
+
+ xi = prob->x[i];
+ while(xi->index != -1)
+ {
+ double *w_i = &w[(xi->index-1)*nr_class];
+ for(m=0;m<nz_d;m++)
+ w_i[d_ind[m]] += d_val[m]*xi->value;
+ xi++;
+ }
+ }
+ }
+
+ iter++;
+ if(iter % 10 == 0)
+ {
+ info(".");
+ }
+
+ if(stopping < eps_shrink)
+ {
+ if(stopping < eps && start_from_all == true)
+ break;
+ else
+ {
+ active_size = l;
+ for(i=0;i<l;i++)
+ active_size_i[i] = nr_class;
+ info("*");
+ eps_shrink = max(eps_shrink/2, eps);
+ start_from_all = true;
+ }
+ }
+ else
+ start_from_all = false;
+ }
+
+ info("\noptimization finished, #iter = %d\n",iter);
+ if (iter >= max_iter)
+ info("\nWARNING: reaching max number of iterations\n");
+
+ // calculate objective value
+ double v = 0;
+ int nSV = 0;
+ for(i=0;i<w_size*nr_class;i++)
+ v += w[i]*w[i];
+ v = 0.5*v;
+ for(i=0;i<l*nr_class;i++)
+ {
+ v += alpha[i];
+ if(fabs(alpha[i]) > 0)
+ nSV++;
+ }
+ for(i=0;i<l;i++)
+ v -= alpha[i*nr_class+(int)prob->y[i]];
+ info("Objective value = %lf\n",v);
+ info("nSV = %d\n",nSV);
+
+ delete [] alpha;
+ delete [] alpha_new;
+ delete [] index;
+ delete [] QD;
+ delete [] d_ind;
+ delete [] d_val;
+ delete [] alpha_index;
+ delete [] y_index;
+ delete [] active_size_i;
+}
+
+// A coordinate descent algorithm for
+// L1-loss and L2-loss SVM dual problems
+//
+// min_\alpha 0.5(\alpha^T (Q + D)\alpha) - e^T \alpha,
+// s.t. 0 <= \alpha_i <= upper_bound_i,
+//
+// where Qij = yi yj xi^T xj and
+// D is a diagonal matrix
+//
+// In L1-SVM case:
+// upper_bound_i = Cp if y_i = 1
+// upper_bound_i = Cn if y_i = -1
+// D_ii = 0
+// In L2-SVM case:
+// upper_bound_i = INF
+// D_ii = 1/(2*Cp) if y_i = 1
+// D_ii = 1/(2*Cn) if y_i = -1
+//
+// Given:
+// x, y, Cp, Cn
+// eps is the stopping tolerance
+//
+// solution will be put in w
+//
+// See Algorithm 3 of Hsieh et al., ICML 2008
+
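+// Editorial sketch (not part of the original source): one coordinate step of
+// the dual CD method below is the projected Newton update
+// G = y_i*w^T*x_i - 1 + D_ii*alpha_i
+// alpha_i <- min(max(alpha_i - G/QD_i, 0), upper_bound_i)
+// w <- w + (alpha_i - alpha_i_old)*y_i*x_i
+// where QD_i = x_i^T*x_i + D_ii is precomputed once per instance.
+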
+#undef GETI
+#define GETI(i) (y[i]+1)
+// To support weights for instances, use GETI(i) (i)
+
+static void solve_l2r_l1l2_svc(
+ const problem *prob, double *w, double eps,
+ double Cp, double Cn, int solver_type)
+{
+ int l = prob->l;
+ int w_size = prob->n;
+ int i, s, iter = 0;
+ double C, d, G;
+ double *QD = new double[l];
+ int max_iter = 1000;
+ int *index = new int[l];
+ double *alpha = new double[l];
+ schar *y = new schar[l];
+ int active_size = l;
+
+ // PG: projected gradient, for shrinking and stopping
+ double PG;
+ double PGmax_old = INF;
+ double PGmin_old = -INF;
+ double PGmax_new, PGmin_new;
+
+ // default solver_type: L2R_L2LOSS_SVC_DUAL
+ double diag[3] = {0.5/Cn, 0, 0.5/Cp};
+ double upper_bound[3] = {INF, 0, INF};
+ if(solver_type == L2R_L1LOSS_SVC_DUAL)
+ {
+ diag[0] = 0;
+ diag[2] = 0;
+ upper_bound[0] = Cn;
+ upper_bound[2] = Cp;
+ }
+
+ for(i=0; i<l; i++)
+ {
+ if(prob->y[i] > 0)
+ {
+ y[i] = +1;
+ }
+ else
+ {
+ y[i] = -1;
+ }
+ }
+
+ // Initial alpha can be set here. Note that
+ // 0 <= alpha[i] <= upper_bound[GETI(i)]
+ for(i=0; i<l; i++)
+ alpha[i] = 0;
+
+ for(i=0; i<w_size; i++)
+ w[i] = 0;
+ for(i=0; i<l; i++)
+ {
+ QD[i] = diag[GETI(i)];
+
+ feature_node *xi = prob->x[i];
+ while (xi->index != -1)
+ {
+ double val = xi->value;
+ QD[i] += val*val;
+ w[xi->index-1] += y[i]*alpha[i]*val;
+ xi++;
+ }
+ index[i] = i;
+ }
+
+ while (iter < max_iter)
+ {
+ PGmax_new = -INF;
+ PGmin_new = INF;
+
+ for (i=0; i<active_size; i++)
+ {
+ int j = i+rand()%(active_size-i);
+ swap(index[i], index[j]);
+ }
+
+ for (s=0; s<active_size; s++)
+ {
+ i = index[s];
+ G = 0;
+ schar yi = y[i];
+
+ feature_node *xi = prob->x[i];
+ while(xi->index!= -1)
+ {
+ G += w[xi->index-1]*(xi->value);
+ xi++;
+ }
+ G = G*yi-1;
+
+ C = upper_bound[GETI(i)];
+ G += alpha[i]*diag[GETI(i)];
+
+ PG = 0;
+ if (alpha[i] == 0)
+ {
+ if (G > PGmax_old)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+ else if (G < 0)
+ PG = G;
+ }
+ else if (alpha[i] == C)
+ {
+ if (G < PGmin_old)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+ else if (G > 0)
+ PG = G;
+ }
+ else
+ PG = G;
+
+ PGmax_new = max(PGmax_new, PG);
+ PGmin_new = min(PGmin_new, PG);
+
+ if(fabs(PG) > 1.0e-12)
+ {
+ double alpha_old = alpha[i];
+ alpha[i] = min(max(alpha[i] - G/QD[i], 0.0), C);
+ d = (alpha[i] - alpha_old)*yi;
+ xi = prob->x[i];
+ while (xi->index != -1)
+ {
+ w[xi->index-1] += d*xi->value;
+ xi++;
+ }
+ }
+ }
+
+ iter++;
+ if(iter % 10 == 0)
+ info(".");
+
+ if(PGmax_new - PGmin_new <= eps)
+ {
+ if(active_size == l)
+ break;
+ else
+ {
+ active_size = l;
+ info("*");
+ PGmax_old = INF;
+ PGmin_old = -INF;
+ continue;
+ }
+ }
+ PGmax_old = PGmax_new;
+ PGmin_old = PGmin_new;
+ if (PGmax_old <= 0)
+ PGmax_old = INF;
+ if (PGmin_old >= 0)
+ PGmin_old = -INF;
+ }
+
+ info("\noptimization finished, #iter = %d\n",iter);
+ if (iter >= max_iter)
+ info("\nWARNING: reaching max number of iterations\nUsing -s 2 may be faster (also see FAQ)\n\n");
+
+ // calculate objective value
+
+ double v = 0;
+ int nSV = 0;
+ for(i=0; i<w_size; i++)
+ v += w[i]*w[i];
+ for(i=0; i<l; i++)
+ {
+ v += alpha[i]*(alpha[i]*diag[GETI(i)] - 2);
+ if(alpha[i] > 0)
+ ++nSV;
+ }
+ info("Objective value = %lf\n",v/2);
+ info("nSV = %d\n",nSV);
+
+ delete [] QD;
+ delete [] alpha;
+ delete [] y;
+ delete [] index;
+}
+
+
+// A coordinate descent algorithm for
+// L1-loss and L2-loss epsilon-SVR dual problem
+//
+// min_\beta 0.5\beta^T (Q + diag(lambda)) \beta - p \sum_{i=1}^l|\beta_i| + \sum_{i=1}^l yi\beta_i,
+// s.t. -upper_bound_i <= \beta_i <= upper_bound_i,
+//
+// where Qij = xi^T xj and
+// D is a diagonal matrix
+//
+// In L1-SVM case:
+// upper_bound_i = C
+// lambda_i = 0
+// In L2-SVM case:
+// upper_bound_i = INF
+// lambda_i = 1/(2*C)
+//
+// Given:
+// x, y, p, C
+// eps is the stopping tolerance
+//
+// solution will be put in w
+//
+// See Algorithm 4 of Ho and Lin, 2012
+
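+// Editorial sketch (not part of the original source): for each beta_i the
+// loop below forms G = x_i^T*w - y_i + lambda_i*beta_i and H = QD_i +
+// lambda_i, takes the one-variable Newton direction of the piecewise
+// quadratic objective,
+// d = -(G+p)/H if G+p < H*beta_i
+// d = -(G-p)/H if G-p > H*beta_i
+// d = -beta_i otherwise,
+// then clips beta_i + d to [-upper_bound_i, upper_bound_i] and updates w.
+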
+#undef GETI
+#define GETI(i) (0)
+// To support weights for instances, use GETI(i) (i)
+
+static void solve_l2r_l1l2_svr(
+ const problem *prob, double *w, const parameter *param,
+ int solver_type)
+{
+ int l = prob->l;
+ double C = param->C;
+ double p = param->p;
+ int w_size = prob->n;
+ double eps = param->eps;
+ int i, s, iter = 0;
+ int max_iter = 1000;
+ int active_size = l;
+ int *index = new int[l];
+
+ double d, G, H;
+ double Gmax_old = INF;
+ double Gmax_new, Gnorm1_new;
+ double Gnorm1_init;
+ double *beta = new double[l];
+ double *QD = new double[l];
+ double *y = prob->y;
+
+ // L2R_L2LOSS_SVR_DUAL
+ double lambda[1], upper_bound[1];
+ lambda[0] = 0.5/C;
+ upper_bound[0] = INF;
+
+ if(solver_type == L2R_L1LOSS_SVR_DUAL)
+ {
+ lambda[0] = 0;
+ upper_bound[0] = C;
+ }
+
+ // Initial beta can be set here. Note that
+ // -upper_bound <= beta[i] <= upper_bound
+ for(i=0; i<l; i++)
+ beta[i] = 0;
+
+ for(i=0; i<w_size; i++)
+ w[i] = 0;
+ for(i=0; i<l; i++)
+ {
+ QD[i] = 0;
+ feature_node *xi = prob->x[i];
+ while(xi->index != -1)
+ {
+ double val = xi->value;
+ QD[i] += val*val;
+ w[xi->index-1] += beta[i]*val;
+ xi++;
+ }
+
+ index[i] = i;
+ }
+
+
+ while(iter < max_iter)
+ {
+ Gmax_new = 0;
+ Gnorm1_new = 0;
+
+ for(i=0; i<active_size; i++)
+ {
+ int j = i+rand()%(active_size-i);
+ swap(index[i], index[j]);
+ }
+
+ for(s=0; s<active_size; s++)
+ {
+ i = index[s];
+ G = -y[i] + lambda[GETI(i)]*beta[i];
+ H = QD[i] + lambda[GETI(i)];
+
+ feature_node *xi = prob->x[i];
+ while(xi->index != -1)
+ {
+ int ind = xi->index-1;
+ double val = xi->value;
+ G += val*w[ind];
+ xi++;
+ }
+
+ double Gp = G+p;
+ double Gn = G-p;
+ double violation = 0;
+ if(beta[i] == 0)
+ {
+ if(Gp < 0)
+ violation = -Gp;
+ else if(Gn > 0)
+ violation = Gn;
+ else if(Gp>Gmax_old && Gn<-Gmax_old)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+ }
+ else if(beta[i] >= upper_bound[GETI(i)])
+ {
+ if(Gp > 0)
+ violation = Gp;
+ else if(Gp < -Gmax_old)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+ }
+ else if(beta[i] <= -upper_bound[GETI(i)])
+ {
+ if(Gn < 0)
+ violation = -Gn;
+ else if(Gn > Gmax_old)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+ }
+ else if(beta[i] > 0)
+ violation = fabs(Gp);
+ else
+ violation = fabs(Gn);
+
+ Gmax_new = max(Gmax_new, violation);
+ Gnorm1_new += violation;
+
+ // obtain Newton direction d
+ if(Gp < H*beta[i])
+ d = -Gp/H;
+ else if(Gn > H*beta[i])
+ d = -Gn/H;
+ else
+ d = -beta[i];
+
+ if(fabs(d) < 1.0e-12)
+ continue;
+
+ double beta_old = beta[i];
+ beta[i] = min(max(beta[i]+d, -upper_bound[GETI(i)]), upper_bound[GETI(i)]);
+ d = beta[i]-beta_old;
+
+ if(d != 0)
+ {
+ xi = prob->x[i];
+ while(xi->index != -1)
+ {
+ w[xi->index-1] += d*xi->value;
+ xi++;
+ }
+ }
+ }
+
+ if(iter == 0)
+ Gnorm1_init = Gnorm1_new;
+ iter++;
+ if(iter % 10 == 0)
+ info(".");
+
+ if(Gnorm1_new <= eps*Gnorm1_init)
+ {
+ if(active_size == l)
+ break;
+ else
+ {
+ active_size = l;
+ info("*");
+ Gmax_old = INF;
+ continue;
+ }
+ }
+
+ Gmax_old = Gmax_new;
+ }
+
+ info("\noptimization finished, #iter = %d\n", iter);
+ if(iter >= max_iter)
+ info("\nWARNING: reaching max number of iterations\nUsing -s 11 may be faster\n\n");
+
+ // calculate objective value
+ double v = 0;
+ int nSV = 0;
+ for(i=0; i<w_size; i++)
+ v += w[i]*w[i];
+ v = 0.5*v;
+ for(i=0; i<l; i++)
+ {
+ v += p*fabs(beta[i]) - y[i]*beta[i] + 0.5*lambda[GETI(i)]*beta[i]*beta[i];
+ if(beta[i] != 0)
+ nSV++;
+ }
+
+ info("Objective value = %lf\n", v);
+ info("nSV = %d\n", nSV);
+
+ delete [] beta;
+ delete [] QD;
+ delete [] index;
+}
+
+// A coordinate descent algorithm for
+// the dual of L2-regularized logistic regression problems
+//
+// min_\alpha 0.5(\alpha^T Q \alpha) + \sum \alpha_i log (\alpha_i) + (upper_bound_i - \alpha_i) log (upper_bound_i - \alpha_i),
+// s.t. 0 <= \alpha_i <= upper_bound_i,
+//
+// where Qij = yi yj xi^T xj and
+// upper_bound_i = Cp if y_i = 1
+// upper_bound_i = Cn if y_i = -1
+//
+// Given:
+// x, y, Cp, Cn
+// eps is the stopping tolerance
+//
+// solution will be put in w
+//
+// See Algorithm 5 of Yu et al., MLJ 2010
+
+#undef GETI
+#define GETI(i) (y[i]+1)
+// To support weights for instances, use GETI(i) (i)
+
+static void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, double Cn)
+{
+ int l = prob->l;
+ int w_size = prob->n;
+ int i, s, iter = 0;
+ double *xTx = new double[l];
+ int max_iter = 1000;
+ int *index = new int[l];
+ double *alpha = new double[2*l]; // store alpha and C - alpha
+ schar *y = new schar[l];
+ int max_inner_iter = 100; // for inner Newton
+ double innereps = 1e-2;
+ double innereps_min = min(1e-8, eps);
+ double upper_bound[3] = {Cn, 0, Cp};
+
+ for(i=0; i<l; i++)
+ {
+ if(prob->y[i] > 0)
+ {
+ y[i] = +1;
+ }
+ else
+ {
+ y[i] = -1;
+ }
+ }
+
+ // Initial alpha can be set here. Note that
+ // 0 < alpha[i] < upper_bound[GETI(i)]
+ // alpha[2*i] + alpha[2*i+1] = upper_bound[GETI(i)]
+ for(i=0; i<l; i++)
+ {
+ alpha[2*i] = min(0.001*upper_bound[GETI(i)], 1e-8);
+ alpha[2*i+1] = upper_bound[GETI(i)] - alpha[2*i];
+ }
+
+ for(i=0; i<w_size; i++)
+ w[i] = 0;
+ for(i=0; i<l; i++)
+ {
+ xTx[i] = 0;
+ feature_node *xi = prob->x[i];
+ while (xi->index != -1)
+ {
+ double val = xi->value;
+ xTx[i] += val*val;
+ w[xi->index-1] += y[i]*alpha[2*i]*val;
+ xi++;
+ }
+ index[i] = i;
+ }
+
+ while (iter < max_iter)
+ {
+ for (i=0; i<l; i++)
+ {
+ int j = i+rand()%(l-i);
+ swap(index[i], index[j]);
+ }
+ int newton_iter = 0;
+ double Gmax = 0;
+ for (s=0; s<l; s++)
+ {
+ i = index[s];
+ schar yi = y[i];
+ double C = upper_bound[GETI(i)];
+ double ywTx = 0, xisq = xTx[i];
+ feature_node *xi = prob->x[i];
+ while (xi->index != -1)
+ {
+ ywTx += w[xi->index-1]*xi->value;
+ xi++;
+ }
+ ywTx *= y[i];
+ double a = xisq, b = ywTx;
+
+ // Decide to minimize g_1(z) or g_2(z)
+ int ind1 = 2*i, ind2 = 2*i+1, sign = 1;
+ if(0.5*a*(alpha[ind2]-alpha[ind1])+b < 0)
+ {
+ ind1 = 2*i+1;
+ ind2 = 2*i;
+ sign = -1;
+ }
+
+ // g_t(z) = z*log(z) + (C-z)*log(C-z) + 0.5a(z-alpha_old)^2 + sign*b(z-alpha_old)
+ double alpha_old = alpha[ind1];
+ double z = alpha_old;
+ if(C - z < 0.5 * C)
+ z = 0.1*z;
+ double gp = a*(z-alpha_old)+sign*b+log(z/(C-z));
+ Gmax = max(Gmax, fabs(gp));
+
+ // Newton method on the sub-problem
+ const double eta = 0.1; // xi in the paper
+ int inner_iter = 0;
+ while (inner_iter <= max_inner_iter)
+ {
+ if(fabs(gp) < innereps)
+ break;
+ double gpp = a + C/(C-z)/z;
+ double tmpz = z - gp/gpp;
+ if(tmpz <= 0)
+ z *= eta;
+ else // tmpz in (0, C)
+ z = tmpz;
+ gp = a*(z-alpha_old)+sign*b+log(z/(C-z));
+ newton_iter++;
+ inner_iter++;
+ }
+
+ if(inner_iter > 0) // update w
+ {
+ alpha[ind1] = z;
+ alpha[ind2] = C-z;
+ xi = prob->x[i];
+ while (xi->index != -1)
+ {
+ w[xi->index-1] += sign*(z-alpha_old)*yi*xi->value;
+ xi++;
+ }
+ }
+ }
+
+ iter++;
+ if(iter % 10 == 0)
+ info(".");
+
+ if(Gmax < eps)
+ break;
+
+ if(newton_iter <= l/10)
+ innereps = max(innereps_min, 0.1*innereps);
+
+ }
+
+ info("\noptimization finished, #iter = %d\n",iter);
+ if (iter >= max_iter)
+ info("\nWARNING: reaching max number of iterations\nUsing -s 0 may be faster (also see FAQ)\n\n");
+
+ // calculate objective value
+
+ double v = 0;
+ for(i=0; i<w_size; i++)
+ v += w[i] * w[i];
+ v *= 0.5;
+ for(i=0; i<l; i++)
+ v += alpha[2*i] * log(alpha[2*i]) + alpha[2*i+1] * log(alpha[2*i+1])
+ - upper_bound[GETI(i)] * log(upper_bound[GETI(i)]);
+ info("Objective value = %lf\n", v);
+
+ delete [] xTx;
+ delete [] alpha;
+ delete [] y;
+ delete [] index;
+}
+
+// A coordinate descent algorithm for
+// L1-regularized L2-loss support vector classification
+//
+// min_w \sum |wj| + C \sum max(0, 1-yi w^T xi)^2,
+//
+// Given:
+// x, y, Cp, Cn
+// eps is the stopping tolerance
+//
+// solution will be put in w
+//
+// See Yuan et al. (2010) and appendix of LIBLINEAR paper, Fan et al. (2008)
+
+#undef GETI
+#define GETI(i) (y[i]+1)
+// To support weights for instances, use GETI(i) (i)
+
+static void solve_l1r_l2_svc(
+ problem *prob_col, double *w, double eps,
+ double Cp, double Cn)
+{
+ int l = prob_col->l;
+ int w_size = prob_col->n;
+ int j, s, iter = 0;
+ int max_iter = 1000;
+ int active_size = w_size;
+ int max_num_linesearch = 20;
+
+ double sigma = 0.01;
+ double d, G_loss, G, H;
+ double Gmax_old = INF;
+ double Gmax_new, Gnorm1_new;
+ double Gnorm1_init;
+ double d_old, d_diff;
+ double loss_old, loss_new;
+ double appxcond, cond;
+
+ int *index = new int[w_size];
+ schar *y = new schar[l];
+ double *b = new double[l]; // b = 1-ywTx
+ double *xj_sq = new double[w_size];
+ feature_node *x;
+
+ double C[3] = {Cn,0,Cp};
+
+ // Initial w can be set here.
+ for(j=0; j<w_size; j++)
+ w[j] = 0;
+
+ for(j=0; j<l; j++)
+ {
+ b[j] = 1;
+ if(prob_col->y[j] > 0)
+ y[j] = 1;
+ else
+ y[j] = -1;
+ }
+ for(j=0; j<w_size; j++)
+ {
+ index[j] = j;
+ xj_sq[j] = 0;
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ x->value *= y[ind]; // x->value stores yi*xij
+ double val = x->value;
+ b[ind] -= w[j]*val;
+ xj_sq[j] += C[GETI(ind)]*val*val;
+ x++;
+ }
+ }
+
+ while(iter < max_iter)
+ {
+ Gmax_new = 0;
+ Gnorm1_new = 0;
+
+ for(j=0; j<active_size; j++)
+ {
+ int i = j+rand()%(active_size-j);
+ swap(index[i], index[j]);
+ }
+
+ for(s=0; s<active_size; s++)
+ {
+ j = index[s];
+ G_loss = 0;
+ H = 0;
+
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ if(b[ind] > 0)
+ {
+ double val = x->value;
+ double tmp = C[GETI(ind)]*val;
+ G_loss -= tmp*b[ind];
+ H += tmp*val;
+ }
+ x++;
+ }
+ G_loss *= 2;
+
+ G = G_loss;
+ H *= 2;
+ H = max(H, 1e-12);
+
+ double Gp = G+1;
+ double Gn = G-1;
+ double violation = 0;
+ if(w[j] == 0)
+ {
+ if(Gp < 0)
+ violation = -Gp;
+ else if(Gn > 0)
+ violation = Gn;
+ else if(Gp>Gmax_old/l && Gn<-Gmax_old/l)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+ }
+ else if(w[j] > 0)
+ violation = fabs(Gp);
+ else
+ violation = fabs(Gn);
+
+ Gmax_new = max(Gmax_new, violation);
+ Gnorm1_new += violation;
+
+ // obtain Newton direction d
+ if(Gp < H*w[j])
+ d = -Gp/H;
+ else if(Gn > H*w[j])
+ d = -Gn/H;
+ else
+ d = -w[j];
+
+ if(fabs(d) < 1.0e-12)
+ continue;
+
+ double delta = fabs(w[j]+d)-fabs(w[j]) + G*d;
+ d_old = 0;
+ int num_linesearch;
+ for(num_linesearch=0; num_linesearch < max_num_linesearch; num_linesearch++)
+ {
+ d_diff = d_old - d;
+ cond = fabs(w[j]+d)-fabs(w[j]) - sigma*delta;
+
+ appxcond = xj_sq[j]*d*d + G_loss*d + cond;
+ if(appxcond <= 0)
+ {
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ b[x->index-1] += d_diff*x->value;
+ x++;
+ }
+ break;
+ }
+
+ if(num_linesearch == 0)
+ {
+ loss_old = 0;
+ loss_new = 0;
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ if(b[ind] > 0)
+ loss_old += C[GETI(ind)]*b[ind]*b[ind];
+ double b_new = b[ind] + d_diff*x->value;
+ b[ind] = b_new;
+ if(b_new > 0)
+ loss_new += C[GETI(ind)]*b_new*b_new;
+ x++;
+ }
+ }
+ else
+ {
+ loss_new = 0;
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ double b_new = b[ind] + d_diff*x->value;
+ b[ind] = b_new;
+ if(b_new > 0)
+ loss_new += C[GETI(ind)]*b_new*b_new;
+ x++;
+ }
+ }
+
+ cond = cond + loss_new - loss_old;
+ if(cond <= 0)
+ break;
+ else
+ {
+ d_old = d;
+ d *= 0.5;
+ delta *= 0.5;
+ }
+ }
+
+ w[j] += d;
+
+ // recompute b[] if line search takes too many steps
+ if(num_linesearch >= max_num_linesearch)
+ {
+ info("#");
+ for(int i=0; i<l; i++)
+ b[i] = 1;
+
+ for(int i=0; i<w_size; i++)
+ {
+ if(w[i]==0) continue;
+ x = prob_col->x[i];
+ while(x->index != -1)
+ {
+ b[x->index-1] -= w[i]*x->value;
+ x++;
+ }
+ }
+ }
+ }
+
+ if(iter == 0)
+ Gnorm1_init = Gnorm1_new;
+ iter++;
+ if(iter % 10 == 0)
+ info(".");
+
+ if(Gnorm1_new <= eps*Gnorm1_init)
+ {
+ if(active_size == w_size)
+ break;
+ else
+ {
+ active_size = w_size;
+ info("*");
+ Gmax_old = INF;
+ continue;
+ }
+ }
+
+ Gmax_old = Gmax_new;
+ }
+
+ info("\noptimization finished, #iter = %d\n", iter);
+ if(iter >= max_iter)
+ info("\nWARNING: reaching max number of iterations\n");
+
+ // calculate objective value
+
+ double v = 0;
+ int nnz = 0;
+ for(j=0; j<w_size; j++)
+ {
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ x->value *= prob_col->y[x->index-1]; // restore x->value
+ x++;
+ }
+ if(w[j] != 0)
+ {
+ v += fabs(w[j]);
+ nnz++;
+ }
+ }
+ for(j=0; j<l; j++)
+ if(b[j] > 0)
+ v += C[GETI(j)]*b[j]*b[j];
+
+ info("Objective value = %lf\n", v);
+ info("#nonzeros/#features = %d/%d\n", nnz, w_size);
+
+ delete [] index;
+ delete [] y;
+ delete [] b;
+ delete [] xj_sq;
+}
+
+// A coordinate descent algorithm for
+// L1-regularized logistic regression problems
+//
+// min_w \sum |wj| + C \sum log(1+exp(-yi w^T xi)),
+//
+// Given:
+// x, y, Cp, Cn
+// eps is the stopping tolerance
+//
+// solution will be put in w
+//
+// See Yuan et al. (2011) and appendix of LIBLINEAR paper, Fan et al. (2008)
+
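+// Editorial sketch (not part of the original source): this is a two-level
+// method. Each outer (Newton) iteration builds a quadratic model of the
+// logistic loss from the per-instance quantities tau[] and D[] derived from
+// exp(w^T x_i); an inner coordinate descent loop minimizes the L1-regularized
+// model over wpd[] with shrinking, and a backtracking line search on the true
+// objective accepts the step or halves it.
+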
+#undef GETI
+#define GETI(i) (y[i]+1)
+// To support weights for instances, use GETI(i) (i)
+
+static void solve_l1r_lr(
+ const problem *prob_col, double *w, double eps,
+ double Cp, double Cn)
+{
+ int l = prob_col->l;
+ int w_size = prob_col->n;
+ int j, s, newton_iter=0, iter=0;
+ int max_newton_iter = 100;
+ int max_iter = 1000;
+ int max_num_linesearch = 20;
+ int active_size;
+ int QP_active_size;
+
+ double nu = 1e-12;
+ double inner_eps = 1;
+ double sigma = 0.01;
+ double w_norm, w_norm_new;
+ double z, G, H;
+ double Gnorm1_init;
+ double Gmax_old = INF;
+ double Gmax_new, Gnorm1_new;
+ double QP_Gmax_old = INF;
+ double QP_Gmax_new, QP_Gnorm1_new;
+ double delta, negsum_xTd, cond;
+
+ int *index = new int[w_size];
+ schar *y = new schar[l];
+ double *Hdiag = new double[w_size];
+ double *Grad = new double[w_size];
+ double *wpd = new double[w_size];
+ double *xjneg_sum = new double[w_size];
+ double *xTd = new double[l];
+ double *exp_wTx = new double[l];
+ double *exp_wTx_new = new double[l];
+ double *tau = new double[l];
+ double *D = new double[l];
+ feature_node *x;
+
+ double C[3] = {Cn,0,Cp};
+
+ // Initial w can be set here.
+ for(j=0; j<w_size; j++)
+ w[j] = 0;
+
+ for(j=0; j<l; j++)
+ {
+ if(prob_col->y[j] > 0)
+ y[j] = 1;
+ else
+ y[j] = -1;
+
+ exp_wTx[j] = 0;
+ }
+
+ w_norm = 0;
+ for(j=0; j<w_size; j++)
+ {
+ w_norm += fabs(w[j]);
+ wpd[j] = w[j];
+ index[j] = j;
+ xjneg_sum[j] = 0;
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ double val = x->value;
+ exp_wTx[ind] += w[j]*val;
+ if(y[ind] == -1)
+ xjneg_sum[j] += C[GETI(ind)]*val;
+ x++;
+ }
+ }
+ for(j=0; j<l; j++)
+ {
+ exp_wTx[j] = exp(exp_wTx[j]);
+ double tau_tmp = 1/(1+exp_wTx[j]);
+ tau[j] = C[GETI(j)]*tau_tmp;
+ D[j] = C[GETI(j)]*exp_wTx[j]*tau_tmp*tau_tmp;
+ }
+
+ while(newton_iter < max_newton_iter)
+ {
+ Gmax_new = 0;
+ Gnorm1_new = 0;
+ active_size = w_size;
+
+ for(s=0; s<active_size; s++)
+ {
+ j = index[s];
+ Hdiag[j] = nu;
+ Grad[j] = 0;
+
+ double tmp = 0;
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ Hdiag[j] += x->value*x->value*D[ind];
+ tmp += x->value*tau[ind];
+ x++;
+ }
+ Grad[j] = -tmp + xjneg_sum[j];
+
+ double Gp = Grad[j]+1;
+ double Gn = Grad[j]-1;
+ double violation = 0;
+ if(w[j] == 0)
+ {
+ if(Gp < 0)
+ violation = -Gp;
+ else if(Gn > 0)
+ violation = Gn;
+ //outer-level shrinking
+ else if(Gp>Gmax_old/l && Gn<-Gmax_old/l)
+ {
+ active_size--;
+ swap(index[s], index[active_size]);
+ s--;
+ continue;
+ }
+ }
+ else if(w[j] > 0)
+ violation = fabs(Gp);
+ else
+ violation = fabs(Gn);
+
+ Gmax_new = max(Gmax_new, violation);
+ Gnorm1_new += violation;
+ }
+
+ if(newton_iter == 0)
+ Gnorm1_init = Gnorm1_new;
+
+ if(Gnorm1_new <= eps*Gnorm1_init)
+ break;
+
+ iter = 0;
+ QP_Gmax_old = INF;
+ QP_active_size = active_size;
+
+ for(int i=0; i<l; i++)
+ xTd[i] = 0;
+
+ // optimize QP over wpd
+ while(iter < max_iter)
+ {
+ QP_Gmax_new = 0;
+ QP_Gnorm1_new = 0;
+
+ for(j=0; j<QP_active_size; j++)
+ {
+ int i = j+rand()%(QP_active_size-j);
+ swap(index[i], index[j]);
+ }
+
+ for(s=0; s<QP_active_size; s++)
+ {
+ j = index[s];
+ H = Hdiag[j];
+
+ x = prob_col->x[j];
+ G = Grad[j] + (wpd[j]-w[j])*nu;
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ G += x->value*D[ind]*xTd[ind];
+ x++;
+ }
+
+ double Gp = G+1;
+ double Gn = G-1;
+ double violation = 0;
+ if(wpd[j] == 0)
+ {
+ if(Gp < 0)
+ violation = -Gp;
+ else if(Gn > 0)
+ violation = Gn;
+ //inner-level shrinking
+ else if(Gp>QP_Gmax_old/l && Gn<-QP_Gmax_old/l)
+ {
+ QP_active_size--;
+ swap(index[s], index[QP_active_size]);
+ s--;
+ continue;
+ }
+ }
+ else if(wpd[j] > 0)
+ violation = fabs(Gp);
+ else
+ violation = fabs(Gn);
+
+ QP_Gmax_new = max(QP_Gmax_new, violation);
+ QP_Gnorm1_new += violation;
+
+ // obtain solution of one-variable problem
+ if(Gp < H*wpd[j])
+ z = -Gp/H;
+ else if(Gn > H*wpd[j])
+ z = -Gn/H;
+ else
+ z = -wpd[j];
+
+ if(fabs(z) < 1.0e-12)
+ continue;
+ z = min(max(z,-10.0),10.0);
+
+ wpd[j] += z;
+
+ x = prob_col->x[j];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ xTd[ind] += x->value*z;
+ x++;
+ }
+ }
+
+ iter++;
+
+ if(QP_Gnorm1_new <= inner_eps*Gnorm1_init)
+ {
+ //inner stopping
+ if(QP_active_size == active_size)
+ break;
+ //active set reactivation
+ else
+ {
+ QP_active_size = active_size;
+ QP_Gmax_old = INF;
+ continue;
+ }
+ }
+
+ QP_Gmax_old = QP_Gmax_new;
+ }
+
+ if(iter >= max_iter)
+ info("WARNING: reaching max number of inner iterations\n");
+
+ delta = 0;
+ w_norm_new = 0;
+ for(j=0; j<w_size; j++)
+ {
+ delta += Grad[j]*(wpd[j]-w[j]);
+ if(wpd[j] != 0)
+ w_norm_new += fabs(wpd[j]);
+ }
+ delta += (w_norm_new-w_norm);
+
+ negsum_xTd = 0;
+ for(int i=0; i<l; i++)
+ if(y[i] == -1)
+ negsum_xTd += C[GETI(i)]*xTd[i];
+
+ int num_linesearch;
+ for(num_linesearch=0; num_linesearch < max_num_linesearch; num_linesearch++)
+ {
+ cond = w_norm_new - w_norm + negsum_xTd - sigma*delta;
+
+ for(int i=0; i<l; i++)
+ {
+ double exp_xTd = exp(xTd[i]);
+ exp_wTx_new[i] = exp_wTx[i]*exp_xTd;
+ cond += C[GETI(i)]*log((1+exp_wTx_new[i])/(exp_xTd+exp_wTx_new[i]));
+ }
+
+ if(cond <= 0)
+ {
+ w_norm = w_norm_new;
+ for(j=0; j<w_size; j++)
+ w[j] = wpd[j];
+ for(int i=0; i<l; i++)
+ {
+ exp_wTx[i] = exp_wTx_new[i];
+ double tau_tmp = 1/(1+exp_wTx[i]);
+ tau[i] = C[GETI(i)]*tau_tmp;
+ D[i] = C[GETI(i)]*exp_wTx[i]*tau_tmp*tau_tmp;
+ }
+ break;
+ }
+ else
+ {
+ w_norm_new = 0;
+ for(j=0; j<w_size; j++)
+ {
+ wpd[j] = (w[j]+wpd[j])*0.5;
+ if(wpd[j] != 0)
+ w_norm_new += fabs(wpd[j]);
+ }
+ delta *= 0.5;
+ negsum_xTd *= 0.5;
+ for(int i=0; i<l; i++)
+ xTd[i] *= 0.5;
+ }
+ }
+
+ // Recompute some info due to too many line search steps
+ if(num_linesearch >= max_num_linesearch)
+ {
+ for(int i=0; i<l; i++)
+ exp_wTx[i] = 0;
+
+ for(int i=0; i<w_size; i++)
+ {
+ if(w[i]==0) continue;
+ x = prob_col->x[i];
+ while(x->index != -1)
+ {
+ exp_wTx[x->index-1] += w[i]*x->value;
+ x++;
+ }
+ }
+
+ for(int i=0; i<l; i++)
+ exp_wTx[i] = exp(exp_wTx[i]);
+ }
+
+ if(iter == 1)
+ inner_eps *= 0.25;
+
+ newton_iter++;
+ Gmax_old = Gmax_new;
+
+ info("iter %3d  #CD cycles %d\n", newton_iter, iter);
+ }
+
+ info("=========================\n");
+ info("optimization finished, #iter = %d\n", newton_iter);
+ if(newton_iter >= max_newton_iter)
+ info("WARNING: reaching max number of iterations\n");
+
+ // calculate objective value
+
+ double v = 0;
+ int nnz = 0;
+ for(j=0; j<w_size; j++)
+ if(w[j] != 0)
+ {
+ v += fabs(w[j]);
+ nnz++;
+ }
+ for(j=0; j<l; j++)
+ if(y[j] == 1)
+ v += C[GETI(j)]*log(1+1/exp_wTx[j]);
+ else
+ v += C[GETI(j)]*log(1+exp_wTx[j]);
+
+ info("Objective value = %lf\n", v);
+ info("#nonzeros/#features = %d/%d\n", nnz, w_size);
+
+ delete [] index;
+ delete [] y;
+ delete [] Hdiag;
+ delete [] Grad;
+ delete [] wpd;
+ delete [] xjneg_sum;
+ delete [] xTd;
+ delete [] exp_wTx;
+ delete [] exp_wTx_new;
+ delete [] tau;
+ delete [] D;
+}
+
+// transpose matrix X from row format to column format
+static void transpose(const problem *prob, feature_node **x_space_ret, problem *prob_col)
+{
+ int i;
+ int l = prob->l;
+ int n = prob->n;
+ int nnz = 0;
+ int *col_ptr = new int[n+1];
+ feature_node *x_space;
+ prob_col->l = l;
+ prob_col->n = n;
+ prob_col->y = new double[l];
+ prob_col->x = new feature_node*[n];
+
+ for(i=0; i<l; i++)
+ prob_col->y[i] = prob->y[i];
+
+ for(i=0; i<n+1; i++)
+ col_ptr[i] = 0;
+ for(i=0; i<l; i++)
+ {
+ feature_node *x = prob->x[i];
+ while(x->index != -1)
+ {
+ nnz++;
+ col_ptr[x->index]++;
+ x++;
+ }
+ }
+ for(i=1; i<n+1; i++)
+ col_ptr[i] += col_ptr[i-1] + 1;
+ x_space = new feature_node[nnz+n];
+ for(i=0; i<n; i++)
+ prob_col->x[i] = &x_space[col_ptr[i]];
+
+ for(i=0; i<l; i++)
+ {
+ feature_node *x = prob->x[i];
+ while(x->index != -1)
+ {
+ int ind = x->index-1;
+ x_space[col_ptr[ind]].index = i+1; // starts from 1
+ x_space[col_ptr[ind]].value = x->value;
+ col_ptr[ind]++;
+ x++;
+ }
+ }
+ for(i=0; i<n; i++)
+ x_space[col_ptr[i]].index = -1;
+
+ *x_space_ret = x_space;
+
+ delete [] col_ptr;
+}
+
+// label: label name, start: begin of each class, count: #data of classes, perm: indices to the original data
+// perm, length l, must be allocated before calling this subroutine
+static void group_classes(const problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm)
+{
+ int l = prob->l;
+ int max_nr_class = 16;
+ int nr_class = 0;
+ int *label = Malloc(int,max_nr_class);
+ int *count = Malloc(int,max_nr_class);
+ int *data_label = Malloc(int,l);
+ int i;
+
+ for(i=0;i<l;i++)
+ {
+ int this_label = (int)prob->y[i];
+ int j;
+ for(j=0;j<nr_class;j++)
+ {
+ if(this_label == label[j])
+ {
+ ++count[j];
+ break;
+ }
+ }
+ data_label[i] = j;
+ if(j == nr_class)
+ {
+ if(nr_class == max_nr_class)
+ {
+ max_nr_class *= 2;
+ label = (int *)realloc(label,max_nr_class*sizeof(int));
+ count = (int *)realloc(count,max_nr_class*sizeof(int));
+ }
+ label[nr_class] = this_label;
+ count[nr_class] = 1;
+ ++nr_class;
+ }
+ }
+
+ int *start = Malloc(int,nr_class);
+ start[0] = 0;
+ for(i=1;i<nr_class;i++)
+ start[i] = start[i-1]+count[i-1];
+ for(i=0;i<l;i++)
+ {
+ perm[start[data_label[i]]] = i;
+ ++start[data_label[i]];
+ }
+ start[0] = 0;
+ for(i=1;i<nr_class;i++)
+ start[i] = start[i-1]+count[i-1];
+
+ *nr_class_ret = nr_class;
+ *label_ret = label;
+ *start_ret = start;
+ *count_ret = count;
+ free(data_label);
+}
+
+static void train_one(const problem *prob, const parameter *param, double *w, double Cp, double Cn)
+{
+ double eps=param->eps;
+ int pos = 0;
+ int neg = 0;
+ for(int i=0;i<prob->l;i++)
+ if(prob->y[i] > 0)
+ pos++;
+ neg = prob->l - pos;
+
+ double primal_solver_tol = eps*max(min(pos,neg), 1)/prob->l;
+
+ function *fun_obj=NULL;
+ switch(param->solver_type)
+ {
+ case L2R_LR:
+ {
+ double *C = new double[prob->l];
+ for(int i = 0; i < prob->l; i++)
+ {
+ if(prob->y[i] > 0)
+ C[i] = Cp;
+ else
+ C[i] = Cn;
+ }
+ fun_obj=new l2r_lr_fun(prob, C);
+ TRON tron_obj(fun_obj, primal_solver_tol);
+ tron_obj.set_print_string(liblinear_print_string);
+ tron_obj.tron(w);
+ delete fun_obj;
+ delete[] C;
+ break;
+ }
+ case L2R_L2LOSS_SVC:
+ {
+ double *C = new double[prob->l];
+ for(int i = 0; i < prob->l; i++)
+ {
+ if(prob->y[i] > 0)
+ C[i] = Cp;
+ else
+ C[i] = Cn;
+ }
+ fun_obj=new l2r_l2_svc_fun(prob, C);
+ TRON tron_obj(fun_obj, primal_solver_tol);
+ tron_obj.set_print_string(liblinear_print_string);
+ tron_obj.tron(w);
+ delete fun_obj;
+ delete[] C;
+ break;
+ }
+ case L2R_L2LOSS_SVC_DUAL:
+ solve_l2r_l1l2_svc(prob, w, eps, Cp, Cn, L2R_L2LOSS_SVC_DUAL);
+ break;
+ case L2R_L1LOSS_SVC_DUAL:
+ solve_l2r_l1l2_svc(prob, w, eps, Cp, Cn, L2R_L1LOSS_SVC_DUAL);
+ break;
+ case L1R_L2LOSS_SVC:
+ {
+ problem prob_col;
+ feature_node *x_space = NULL;
+ transpose(prob, &x_space ,&prob_col);
+ solve_l1r_l2_svc(&prob_col, w, primal_solver_tol, Cp, Cn);
+ delete [] prob_col.y;
+ delete [] prob_col.x;
+ delete [] x_space;
+ break;
+ }
+ case L1R_LR:
+ {
+ problem prob_col;
+ feature_node *x_space = NULL;
+ transpose(prob, &x_space ,&prob_col);
+ solve_l1r_lr(&prob_col, w, primal_solver_tol, Cp, Cn);
+ delete [] prob_col.y;
+ delete [] prob_col.x;
+ delete [] x_space;
+ break;
+ }
+ case L2R_LR_DUAL:
+ solve_l2r_lr_dual(prob, w, eps, Cp, Cn);
+ break;
+ case L2R_L2LOSS_SVR:
+ {
+ double *C = new double[prob->l];
+ for(int i = 0; i < prob->l; i++)
+ C[i] = param->C;
+
+ fun_obj=new l2r_l2_svr_fun(prob, C, param->p);
+ TRON tron_obj(fun_obj, param->eps);
+ tron_obj.set_print_string(liblinear_print_string);
+ tron_obj.tron(w);
+ delete fun_obj;
+ delete[] C;
+ break;
+
+ }
+ case L2R_L1LOSS_SVR_DUAL:
+ solve_l2r_l1l2_svr(prob, w, param, L2R_L1LOSS_SVR_DUAL);
+ break;
+ case L2R_L2LOSS_SVR_DUAL:
+ solve_l2r_l1l2_svr(prob, w, param, L2R_L2LOSS_SVR_DUAL);
+ break;
+ default:
+ fprintf(stderr, "ERROR: unknown solver_type\n");
+ break;
+ }
+}
+
+//
+// Interface functions
+//
+model* train(const problem *prob, const parameter *param)
+{
+ int i,j;
+ int l = prob->l;
+ int n = prob->n;
+ int w_size = prob->n;
+ model *model_ = Malloc(model,1);
+
+ if(prob->bias>=0)
+ model_->nr_feature=n-1;
+ else
+ model_->nr_feature=n;
+ model_->param = *param;
+ model_->bias = prob->bias;
+
+ if(param->solver_type == L2R_L2LOSS_SVR ||
+ param->solver_type == L2R_L1LOSS_SVR_DUAL ||
+ param->solver_type == L2R_L2LOSS_SVR_DUAL)
+ {
+ model_->w = Malloc(double, w_size);
+ model_->nr_class = 2;
+ model_->label = NULL;
+ train_one(prob, param, &model_->w[0], 0, 0);
+ }
+ else
+ {
+ int nr_class;
+ int *label = NULL;
+ int *start = NULL;
+ int *count = NULL;
+ int *perm = Malloc(int,l);
+
+ // group training data of the same class
+ group_classes(prob,&nr_class,&label,&start,&count,perm);
+
+ model_->nr_class=nr_class;
+ model_->label = Malloc(int,nr_class);
+ for(i=0;i<nr_class;i++)
+ model_->label[i] = label[i];
+
+ // calculate weighted C
+ double *weighted_C = Malloc(double, nr_class);
+ for(i=0;i<nr_class;i++)
+ weighted_C[i] = param->C;
+ for(i=0;i<param->nr_weight;i++)
+ {
+ for(j=0;j<nr_class;j++)
+ if(param->weight_label[i] == label[j])
+ break;
+ if(j == nr_class)
+ fprintf(stderr,"WARNING: class label %d specified in weight is not found\n", param->weight_label[i]);
+ else
+ weighted_C[j] *= param->weight[i];
+ }
+
+ // constructing the subproblem
+ feature_node **x = Malloc(feature_node *,l);
+ for(i=0;i<l;i++)
+ x[i] = prob->x[perm[i]];
+
+ int k;
+ problem sub_prob;
+ sub_prob.l = l;
+ sub_prob.n = n;
+ sub_prob.x = Malloc(feature_node *,sub_prob.l);
+ sub_prob.y = Malloc(double,sub_prob.l);
+
+ for(k=0; k<sub_prob.l; k++)
+ sub_prob.x[k] = x[k];
+
+ // multi-class svm by Crammer and Singer
+ if(param->solver_type == MCSVM_CS)
+ {
+ model_->w=Malloc(double, n*nr_class);
+ for(i=0;i<nr_class;i++)
+ for(j=start[i];j<start[i]+count[i];j++)
+ sub_prob.y[j] = i;
+ Solver_MCSVM_CS Solver(&sub_prob, nr_class, weighted_C, param->eps);
+ Solver.Solve(model_->w);
+ }
+ else
+ {
+ if(nr_class == 2)
+ {
+ model_->w=Malloc(double, w_size);
+
+ int e0 = start[0]+count[0];
+ k=0;
+ for(; k<e0; k++)
+ sub_prob.y[k] = +1;
+ for(; k<sub_prob.l; k++)
+ sub_prob.y[k] = -1;
+
+ train_one(&sub_prob, param, &model_->w[0], weighted_C[0], weighted_C[1]);
+ }
+ else
+ {
+ model_->w=Malloc(double, w_size*nr_class);
+ double *w=Malloc(double, w_size);
+ for(i=0;i<nr_class;i++)
+ {
+ int si = start[i];
+ int ei = si+count[i];
+
+ k=0;
+ for(; k<si; k++)
+ sub_prob.y[k] = -1;
+ for(; k<ei; k++)
+ sub_prob.y[k] = +1;
+ for(; k<sub_prob.l; k++)
+ sub_prob.y[k] = -1;
+
+ train_one(&sub_prob, param, w, weighted_C[i], param->C);
+
+ for(int j=0;j<w_size;j++)
+ model_->w[j*nr_class+i] = w[j];
+ }
+ free(w);
+ }
+
+ }
+
+ free(x);
+ free(label);
+ free(start);
+ free(count);
+ free(perm);
+ free(sub_prob.x);
+ free(sub_prob.y);
+ free(weighted_C);
+ }
+ return model_;
+}
+
+void cross_validation(const problem *prob, const parameter *param, int nr_fold, double *target)
+{
+ int i;
+ int *fold_start = Malloc(int,nr_fold+1);
+ int l = prob->l;
+ int *perm = Malloc(int,l);
+
+ for(i=0;i<l;i++) perm[i]=i;
+ for(i=0;i<l;i++)
+ {
+ int j = i+rand()%(l-i);
+ swap(perm[i],perm[j]);
+ }
+ for(i=0;i<=nr_fold;i++)
+ fold_start[i]=i*l/nr_fold;
+
+ for(i=0;i<nr_fold;i++)
+ {
+ int begin = fold_start[i];
+ int end = fold_start[i+1];
+ int j,k;
+ struct problem subprob;
+
+ subprob.bias = prob->bias;
+ subprob.n = prob->n;
+ subprob.l = l-(end-begin);
+ subprob.x = Malloc(struct feature_node*,subprob.l);
+ subprob.y = Malloc(double,subprob.l);
+
+ k=0;
+ for(j=0;j<begin;j++)
+ {
+ subprob.x[k] = prob->x[perm[j]];
+ subprob.y[k] = prob->y[perm[j]];
+ ++k;
+ }
+ for(j=end;j<l;j++)
+ {
+ subprob.x[k] = prob->x[perm[j]];
+ subprob.y[k] = prob->y[perm[j]];
+ ++k;
+ }
+ struct model *submodel = train(&subprob,param);
+ for(j=begin;j<end;j++)
+ target[perm[j]] = predict(submodel,prob->x[perm[j]]);
+ free_and_destroy_model(&submodel);
+ free(subprob.x);
+ free(subprob.y);
+ }
+ free(fold_start);
+ free(perm);
+}
+
+double predict_values(const struct model *model_, const struct feature_node *x, double *dec_values)
+{
+ int idx;
+ int n;
+ if(model_->bias>=0)
+ n=model_->nr_feature+1;
+ else
+ n=model_->nr_feature;
+ double *w=model_->w;
+ int nr_class=model_->nr_class;
+ int i;
+ int nr_w;
+ if(nr_class==2 && model_->param.solver_type != MCSVM_CS)
+ nr_w = 1;
+ else
+ nr_w = nr_class;
+
+ const feature_node *lx=x;
+ for(i=0;i<nr_w;i++)
+ dec_values[i] = 0;
+ for(; (idx=lx->index)!=-1; lx++)
+ {
+ // the dimension of testing data may exceed that of training
+ if(idx<=n)
+ for(i=0;i<nr_w;i++)
+ dec_values[i] += w[(idx-1)*nr_w+i]*lx->value;
+ }
+
+ if(nr_class==2)
+ {
+ if(model_->param.solver_type == L2R_L2LOSS_SVR ||
+ model_->param.solver_type == L2R_L1LOSS_SVR_DUAL ||
+ model_->param.solver_type == L2R_L2LOSS_SVR_DUAL)
+ return dec_values[0];
+ else
+ return (dec_values[0]>0)?model_->label[0]:model_->label[1];
+ }
+ else
+ {
+ int dec_max_idx = 0;
+ for(i=1;i<nr_class;i++)
+ {
+ if(dec_values[i] > dec_values[dec_max_idx])
+ dec_max_idx = i;
+ }
+ return model_->label[dec_max_idx];
+ }
+}
+
+double predict(const model *model_, const feature_node *x)
+{
+ double *dec_values = Malloc(double, model_->nr_class);
+ double label=predict_values(model_, x, dec_values);
+ free(dec_values);
+ return label;
+}
+
+double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates)
+{
+ if(check_probability_model(model_))
+ {
+ int i;
+ int nr_class=model_->nr_class;
+ int nr_w;
+ if(nr_class==2)
+ nr_w = 1;
+ else
+ nr_w = nr_class;
+
+ double label=predict_values(model_, x, prob_estimates);
+ for(i=0;i<nr_class;i++)
+ prob_estimates[i]=1/(1+exp(-prob_estimates[i]));
+
+ if(nr_class==2) // for binary classification
+ prob_estimates[1]=1.-prob_estimates[0];
+ else
+ {
+ double sum=0;
+ for(i=0; i<nr_class; i++)
+ sum+=prob_estimates[i];
+
+ for(i=0; i<nr_class; i++)
+ prob_estimates[i]=prob_estimates[i]/sum;
+ }
+
+ return label;
+ }
+ else
+ return 0;
+}
+
+static const char *solver_type_table[]=
+{
+ "L2R_LR", "L2R_L2LOSS_SVC_DUAL", "L2R_L2LOSS_SVC", "L2R_L1LOSS_SVC_DUAL", "MCSVM_CS",
+ "L1R_L2LOSS_SVC", "L1R_LR", "L2R_LR_DUAL",
+ "", "", "",
+ "L2R_L2LOSS_SVR", "L2R_L2LOSS_SVR_DUAL", "L2R_L1LOSS_SVR_DUAL", NULL
+};
+
+int save_model(const char *model_file_name, const struct model *model_)
+{
+ int i;
+ int nr_feature=model_->nr_feature;
+ int n;
+ const parameter& param = model_->param;
+
+ if(model_->bias>=0)
+ n=nr_feature+1;
+ else
+ n=nr_feature;
+ int w_size = n;
+ FILE *fp = fopen(model_file_name,"w");
+ if(fp==NULL) return -1;
+
+ char *old_locale = strdup(setlocale(LC_ALL, NULL));
+ setlocale(LC_ALL, "C");
+
+ int nr_w;
+ if(model_->nr_class==2 && model_->param.solver_type != MCSVM_CS)
+ nr_w=1;
+ else
+ nr_w=model_->nr_class;
+
+ fprintf(fp, "solver_type %s\n", solver_type_table[param.solver_type]);
+ fprintf(fp, "nr_class %d\n", model_->nr_class);
+
+ if(model_->label)
+ {
+ fprintf(fp, "label");
+ for(i=0; inr_class; i++)
+ fprintf(fp, " %d", model_->label[i]);
+ fprintf(fp, "\n");
+ }
+
+ fprintf(fp, "nr_feature %d\n", nr_feature);
+
+ fprintf(fp, "bias %.16g\n", model_->bias);
+
+ fprintf(fp, "w\n");
+ for(i=0; i<w_size; i++)
+ {
+ int j;
+ for(j=0; j<nr_w; j++)
+ fprintf(fp, "%.16g ", model_->w[i*nr_w+j]);
+ fprintf(fp, "\n");
+ }
+
+ setlocale(LC_ALL, old_locale);
+ free(old_locale);
+
+ if (ferror(fp) != 0 || fclose(fp) != 0) return -1;
+ else return 0;
+}
+
+struct model *load_model(const char *model_file_name)
+{
+ FILE *fp = fopen(model_file_name,"r");
+ if(fp==NULL) return NULL;
+
+ int i;
+ int nr_feature;
+ int n;
+ int nr_class;
+ double bias;
+ model *model_ = Malloc(model,1);
+ parameter& param = model_->param;
+
+ model_->label = NULL;
+
+ char *old_locale = strdup(setlocale(LC_ALL, NULL));
+ setlocale(LC_ALL, "C");
+
+ char cmd[81];
+ while(1)
+ {
+ fscanf(fp,"%80s",cmd);
+ if(strcmp(cmd,"solver_type")==0)
+ {
+ fscanf(fp,"%80s",cmd);
+ int i;
+ for(i=0;solver_type_table[i];i++)
+ {
+ if(strcmp(solver_type_table[i],cmd)==0)
+ {
+ param.solver_type=i;
+ break;
+ }
+ }
+ if(solver_type_table[i] == NULL)
+ {
+ fprintf(stderr,"unknown solver type.\n");
+
+ setlocale(LC_ALL, old_locale);
+ free(model_->label);
+ free(model_);
+ free(old_locale);
+ return NULL;
+ }
+ }
+ else if(strcmp(cmd,"nr_class")==0)
+ {
+ fscanf(fp,"%d",&nr_class);
+ model_->nr_class=nr_class;
+ }
+ else if(strcmp(cmd,"nr_feature")==0)
+ {
+ fscanf(fp,"%d",&nr_feature);
+ model_->nr_feature=nr_feature;
+ }
+ else if(strcmp(cmd,"bias")==0)
+ {
+ fscanf(fp,"%lf",&bias);
+ model_->bias=bias;
+ }
+ else if(strcmp(cmd,"w")==0)
+ {
+ break;
+ }
+ else if(strcmp(cmd,"label")==0)
+ {
+ int nr_class = model_->nr_class;
+ model_->label = Malloc(int,nr_class);
+ for(int i=0;i<nr_class;i++)
+ fscanf(fp,"%d",&model_->label[i]);
+ }
+ else
+ {
+ fprintf(stderr,"unknown text in model file: [%s]\n",cmd);
+ setlocale(LC_ALL, old_locale);
+ free(model_->label);
+ free(model_);
+ free(old_locale);
+ return NULL;
+ }
+ }
+
+ nr_feature=model_->nr_feature;
+ if(model_->bias>=0)
+ n=nr_feature+1;
+ else
+ n=nr_feature;
+ int w_size = n;
+ int nr_w;
+ if(nr_class==2 && param.solver_type != MCSVM_CS)
+ nr_w = 1;
+ else
+ nr_w = nr_class;
+
+ model_->w=Malloc(double, w_size*nr_w);
+ for(i=0; i<w_size; i++)
+ {
+ int j;
+ for(j=0; j<nr_w; j++)
+ fscanf(fp, "%lf ", &model_->w[i*nr_w+j]);
+ fscanf(fp, "\n");
+ }
+
+ setlocale(LC_ALL, old_locale);
+ free(old_locale);
+
+ if (ferror(fp) != 0 || fclose(fp) != 0) return NULL;
+
+ return model_;
+}
+
+int get_nr_feature(const model *model_)
+{
+ return model_->nr_feature;
+}
+
+int get_nr_class(const model *model_)
+{
+ return model_->nr_class;
+}
+
+void get_labels(const model *model_, int* label)
+{
+ if (model_->label != NULL)
+ for(int i=0;i<model_->nr_class;i++)
+ label[i] = model_->label[i];
+}
+
+void free_model_content(struct model *model_ptr)
+{
+ if(model_ptr->w != NULL)
+ free(model_ptr->w);
+ if(model_ptr->label != NULL)
+ free(model_ptr->label);
+}
+
+void free_and_destroy_model(struct model **model_ptr_ptr)
+{
+ struct model *model_ptr = *model_ptr_ptr;
+ if(model_ptr != NULL)
+ {
+ free_model_content(model_ptr);
+ free(model_ptr);
+ }
+}
+
+void destroy_param(parameter* param)
+{
+ if(param->weight_label != NULL)
+ free(param->weight_label);
+ if(param->weight != NULL)
+ free(param->weight);
+}
+
+const char *check_parameter(const problem *prob, const parameter *param)
+{
+ if(param->eps <= 0)
+ return "eps <= 0";
+
+ if(param->C <= 0)
+ return "C <= 0";
+
+ if(param->p < 0)
+ return "p < 0";
+
+ if(param->solver_type != L2R_LR
+ && param->solver_type != L2R_L2LOSS_SVC_DUAL
+ && param->solver_type != L2R_L2LOSS_SVC
+ && param->solver_type != L2R_L1LOSS_SVC_DUAL
+ && param->solver_type != MCSVM_CS
+ && param->solver_type != L1R_L2LOSS_SVC
+ && param->solver_type != L1R_LR
+ && param->solver_type != L2R_LR_DUAL
+ && param->solver_type != L2R_L2LOSS_SVR
+ && param->solver_type != L2R_L2LOSS_SVR_DUAL
+ && param->solver_type != L2R_L1LOSS_SVR_DUAL)
+ return "unknown solver type";
+
+ return NULL;
+}
+
+int check_probability_model(const struct model *model_)
+{
+ return (model_->param.solver_type==L2R_LR ||
+ model_->param.solver_type==L2R_LR_DUAL ||
+ model_->param.solver_type==L1R_LR);
+}
+
+void set_print_string_function(void (*print_func)(const char*))
+{
+ if (print_func == NULL)
+ liblinear_print_string = &print_string_stdout;
+ else
+ liblinear_print_string = print_func;
+}
+
diff --git a/Bing/Src/LibLinear/linear.h b/Bing/Src/LibLinear/linear.h
new file mode 100644
index 000000000..22a356743
--- /dev/null
+++ b/Bing/Src/LibLinear/linear.h
@@ -0,0 +1,74 @@
+#ifndef _LIBLINEAR_H
+#define _LIBLINEAR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct feature_node
+{
+ int index;
+ double value;
+};
+
+struct problem
+{
+ int l, n;
+ double *y;
+ struct feature_node **x;
+ double bias; /* < 0 if no bias term */
+};
+
+enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR = 11, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL }; /* solver_type */
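+/* The regression solvers start at 11 on purpose: the enum values double as the
+   -s solver codes accepted by the train tool (see train.c). */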
+
+struct parameter
+{
+ int solver_type;
+
+ /* these are for training only */
+ double eps; /* stopping criteria */
+ double C;
+ int nr_weight;
+ int *weight_label;
+ double* weight;
+ double p;
+};
+
+struct model
+{
+ struct parameter param;
+ int nr_class; /* number of classes */
+ int nr_feature;
+ double *w;
+ int *label; /* label of each class */
+ double bias;
+};
+
+struct model* train(const struct problem *prob, const struct parameter *param);
+void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, double *target);
+
+double predict_values(const struct model *model_, const struct feature_node *x, double* dec_values);
+double predict(const struct model *model_, const struct feature_node *x);
+double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);
+
+int save_model(const char *model_file_name, const struct model *model_);
+struct model *load_model(const char *model_file_name);
+
+int get_nr_feature(const struct model *model_);
+int get_nr_class(const struct model *model_);
+void get_labels(const struct model *model_, int* label);
+
+void free_model_content(struct model *model_ptr);
+void free_and_destroy_model(struct model **model_ptr_ptr);
+void destroy_param(struct parameter *param);
+
+const char *check_parameter(const struct problem *prob, const struct parameter *param);
+int check_probability_model(const struct model *model);
+void set_print_string_function(void (*print_func) (const char*));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LIBLINEAR_H */
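+
+/* A minimal usage sketch of this API (illustrative only: the feature values,
+   labels, and file name below are made up, not taken from this repository).
+   Each instance is an array of (index, value) pairs, 1-based and terminated
+   by index = -1:
+
+       struct feature_node x[3] = {{1, 0.5}, {2, 1.0}, {-1, 0}};
+       struct feature_node *rows[1] = {x};
+       double y[1] = {+1};
+       struct problem prob = {1, 2, y, rows, -1};
+       struct parameter prm = {L2R_L2LOSS_SVC_DUAL, 0.1, 1, 0, NULL, NULL, 0.1};
+       if (check_parameter(&prob, &prm) == NULL) {
+           struct model *m = train(&prob, &prm);
+           save_model("example.model", m);
+           free_and_destroy_model(&m);
+       }
+*/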
+
diff --git a/Bing/Src/LibLinear/train.c b/Bing/Src/LibLinear/train.c
new file mode 100644
index 000000000..858229dab
--- /dev/null
+++ b/Bing/Src/LibLinear/train.c
@@ -0,0 +1,401 @@
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include "linear.h"
+#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
+#define INF HUGE_VAL
+
+#pragma warning(disable:4996)
+
+void print_null(const char *s) {}
+
+void exit_with_help()
+{
+ printf(
+ "Usage: train [options] training_set_file [model_file]\n"
+ "options:\n"
+ "-s type : set type of solver (default 1)\n"
+ " for multi-class classification\n"
+ " 0 -- L2-regularized logistic regression (primal)\n"
+ " 1 -- L2-regularized L2-loss support vector classification (dual)\n"
+ " 2 -- L2-regularized L2-loss support vector classification (primal)\n"
+ " 3 -- L2-regularized L1-loss support vector classification (dual)\n"
+ " 4 -- support vector classification by Crammer and Singer\n"
+ " 5 -- L1-regularized L2-loss support vector classification\n"
+ " 6 -- L1-regularized logistic regression\n"
+ " 7 -- L2-regularized logistic regression (dual)\n"
+ " for regression\n"
+ " 11 -- L2-regularized L2-loss support vector regression (primal)\n"
+ " 12 -- L2-regularized L2-loss support vector regression (dual)\n"
+ " 13 -- L2-regularized L1-loss support vector regression (dual)\n"
+ "-c cost : set the parameter C (default 1)\n"
+ "-p epsilon : set the epsilon in loss function of SVR (default 0.1)\n"
+ "-e epsilon : set tolerance of termination criterion\n"
+ " -s 0 and 2\n"
+ " |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n"
+ " where f is the primal function and pos/neg are # of\n"
+ " positive/negative data (default 0.01)\n"
+ " -s 11\n"
+ " |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001)\n"
+ " -s 1, 3, 4, and 7\n"
+ " Dual maximal violation <= eps; similar to libsvm (default 0.1)\n"
+ " -s 5 and 6\n"
+ " |f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,\n"
+ " where f is the primal function (default 0.01)\n"
+ " -s 12 and 13\n"
+ " |f'(alpha)|_1 <= eps |f'(alpha0)|,\n"
+ " where f is the dual function (default 0.1)\n"
+ "-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n"
+ "-wi weight: weights adjust the parameter C of different classes (see README for details)\n"
+ "-v n: n-fold cross validation mode\n"
+ "-q : quiet mode (no outputs)\n"
+ );
+ exit(1);
+}
+
+void exit_input_error(int line_num)
+{
+ fprintf(stderr,"Wrong input format at line %d\n", line_num);
+ exit(1);
+}
+
+static char *line = NULL;
+static int max_line_len;
+
+static char* readline(FILE *input)
+{
+ int len;
+
+ if(fgets(line,max_line_len,input) == NULL)
+ return NULL;
+
+ while(strrchr(line,'\n') == NULL)
+ {
+ max_line_len *= 2;
+ line = (char *) realloc(line,max_line_len);
+ len = (int) strlen(line);
+ if(fgets(line+len,max_line_len-len,input) == NULL)
+ break;
+ }
+ return line;
+}
+
+void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
+void read_problem(const char *filename);
+void do_cross_validation();
+
+struct feature_node *x_space;
+struct parameter param;
+struct problem prob;
+struct model* model_;
+int flag_cross_validation;
+int nr_fold;
+double bias;
+
+int main(int argc, char **argv)
+{
+ char input_file_name[1024];
+ char model_file_name[1024];
+ const char *error_msg;
+
+ parse_command_line(argc, argv, input_file_name, model_file_name);
+ read_problem(input_file_name);
+ error_msg = check_parameter(&prob,&param);
+
+ if(error_msg)
+ {
+ fprintf(stderr,"ERROR: %s\n",error_msg);
+ exit(1);
+ }
+
+ if(flag_cross_validation)
+ {
+ do_cross_validation();
+ }
+ else
+ {
+ model_=train(&prob, &param);
+ if(save_model(model_file_name, model_))
+ {
+ fprintf(stderr,"can't save model to file %s\n",model_file_name);
+ exit(1);
+ }
+ free_and_destroy_model(&model_);
+ }
+ destroy_param(&param);
+ free(prob.y);
+ free(prob.x);
+ free(x_space);
+ free(line);
+
+ return 0;
+}
+
+void do_cross_validation()
+{
+ int i;
+ int total_correct = 0;
+ double total_error = 0;
+ double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
+ double *target = Malloc(double, prob.l);
+
+ cross_validation(&prob,&param,nr_fold,target);
+ if(param.solver_type == L2R_L2LOSS_SVR ||
+ param.solver_type == L2R_L1LOSS_SVR_DUAL ||
+ param.solver_type == L2R_L2LOSS_SVR_DUAL)
+ {
+ for(i=0;i<prob.l;i++)
+ {
+ double y = prob.y[i];
+ double v = target[i];
+ total_error += (v-y)*(v-y);
+ sumv += v;
+ sumy += y;
+ sumvv += v*v;
+ sumyy += y*y;
+ sumvy += v*y;
+ }
+ printf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
+ printf("Cross Validation Squared correlation coefficient = %g\n",
+ ((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
+ ((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
+ );
+ }
+ else
+ {
+ for(i=0;i<prob.l;i++)
+ if(target[i] == prob.y[i])
+ ++total_correct;
+ printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
+ }
+
+ free(target);
+}
+
+void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
+{
+ int i;
+ void (*print_func)(const char*) = NULL; // default printing to stdout
+
+ // default values
+ param.solver_type = L2R_L2LOSS_SVC_DUAL;
+ param.C = 1;
+ param.eps = INF; // see setting below
+ param.p = 0.1;
+ param.nr_weight = 0;
+ param.weight_label = NULL;
+ param.weight = NULL;
+ flag_cross_validation = 0;
+ bias = -1;
+
+ // parse options
+ for(i=1;i<argc;i++)
+ {
+ if(argv[i][0] != '-') break;
+ if(++i>=argc)
+ exit_with_help();
+ switch(argv[i-1][1])
+ {
+ case 's':
+ param.solver_type = atoi(argv[i]);
+ break;
+
+ case 'c':
+ param.C = atof(argv[i]);
+ break;
+
+ case 'p':
+ param.p = atof(argv[i]);
+ break;
+
+ case 'e':
+ param.eps = atof(argv[i]);
+ break;
+
+ case 'B':
+ bias = atof(argv[i]);
+ break;
+
+ case 'w':
+ ++param.nr_weight;
+ param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight);
+ param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight);
+ param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
+ param.weight[param.nr_weight-1] = atof(argv[i]);
+ break;
+
+ case 'v':
+ flag_cross_validation = 1;
+ nr_fold = atoi(argv[i]);
+ if(nr_fold < 2)
+ {
+ fprintf(stderr,"n-fold cross validation: n must >= 2\n");
+ exit_with_help();
+ }
+ break;
+
+ case 'q':
+ print_func = &print_null;
+ i--;
+ break;
+
+ default:
+ fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]);
+ exit_with_help();
+ break;
+ }
+ }
+
+ set_print_string_function(print_func);
+
+ // determine filenames
+ if(i>=argc)
+ exit_with_help();
+
+ strcpy(input_file_name, argv[i]);
+
+ if(i<argc-1)
+ strcpy(model_file_name,argv[i+1]);
+ else
+ {
+ char *p = strrchr(argv[i],'/');
+ if(p==NULL)
+ p = argv[i];
+ else
+ ++p;
+ sprintf(model_file_name,"%s.model",p);
+ }
+
+ if(param.eps == INF)
+ {
+ switch(param.solver_type)
+ {
+ case L2R_LR:
+ case L2R_L2LOSS_SVC:
+ param.eps = 0.01;
+ break;
+ case L2R_L2LOSS_SVR:
+ param.eps = 0.001;
+ break;
+ case L2R_L2LOSS_SVC_DUAL:
+ case L2R_L1LOSS_SVC_DUAL:
+ case MCSVM_CS:
+ case L2R_LR_DUAL:
+ param.eps = 0.1;
+ break;
+ case L1R_L2LOSS_SVC:
+ case L1R_LR:
+ param.eps = 0.01;
+ break;
+ case L2R_L1LOSS_SVR_DUAL:
+ case L2R_L2LOSS_SVR_DUAL:
+ param.eps = 0.1;
+ break;
+ }
+ }
+}
+
+// read in a problem (in libsvm format)
+void read_problem(const char *filename)
+{
+ int max_index, inst_max_index, i;
+ long int elements, j;
+ FILE *fp = fopen(filename,"r");
+ char *endptr;
+ char *idx, *val, *label;
+
+ if(fp == NULL)
+ {
+ fprintf(stderr,"can't open input file %s\n",filename);
+ exit(1);
+ }
+
+ prob.l = 0;
+ elements = 0;
+ max_line_len = 1024;
+ line = Malloc(char,max_line_len);
+ while(readline(fp)!=NULL)
+ {
+ char *p = strtok(line," \t"); // label
+
+ // features
+ while(1)
+ {
+ p = strtok(NULL," \t");
+ if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
+ break;
+ elements++;
+ }
+ elements++; // for bias term
+ prob.l++;
+ }
+ rewind(fp);
+
+ prob.bias=bias;
+
+ prob.y = Malloc(double,prob.l);
+ prob.x = Malloc(struct feature_node *,prob.l);
+ x_space = Malloc(struct feature_node,elements+prob.l);
+
+ max_index = 0;
+ j=0;
+ for(i=0;i<prob.l;i++)
+ {
+ inst_max_index = 0; // strtol gives 0 if wrong format
+ readline(fp);
+ prob.x[i] = &x_space[j];
+ label = strtok(line," \t\n");
+ if(label == NULL) // empty line
+ exit_input_error(i+1);
+
+ prob.y[i] = strtod(label,&endptr);
+ if(endptr == label || *endptr != '\0')
+ exit_input_error(i+1);
+
+ while(1)
+ {
+ idx = strtok(NULL,":");
+ val = strtok(NULL," \t");
+
+ if(val == NULL)
+ break;
+
+ errno = 0;
+ x_space[j].index = (int) strtol(idx,&endptr,10);
+ if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
+ exit_input_error(i+1);
+ else
+ inst_max_index = x_space[j].index;
+
+ errno = 0;
+ x_space[j].value = strtod(val,&endptr);
+ if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
+ exit_input_error(i+1);
+
+ ++j;
+ }
+
+ if(inst_max_index > max_index)
+ max_index = inst_max_index;
+
+ if(prob.bias >= 0)
+ x_space[j++].value = prob.bias;
+
+ x_space[j++].index = -1;
+ }
+
+ if(prob.bias >= 0)
+ {
+ prob.n=max_index+1;
+ for(i=1;i<prob.l;i++)
+ (prob.x[i]-1)->index = prob.n;
+ x_space[j-2].index = prob.n;
+ }
+ else
+ prob.n=max_index;
+
+ fclose(fp);
+}
diff --git a/Bing/Src/LibLinear/tron.cpp b/Bing/Src/LibLinear/tron.cpp
new file mode 100644
index 000000000..b54bedf18
--- /dev/null
+++ b/Bing/Src/LibLinear/tron.cpp
@@ -0,0 +1,235 @@
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include "tron.h"
+
+#ifndef min
+template <class T> static inline T min(T x,T y) { return (x<y)?x:y; }
+#endif
+
+#ifndef max
+template <class T> static inline T max(T x,T y) { return (x>y)?x:y; }
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern double dnrm2_(int *, double *, int *);
+extern double ddot_(int *, double *, int *, double *, int *);
+extern int daxpy_(int *, double *, double *, int *, double *, int *);
+extern int dscal_(int *, double *, double *, int *);
+
+#ifdef __cplusplus
+}
+#endif
+
+static void default_print(const char *buf)
+{
+ fputs(buf,stdout);
+ fflush(stdout);
+}
+
+void TRON::info(const char *fmt,...)
+{
+ char buf[BUFSIZ];
+ va_list ap;
+ va_start(ap,fmt);
+ vsprintf(buf,fmt,ap);
+ va_end(ap);
+ (*tron_print_string)(buf);
+}
+
+TRON::TRON(const function *fun_obj, double eps, int max_iter)
+{
+ this->fun_obj=const_cast<function *>(fun_obj);
+ this->eps=eps;
+ this->max_iter=max_iter;
+ tron_print_string = default_print;
+}
+
+TRON::~TRON()
+{
+}
+
+void TRON::tron(double *w)
+{
+ // Parameters for updating the iterates.
+ double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;
+
+ // Parameters for updating the trust region size delta.
+ double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4;
+
+ int n = fun_obj->get_nr_variable();
+ int i, cg_iter;
+ double delta, snorm, one=1.0;
+ double alpha, f, fnew, prered, actred, gs;
+ int search = 1, iter = 1, inc = 1;
+ double *s = new double[n];
+ double *r = new double[n];
+ double *w_new = new double[n];
+ double *g = new double[n];
+
+ for (i=0; i<n; i++)
+ w[i] = 0;
+
+ f = fun_obj->fun(w);
+ fun_obj->grad(w, g);
+ delta = dnrm2_(&n, g, &inc);
+ double gnorm1 = delta;
+ double gnorm = gnorm1;
+
+ if (gnorm <= eps*gnorm1)
+ search = 0;
+
+ iter = 1;
+
+ while (iter <= max_iter && search)
+ {
+ cg_iter = trcg(delta, g, s, r);
+
+ memcpy(w_new, w, sizeof(double)*n);
+ daxpy_(&n, &one, s, &inc, w_new, &inc);
+
+ gs = ddot_(&n, g, &inc, s, &inc);
+ prered = -0.5*(gs-ddot_(&n, s, &inc, r, &inc));
+ fnew = fun_obj->fun(w_new);
+
+ // Compute the actual reduction.
+ actred = f - fnew;
+
+ // On the first iteration, adjust the initial step bound.
+ snorm = dnrm2_(&n, s, &inc);
+ if (iter == 1)
+ delta = min(delta, snorm);
+
+ // Compute prediction alpha*snorm of the step.
+ if (fnew - f - gs <= 0)
+ alpha = sigma3;
+ else
+ alpha = max(sigma1, -0.5*(gs/(fnew - f - gs)));
+
+ // Update the trust region bound according to the ratio of actual to predicted reduction.
+ if (actred < eta0*prered)
+ delta = min(max(alpha, sigma1)*snorm, sigma2*delta);
+ else if (actred < eta1*prered)
+ delta = max(sigma1*delta, min(alpha*snorm, sigma2*delta));
+ else if (actred < eta2*prered)
+ delta = max(sigma1*delta, min(alpha*snorm, sigma3*delta));
+ else
+ delta = max(delta, min(alpha*snorm, sigma3*delta));
+
+ info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d\n", iter, actred, prered, delta, f, gnorm, cg_iter);
+
+ if (actred > eta0*prered)
+ {
+ iter++;
+ memcpy(w, w_new, sizeof(double)*n);
+ f = fnew;
+ fun_obj->grad(w, g);
+
+ gnorm = dnrm2_(&n, g, &inc);
+ if (gnorm <= eps*gnorm1)
+ break;
+ }
+ if (f < -1.0e+32)
+ {
+ info("WARNING: f < -1.0e+32\n");
+ break;
+ }
+ if (fabs(actred) <= 0 && prered <= 0)
+ {
+ info("WARNING: actred and prered <= 0\n");
+ break;
+ }
+ if (fabs(actred) <= 1.0e-12*fabs(f) &&
+ fabs(prered) <= 1.0e-12*fabs(f))
+ {
+ info("WARNING: actred and prered too small\n");
+ break;
+ }
+ }
+
+ delete[] g;
+ delete[] r;
+ delete[] w_new;
+ delete[] s;
+}
+
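+// Conjugate gradient for the trust-region subproblem: approximately minimize
+// g's + s'Hs/2 subject to |s| <= delta. When an update would leave the trust
+// region, alpha is re-chosen as the positive root of
+// |s|^2 + 2*alpha*(s'd) + alpha^2*|d|^2 = delta^2,
+// so the step lands exactly on the boundary (the 'rad' computation below).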
+int TRON::trcg(double delta, double *g, double *s, double *r)
+{
+ int i, inc = 1;
+ int n = fun_obj->get_nr_variable();
+ double one = 1;
+ double *d = new double[n];
+ double *Hd = new double[n];
+ double rTr, rnewTrnew, alpha, beta, cgtol;
+
+ for (i=0; i<n; i++)
+ {
+ s[i] = 0;
+ r[i] = -g[i];
+ d[i] = r[i];
+ }
+ cgtol = 0.1*dnrm2_(&n, g, &inc);
+
+ int cg_iter = 0;
+ rTr = ddot_(&n, r, &inc, r, &inc);
+ while (1)
+ {
+ if (dnrm2_(&n, r, &inc) <= cgtol)
+ break;
+ cg_iter++;
+ fun_obj->Hv(d, Hd);
+
+ alpha = rTr/ddot_(&n, d, &inc, Hd, &inc);
+ daxpy_(&n, &alpha, d, &inc, s, &inc);
+ if (dnrm2_(&n, s, &inc) > delta)
+ {
+ info("cg reaches trust region boundary\n");
+ alpha = -alpha;
+ daxpy_(&n, &alpha, d, &inc, s, &inc);
+
+ double std = ddot_(&n, s, &inc, d, &inc);
+ double sts = ddot_(&n, s, &inc, s, &inc);
+ double dtd = ddot_(&n, d, &inc, d, &inc);
+ double dsq = delta*delta;
+ double rad = sqrt(std*std + dtd*(dsq-sts));
+ if (std >= 0)
+ alpha = (dsq - sts)/(std + rad);
+ else
+ alpha = (rad - std)/dtd;
+ daxpy_(&n, &alpha, d, &inc, s, &inc);
+ alpha = -alpha;
+ daxpy_(&n, &alpha, Hd, &inc, r, &inc);
+ break;
+ }
+ alpha = -alpha;
+ daxpy_(&n, &alpha, Hd, &inc, r, &inc);
+ rnewTrnew = ddot_(&n, r, &inc, r, &inc);
+ beta = rnewTrnew/rTr;
+ dscal_(&n, &beta, d, &inc);
+ daxpy_(&n, &one, r, &inc, d, &inc);
+ rTr = rnewTrnew;
+ }
+
+ delete[] d;
+ delete[] Hd;
+
+ return(cg_iter);
+}
+
+double TRON::norm_inf(int n, double *x)
+{
+ double dmax = fabs(x[0]);
+ for (int i=1; i<n; i++)
+ if (fabs(x[i]) >= dmax)
+ dmax = fabs(x[i]);
+ return(dmax);
+}
+
+void TRON::set_print_string(void (*print_string) (const char *buf))
+{
+ tron_print_string = print_string;
+}
diff --git a/Bing/Src/LibLinear/tron.h b/Bing/Src/LibLinear/tron.h
new file mode 100644
index 000000000..4fe34a5c6
--- /dev/null
+++ b/Bing/Src/LibLinear/tron.h
@@ -0,0 +1,36 @@
+#ifndef _TRON_H
+#define _TRON_H
+
+#pragma warning(disable:4996)
+
+class function
+{
+public:
+ virtual double fun(double *w) = 0 ;
+ virtual void grad(double *w, double *g) = 0 ;
+ virtual void Hv(double *s, double *Hs) = 0 ;
+
+ virtual int get_nr_variable(void) = 0 ;
+ virtual ~function(void){}
+};
+
+class TRON
+{
+public:
+ TRON(const function *fun_obj, double eps = 0.1, int max_iter = 1000);
+ ~TRON();
+
+ void tron(double *w);
+ void set_print_string(void (*i_print) (const char *buf));
+
+private:
+ int trcg(double delta, double *g, double *s, double *r);
+ double norm_inf(int n, double *x);
+
+ double eps;
+ int max_iter;
+ function *fun_obj;
+ void info(const char *fmt,...);
+ void (*tron_print_string)(const char *buf);
+};
+#endif
diff --git a/Bing/Src/Objectness.cpp b/Bing/Src/Objectness.cpp
new file mode 100644
index 000000000..01cbc9541
--- /dev/null
+++ b/Bing/Src/Objectness.cpp
@@ -0,0 +1,1118 @@
+#include "kyheader.h"
+#include "Objectness.h"
+#include "CmShow.h"
+
+#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
+void print_null(const char *s) {}
+const char* Objectness::_clrName[3] = {"MAXBGR", "HSV", "I"};
+const int CN = 21; // Color Number
+const char* COLORs[CN] = {"'k'", "'b'", "'g'", "'r'", "'c'", "'m'", "'y'",
+ "':k'", "':b'", "':g'", "':r'", "':c'", "':m'", "':y'",
+ "'--k'", "'--b'", "'--g'", "'--r'", "'--c'", "'--m'", "'--y'"
+};
+
+
+// base for window size quantization, R orientation channels, and feature window size (_W, _W)
+Objectness::Objectness(DataSetVOC &voc, double base, int W, int NSS)
+ : _voc(voc)
+ , _base(base)
+ , _W(W)
+ , _NSS(NSS)
+ , _logBase(log(_base))
+ , _minT(cvCeil(log(10.)/_logBase))
+ , _maxT(cvCeil(log(500.)/_logBase))
+ , _numT(_maxT - _minT + 1)
+ , _Clr(MAXBGR)
+{
+ setColorSpace(_Clr);
+}
+
+Objectness::~Objectness(void)
+{
+}
+
+void Objectness::setColorSpace(int clr)
+{
+ _Clr = clr;
+ _modelName = _voc.resDir + format("ObjNessB%gW%d%s", _base, _W, _clrName[_Clr]);
+ _trainDirSI = _voc.localDir + format("TrainS1B%gW%d%s/", _base, _W, _clrName[_Clr]);
+ _bbResDir = _voc.resDir + format("BBoxesB%gW%d%s/", _base, _W, _clrName[_Clr]);
+}
+
+int Objectness::loadTrainedModel(string modelName) // Return -1, 0, or 1 if partial, none, or all loaded
+{
+ if (modelName.size() == 0)
+ modelName = _modelName;
+ CStr s1 = modelName + ".wS1", s2 = modelName + ".wS2", sI = modelName + ".idx";
+ Mat filters1f, reW1f, idx1i, show3u;
+ if (!matRead(s1, filters1f) || !matRead(sI, idx1i)){
+ printf("Can't load model: %s or %s\n", _S(s1), _S(sI));
+ return 0;
+ }
+
+ //filters1f = aFilter(0.8f, 8);
+ //normalize(filters1f, filters1f, p, 1, NORM_MINMAX);
+
+ normalize(filters1f, show3u, 1, 255, NORM_MINMAX, CV_8U);
+ CmShow::showTinyMat(_voc.resDir + "Filter.png", show3u);
+ _tigF.update(filters1f);
+ _tigF.reconstruct(filters1f);
+
+ _svmSzIdxs = idx1i;
+ CV_Assert(_svmSzIdxs.size() > 1 && filters1f.size() == Size(_W, _W) && filters1f.type() == CV_32F);
+ _svmFilter = filters1f;
+
+ if (!matRead(s2, _svmReW1f) || _svmReW1f.size() != Size(2, _svmSzIdxs.size())){
+ _svmReW1f = Mat();
+ return -1;
+ }
+ return 1;
+}
+
+void Objectness::predictBBoxSI(CMat &img3u, ValStructVec<float, Vec4i> &valBoxes, vecI &sz, int NUM_WIN_PSZ, bool fast)
+{
+ const int numSz = _svmSzIdxs.size();
+ const int imgW = img3u.cols, imgH = img3u.rows;
+ valBoxes.reserve(10000);
+ sz.clear(); sz.reserve(10000);
+ for (int ir = numSz - 1; ir >= 0; ir--){
+ int r = _svmSzIdxs[ir];
+ int height = cvRound(pow(_base, r/_numT + _minT)), width = cvRound(pow(_base, r%_numT + _minT));
+ if (height > imgH * _base || width > imgW * _base)
+ continue;
+
+ height = min(height, imgH), width = min(width, imgW);
+ Mat im3u, matchCost1f, mag1u;
+ resize(img3u, im3u, Size(cvRound(_W*imgW*1.0/width), cvRound(_W*imgH*1.0/height)));
+ gradientMag(im3u, mag1u);
+
+ //imwrite(_voc.localDir + format("%d.png", r), mag1u);
+ //Mat mag1f;
+ //mag1u.convertTo(mag1f, CV_32F);
+ //matchTemplate(mag1f, _svmFilter, matchCost1f, CV_TM_CCORR);
+
+ matchCost1f = _tigF.matchTemplate(mag1u);
+
+ ValStructVec<float, Point> matchCost;
+ nonMaxSup(matchCost1f, matchCost, _NSS, NUM_WIN_PSZ, fast);
+
+ // Find true locations and match values
+ double ratioX = width/_W, ratioY = height/_W;
+ int iMax = min(matchCost.size(), NUM_WIN_PSZ);
+ for (int i = 0; i < iMax; i++){
+ float mVal = matchCost(i);
+ Point pnt = matchCost[i];
+ Vec4i box(cvRound(pnt.x * ratioX), cvRound(pnt.y*ratioY));
+ box[2] = cvRound(min(box[0] + width, imgW));
+ box[3] = cvRound(min(box[1] + height, imgH));
+ box[0] ++;
+ box[1] ++;
+ valBoxes.pushBack(mVal, box);
+ sz.push_back(ir);
+ }
+ }
+ //exit(0);
+}
+
+void Objectness::predictBBoxSII(ValStructVec<float, Vec4i> &valBoxes, const vecI &sz)
+{
+ int numI = valBoxes.size();
+ for (int i = 0; i < numI; i++){
+ const float* svmIIw = _svmReW1f.ptr<float>(sz[i]);
+ valBoxes(i) = valBoxes(i) * svmIIw[0] + svmIIw[1];
+ }
+ valBoxes.sort();
+}
+
+// Get potential bounding boxes, each represented by a Vec4i of (minX, minY, maxX, maxY).
+// A trained model must be prepared before calling this function, either via loadTrainedModel() or via trainStageI() + trainStateII().
+// numDetPerSize controls the number of proposals kept per size (combination of scale and aspect ratio).
+void Objectness::getObjBndBoxes(CMat &img3u, ValStructVec<float, Vec4i> &valBoxes, int numDetPerSize)
+{
+ CV_Assert_(filtersLoaded() , ("SVM filters should be initialized before getting object proposals\n"));
+ vecI sz;
+ predictBBoxSI(img3u, valBoxes, sz, numDetPerSize, false);
+ predictBBoxSII(valBoxes, sz);
+ return;
+}
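+
+// A minimal calling sketch (illustrative; the path, image name, and the 130
+// proposal budget below are assumptions, not values fixed by this code):
+//
+//   DataSetVOC voc("/path/to/VOC2007/");
+//   Objectness objNess(voc, 2, 8, 2);
+//   objNess.loadTrainedModel();
+//   ValStructVec<float, Vec4i> boxes;
+//   objNess.getObjBndBoxes(imread("example.jpg"), boxes, 130);
+//   for (int i = 0; i < boxes.size(); i++)
+//       printf("%g: (%d, %d, %d, %d)\n", boxes(i), boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]);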
+
+void Objectness::nonMaxSup(CMat &matchCost1f, ValStructVec<float, Point> &matchCost, int NSS, int maxPoint, bool fast)
+{
+ const int _h = matchCost1f.rows, _w = matchCost1f.cols;
+ Mat isMax1u = Mat::ones(_h, _w, CV_8U), costSmooth1f;
+ ValStructVec<float, Point> valPnt;
+ matchCost.reserve(_h * _w);
+ valPnt.reserve(_h * _w);
+ if (fast){
+ blur(matchCost1f, costSmooth1f, Size(3, 3));
+ for (int r = 0; r < _h; r++){
+ const float* d = matchCost1f.ptr<float>(r);
+ const float* ds = costSmooth1f.ptr<float>(r);
+ for (int c = 0; c < _w; c++)
+ if (d[c] >= ds[c])
+ valPnt.pushBack(d[c], Point(c, r));
+ }
+ }
+ else{
+ for (int r = 0; r < _h; r++){
+ const float* d = matchCost1f.ptr<float>(r);
+ for (int c = 0; c < _w; c++)
+ valPnt.pushBack(d[c], Point(c, r));
+ }
+ }
+
+ valPnt.sort();
+ for (int i = 0; i < valPnt.size(); i++){
+ Point &pnt = valPnt[i];
+ if (isMax1u.at<byte>(pnt)){
+ matchCost.pushBack(valPnt(i), pnt);
+ for (int dy = -NSS; dy <= NSS; dy++) for (int dx = -NSS; dx <= NSS; dx++){
+ Point neighbor = pnt + Point(dx, dy);
+ if (!CHK_IND(neighbor))
+ continue;
+ isMax1u.at<byte>(neighbor) = false;
+ }
+ }
+ if (matchCost.size() >= maxPoint)
+ return;
+ }
+}
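+
+// nonMaxSup above is a greedy non-maximal suppression: candidates are sorted by
+// score and each accepted point suppresses its (2*NSS+1) x (2*NSS+1) neighborhood;
+// the 'fast' path first drops points that fall below a 3x3 box-blur of the cost map.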
+
+void Objectness::gradientMag(CMat &imgBGR3u, Mat &mag1u)
+{
+ switch (_Clr){
+ case MAXBGR:
+ gradientRGB(imgBGR3u, mag1u); break;
+ case G:
+ gradientGray(imgBGR3u, mag1u); break;
+ case HSV:
+ gradientHSV(imgBGR3u, mag1u); break;
+ default:
+ printf("Error: not recognized color space\n");
+ }
+}
+
+void Objectness::gradientRGB(CMat &bgr3u, Mat &mag1u)
+{
+ const int H = bgr3u.rows, W = bgr3u.cols;
+ Mat Ix(H, W, CV_32S), Iy(H, W, CV_32S);
+
+ // Left/right most column Ix
+ for (int y = 0; y < H; y++){
+ Ix.at<int>(y, 0) = bgrMaxDist(bgr3u.at<Vec3b>(y, 1), bgr3u.at<Vec3b>(y, 0))*2;
+ Ix.at<int>(y, W-1) = bgrMaxDist(bgr3u.at<Vec3b>(y, W-1), bgr3u.at<Vec3b>(y, W-2))*2;
+ }
+
+ // Top/bottom most column Iy
+ for (int x = 0; x < W; x++) {
+ Iy.at<int>(0, x) = bgrMaxDist(bgr3u.at<Vec3b>(1, x), bgr3u.at<Vec3b>(0, x))*2;
+ Iy.at<int>(H-1, x) = bgrMaxDist(bgr3u.at<Vec3b>(H-1, x), bgr3u.at<Vec3b>(H-2, x))*2;
+ }
+
+ // Find the gradient for inner regions
+ for (int y = 0; y < H; y++){
+ const Vec3b *dataP = bgr3u.ptr<Vec3b>(y);
+ for (int x = 2; x < W; x++)
+ Ix.at<int>(y, x-1) = bgrMaxDist(dataP[x-2], dataP[x]); // bgr3u.at<Vec3b>(y, x+1), bgr3u.at<Vec3b>(y, x-1)
+ }
+ for (int y = 1; y < H-1; y++){
+ const Vec3b *tP = bgr3u.ptr<Vec3b>(y-1);
+ const Vec3b *bP = bgr3u.ptr<Vec3b>(y+1);
+ for (int x = 0; x < W; x++)
+ Iy.at<int>(y, x) = bgrMaxDist(tP[x], bP[x]);
+ }
+ gradientXY(Ix, Iy, mag1u);
+}
+
+void Objectness::gradientGray(CMat &bgr3u, Mat &mag1u)
+{
+ Mat g1u;
+ cvtColor(bgr3u, g1u, CV_BGR2GRAY);
+ const int H = g1u.rows, W = g1u.cols;
+ Mat Ix(H, W, CV_32S), Iy(H, W, CV_32S);
+
+ // Left/right most column Ix
+ for (int y = 0; y < H; y++){
+ Ix.at<int>(y, 0) = abs(g1u.at<byte>(y, 1) - g1u.at<byte>(y, 0)) * 2;
+ Ix.at<int>(y, W-1) = abs(g1u.at<byte>(y, W-1) - g1u.at<byte>(y, W-2)) * 2;
+ }
+
+ // Top/bottom most column Iy
+ for (int x = 0; x < W; x++) {
+ Iy.at<int>(0, x) = abs(g1u.at<byte>(1, x) - g1u.at<byte>(0, x)) * 2;
+ Iy.at<int>(H-1, x) = abs(g1u.at<byte>(H-1, x) - g1u.at<byte>(H-2, x)) * 2;
+ }
+
+ // Find the gradient for inner regions
+ for (int y = 0; y < H; y++)
+ for (int x = 1; x < W-1; x++)
+ Ix.at<int>(y, x) = abs(g1u.at<byte>(y, x+1) - g1u.at<byte>(y, x-1));
+ for (int y = 1; y < H-1; y++)
+ for (int x = 0; x < W; x++)
+ Iy.at<int>(y, x) = abs(g1u.at<byte>(y+1, x) - g1u.at<byte>(y-1, x));
+
+ gradientXY(Ix, Iy, mag1u);
+}
+
+
+void Objectness::gradientHSV(CMat &bgr3u, Mat &mag1u)
+{
+ Mat hsv3u;
+ cvtColor(bgr3u, hsv3u, CV_BGR2HSV);
+ const int H = hsv3u.rows, W = hsv3u.cols;
+ Mat Ix(H, W, CV_32S), Iy(H, W, CV_32S);
+
+ // Left/right most column Ix
+ for (int y = 0; y < H; y++){
+ Ix.at<int>(y, 0) = vecDist3b(hsv3u.at<Vec3b>(y, 1), hsv3u.at<Vec3b>(y, 0));
+ Ix.at<int>(y, W-1) = vecDist3b(hsv3u.at<Vec3b>(y, W-1), hsv3u.at<Vec3b>(y, W-2));
+ }
+
+ // Top/bottom most column Iy
+ for (int x = 0; x < W; x++) {
+ Iy.at<int>(0, x) = vecDist3b(hsv3u.at<Vec3b>(1, x), hsv3u.at<Vec3b>(0, x));
+ Iy.at<int>(H-1, x) = vecDist3b(hsv3u.at<Vec3b>(H-1, x), hsv3u.at<Vec3b>(H-2, x));
+ }
+
+ // Find the gradient for inner regions
+ for (int y = 0; y < H; y++)
+ for (int x = 1; x < W-1; x++)
+ Ix.at<int>(y, x) = vecDist3b(hsv3u.at<Vec3b>(y, x+1), hsv3u.at<Vec3b>(y, x-1))/2;
+ for (int y = 1; y < H-1; y++)
+ for (int x = 0; x < W; x++)
+ Iy.at<int>(y, x) = vecDist3b(hsv3u.at<Vec3b>(y+1, x), hsv3u.at<Vec3b>(y-1, x))/2;
+
+ gradientXY(Ix, Iy, mag1u);
+}
+
+void Objectness::gradientXY(CMat &x1i, CMat &y1i, Mat &mag1u)
+{
+ const int H = x1i.rows, W = x1i.cols;
+ mag1u.create(H, W, CV_8U);
+ for (int r = 0; r < H; r++){
+ const int *x = x1i.ptr<int>(r), *y = y1i.ptr<int>(r);
+ byte* m = mag1u.ptr<byte>(r);
+ for (int c = 0; c < W; c++)
+ m[c] = min(x[c] + y[c], 255); //((int)sqrt(sqr(x[c]) + sqr(y[c])), 255);
+ }
+}
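+
+// Note: the magnitude above is the L1 approximation min(|Ix| + |Iy|, 255) rather
+// than the Euclidean sqrt(Ix^2 + Iy^2) kept in the commented-out code; it avoids
+// the sqrt and clamps to the byte range of the CV_8U output.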
+
+void Objectness::trainObjectness(int numDetPerSize)
+{
+ CmTimer tm1("Train1"), tm2("Train 2");
+
+ //* Learning stage I
+ generateTrianData();
+ tm1.Start();
+ trainStageI();
+ tm1.Stop();
+ printf("Learning stage I takes %g seconds... \n", tm1.TimeInSeconds()); //*/
+
+ //* Learning stage II
+ tm2.Start();
+ trainStateII(numDetPerSize);
+ tm2.Stop();
+ printf("Learning stage II takes %g seconds... \n", tm2.TimeInSeconds()); //*/
+ return;
+}
+
+void Objectness::generateTrianData()
+{
+ const int NUM_TRAIN = _voc.trainNum;
+ const int FILTER_SZ = _W*_W;
+ vector<vector<Mat>> xTrainP(NUM_TRAIN), xTrainN(NUM_TRAIN);
+ vector<vecI> szTrainP(NUM_TRAIN); // Corresponding size index.
+ const int NUM_NEG_BOX = 100; // Number of negative windows sampled from each image
+
+#pragma omp parallel for
+ for (int i = 0; i < NUM_TRAIN; i++) {
+ const int NUM_GT_BOX = (int)_voc.gtTrainBoxes[i].size();
+ vector<Mat> &xP = xTrainP[i], &xN = xTrainN[i];
+ vecI &szP = szTrainP[i];
+ xP.reserve(NUM_GT_BOX*4), szP.reserve(NUM_GT_BOX*4), xN.reserve(NUM_NEG_BOX);
+ Mat im3u = imread(format(_S(_voc.imgPathW), _S(_voc.trainSet[i])));
+
+ // Get positive training data
+ for (int k = 0; k < NUM_GT_BOX; k++){
+ const Vec4i& bbgt = _voc.gtTrainBoxes[i][k];
+ vector<Vec4i> bbs; // bounding boxes
+ vecI bbR; // Bounding box ratios
+ int nS = gtBndBoxSampling(bbgt, bbs, bbR);
+ for (int j = 0; j < nS; j++){
+ bbs[j][2] = min(bbs[j][2], im3u.cols);
+ bbs[j][3] = min(bbs[j][3], im3u.rows);
+ Mat mag1f = getFeature(im3u, bbs[j]), magF1f;
+ flip(mag1f, magF1f, CV_FLIP_HORIZONTAL);
+ xP.push_back(mag1f);
+ xP.push_back(magF1f);
+ szP.push_back(bbR[j]);
+ szP.push_back(bbR[j]);
+ }
+ }
+ // Get negative training data
+ for (int k = 0; k < NUM_NEG_BOX; k++){
+ int x1 = rand() % im3u.cols + 1, x2 = rand() % im3u.cols + 1;
+ int y1 = rand() % im3u.rows + 1, y2 = rand() % im3u.rows + 1;
+ Vec4i bb(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2));
+ if (maxIntUnion(bb, _voc.gtTrainBoxes[i]) < 0.5)
+ xN.push_back(getFeature(im3u, bb));
+ }
+ }
+
+ const int NUM_R = _numT * _numT + 1;
+ vecI szCount(NUM_R); // Object counts of each size (combination of scale and aspect ratio)
+ int numP = 0, numN = 0, iP = 0, iN = 0;
+ for (int i = 0; i < NUM_TRAIN; i++){
+ numP += xTrainP[i].size();
+ numN += xTrainN[i].size();
+ const vecI &rP = szTrainP[i];
+ for (size_t j = 0; j < rP.size(); j++)
+ szCount[rP[j]]++;
+ }
+ vecI szActive; // Indexes of active size
+ for (int r = 1; r < NUM_R; r++){
+ if (szCount[r] > 50) // Ignore sizes with 50 or fewer positive samples.
+ szActive.push_back(r-1);
+ }
+ matWrite(_modelName + ".idx", Mat(szActive));
+
+ Mat xP1f(numP, FILTER_SZ, CV_32F), xN1f(numN, FILTER_SZ, CV_32F);
+ for (int i = 0; i < NUM_TRAIN; i++) {
+ vector<Mat> &xP = xTrainP[i], &xN = xTrainN[i];
+ for (size_t j = 0; j < xP.size(); j++)
+ memcpy(xP1f.ptr(iP++), xP[j].data, FILTER_SZ*sizeof(float));
+ for (size_t j = 0; j < xN.size(); j++)
+ memcpy(xN1f.ptr(iN++), xN[j].data, FILTER_SZ*sizeof(float));
+ }
+ CV_Assert(numP == iP && numN == iN);
+ matWrite(_modelName + ".xP", xP1f);
+ matWrite(_modelName + ".xN", xN1f);
+}
+
+Mat Objectness::getFeature(CMat &img3u, const Vec4i &bb)
+{
+ int x = bb[0] - 1, y = bb[1] - 1;
+ Rect reg(x, y, bb[2] - x, bb[3] - y);
+ Mat subImg3u, mag1f, mag1u;
+ resize(img3u(reg), subImg3u, Size(_W, _W));
+ gradientMag(subImg3u, mag1u);
+ mag1u.convertTo(mag1f, CV_32F);
+ return mag1f;
+}
+
+int Objectness::gtBndBoxSampling(const Vec4i &bbgt, vector &samples, vecI &bbR)
+{
+ double wVal = bbgt[2] - bbgt[0] + 1, hVal = (bbgt[3] - bbgt[1]) + 1;
+ wVal = log(wVal)/_logBase, hVal = log(hVal)/_logBase;
+ int wMin = max((int)(wVal - 0.5), _minT), wMax = min((int)(wVal + 1.5), _maxT);
+ int hMin = max((int)(hVal - 0.5), _minT), hMax = min((int)(hVal + 1.5), _maxT);
+ for (int h = hMin; h <= hMax; h++) for (int w = wMin; w <= wMax; w++){
+ int wT = tLen(w) - 1, hT = tLen(h) - 1;
+ Vec4i bb(bbgt[0], bbgt[1], bbgt[0] + wT, bbgt[1] + hT);
+ if (DataSetVOC::interUnio(bb, bbgt) >= 0.5){
+ samples.push_back(bb);
+ bbR.push_back(sz2idx(w, h));
+ //if (bbgt[3] > hT){
+ // bb = Vec4i(bbgt[0], bbgt[3] - hT, bbgt[0] + wT, bbgt[3]);
+ // CV_Assert(DataSetVOC::interUnio(bb, bbgt) >= 0.5);
+ // samples.push_back(bb);
+ // bbR.push_back(sz2idx(w, h));
+ //}
+ //if (bbgt[2] > wT){
+ // bb = Vec4i(bbgt[2] - wT, bbgt[1], bbgt[2], bbgt[1] + hT);
+ // CV_Assert(DataSetVOC::interUnio(bb, bbgt) >= 0.5);
+ // samples.push_back(bb);
+ // bbR.push_back(sz2idx(w, h));
+ //}
+ //if (bbgt[2] > wT && bbgt[3] > hT){
+ // bb = Vec4i(bbgt[2] - wT, bbgt[3] - hT, bbgt[2], bbgt[3]);
+ // CV_Assert(DataSetVOC::interUnio(bb, bbgt) >= 0.5);
+ // samples.push_back(bb);
+ // bbR.push_back(sz2idx(w, h));
+ //}
+ }
+ }
+ return samples.size();
+}
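+
+// Example of the quantization above (assuming base = 2): a ground-truth width of
+// 100 gives wVal = log2(100) ~ 6.64, so w ranges over {6, 7, 8}, i.e. candidate
+// widths of roughly 2^6 = 64, 2^7 = 128 and 2^8 = 256 (clamped to [_minT, _maxT]);
+// only candidates whose IoU with the ground-truth box stays >= 0.5 are kept.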
+
+void Objectness::trainStateII(int numPerSz)
+{
+ loadTrainedModel();
+ const int NUM_TRAIN = _voc.trainNum;
+ vector<vecI> SZ(NUM_TRAIN), Y(NUM_TRAIN);
+ vector<vecF> VAL(NUM_TRAIN);
+
+#pragma omp parallel for
+ for (int i = 0; i < _voc.trainNum; i++) {
+ const vector<Vec4i> &bbgts = _voc.gtTrainBoxes[i];
+ ValStructVec<float, Vec4i> valBoxes;
+ vecI &sz = SZ[i], &y = Y[i];
+ vecF &val = VAL[i];
+ CStr imgPath = format(_S(_voc.imgPathW), _S(_voc.trainSet[i]));
+ predictBBoxSI(imread(imgPath), valBoxes, sz, numPerSz, false);
+ const int num = valBoxes.size();
+ CV_Assert(sz.size() == num);
+ y.resize(num), val.resize(num);
+ for (int j = 0; j < num; j++){
+ Vec4i bb = valBoxes[j];
+ val[j] = valBoxes(j);
+ y[j] = maxIntUnion(bb, bbgts) >= 0.5 ? 1 : -1;
+ }
+ }
+
+ const int NUM_SZ = _svmSzIdxs.size();
+ const int maxTrainNum = 100000;
+ vector<vecM> rXP(NUM_SZ), rXN(NUM_SZ);
+ for (int r = 0; r < NUM_SZ; r++){
+ rXP[r].reserve(maxTrainNum);
+ rXN[r].reserve(1000000);
+ }
+ for (int i = 0; i < NUM_TRAIN; i++){
+ const vecI &sz = SZ[i], &y = Y[i];
+ vecF &val = VAL[i];
+ int num = sz.size();
+ for (int j = 0; j < num; j++){
+ int r = sz[j];
+ CV_Assert(r >= 0 && r < NUM_SZ);
+ if (y[j] == 1)
+ rXP[r].push_back(Mat(1, 1, CV_32F, &val[j]));
+ else
+ rXN[r].push_back(Mat(1, 1, CV_32F, &val[j]));
+ }
+ }
+
+ Mat wMat(NUM_SZ, 2, CV_32F);
+ for (int i = 0; i < NUM_SZ; i++){
+ const vecM &xP = rXP[i], &xN = rXN[i];
+ if (xP.size() < 10 || xN.size() < 10)
+ printf("Warning %s:%d not enough training sample for r[%d] = %d. P = %d, N = %d\n", __FILE__, __LINE__, i, _svmSzIdxs[i], xP.size(), xN.size());
+ for (size_t k = 0; k < xP.size(); k++)
+ CV_Assert(xP[k].size() == Size(1, 1) && xP[k].type() == CV_32F);
+
+ Mat wr = trainSVM(xP, xN, L1R_L2LOSS_SVC, 100, 1);
+ CV_Assert(wr.size() == Size(2, 1));
+ wr.copyTo(wMat.row(i));
+ }
+ matWrite(_modelName + ".wS2", wMat);
+ _svmReW1f = wMat;
+}
+
+void Objectness::meanStdDev(CMat &data1f, Mat &mean1f, Mat &stdDev1f)
+{
+ const int DIM = data1f.cols, NUM = data1f.rows;
+ mean1f = Mat::zeros(1, DIM, CV_32F), stdDev1f = Mat::zeros(1, DIM, CV_32F);
+ for (int i = 0; i < NUM; i++)
+ mean1f += data1f.row(i);
+ mean1f /= NUM;
+ for (int i = 0; i < NUM; i++){
+ Mat tmp;
+ pow(data1f.row(i) - mean1f, 2, tmp);
+ stdDev1f += tmp;
+ }
+ pow(stdDev1f/NUM, 0.5, stdDev1f);
+}
+
+vecD Objectness::getVector(const Mat &_t1f)
+{
+ Mat t1f;
+ _t1f.convertTo(t1f, CV_64F);
+ return (vecD)(t1f.reshape(1, 1));
+}
+
+void Objectness::illustrate()
+{
+ Mat xP1f, xN1f;
+ CV_Assert(matRead(_modelName + ".xP", xP1f) && matRead(_modelName + ".xN", xN1f));
+ CV_Assert(xP1f.cols == xN1f.cols && xP1f.cols == _W*_W && xP1f.type() == CV_32F && xN1f.type() == CV_32F);
+ Mat meanP, meanN, stdDevP, stdDevN;
+ meanStdDev(xP1f, meanP, stdDevP);
+ meanStdDev(xN1f, meanN, stdDevN);
+ Mat meanV(_W, _W*2, CV_32F), stdDev(_W, _W*2, CV_32F);
+ meanP.reshape(1, _W).copyTo(meanV.colRange(0, _W));
+ meanN.reshape(1, _W).copyTo(meanV.colRange(_W, _W*2));
+ stdDevP.reshape(1, _W).copyTo(stdDev.colRange(0, _W));
+ stdDevN.reshape(1, _W).copyTo(stdDev.colRange(_W, _W*2));
+ normalize(meanV, meanV, 0, 255, NORM_MINMAX, CV_8U);
+ CmShow::showTinyMat(_voc.resDir + "PosNeg.png", meanV);
+
+ FILE* f = fopen(_S(_voc.resDir + "PosNeg.m"), "w");
+ CV_Assert(f != NULL);
+ fprintf(f, "figure(1);\n\n");
+ PrintVector(f, getVector(meanP), "MeanP");
+ PrintVector(f, getVector(meanN), "MeanN");
+ PrintVector(f, getVector(stdDevP), "StdDevP");
+ PrintVector(f, getVector(stdDevN), "StdDevN");
+ PrintVector(f, getVector(_svmFilter), "Filter");
+ fprintf(f, "hold on;\nerrorbar(MeanP, StdDevP, 'r');\nerrorbar(MeanN, StdDevN, 'g');\nhold off;");
+ fclose(f);
+}
+
+void Objectness::trainStageI()
+{
+ vecM pX, nX;
+ pX.reserve(200000), nX.reserve(200000);
+ Mat xP1f, xN1f;
+ CV_Assert(matRead(_modelName + ".xP", xP1f) && matRead(_modelName + ".xN", xN1f));
+ for (int r = 0; r < xP1f.rows; r++)
+ pX.push_back(xP1f.row(r));
+ for (int r = 0; r < xN1f.rows; r++)
+ nX.push_back(xN1f.row(r));
+ Mat crntW = trainSVM(pX, nX, L1R_L2LOSS_SVC, 10, 1);
+ crntW = crntW.colRange(0, crntW.cols - 1).reshape(1, _W);
+ CV_Assert(crntW.size() == Size(_W, _W));
+ matWrite(_modelName + ".wS1", crntW);
+}
+
+// Training SVM with feature vector X and label Y.
+// Each row of X is a feature vector, with corresponding label in Y.
+// Return a CV_32F weight Mat
+Mat Objectness::trainSVM(CMat &X1f, const vecI &Y, int sT, double C, double bias, double eps)
+{
+ // Set SVM parameters
+ parameter param; {
+ param.solver_type = sT; // L2R_L2LOSS_SVC_DUAL;
+ param.C = C;
+ param.eps = eps; // see setting below
+ param.p = 0.1;
+ param.nr_weight = 0;
+ param.weight_label = NULL;
+ param.weight = NULL;
+ set_print_string_function(print_null);
+ CV_Assert(X1f.rows == Y.size() && X1f.type() == CV_32F);
+ }
+
+ // Initialize a problem
+ feature_node *x_space = NULL;
+ problem prob;{
+ prob.l = X1f.rows;
+ prob.bias = bias;
+ prob.y = Malloc(double, prob.l);
+ prob.x = Malloc(feature_node*, prob.l);
+ const int DIM_FEA = X1f.cols;
+ prob.n = DIM_FEA + (bias >= 0 ? 1 : 0);
+ x_space = Malloc(feature_node, (prob.n + 1) * prob.l);
+ int j = 0;
+ for (int i = 0; i < prob.l; i++){
+ prob.y[i] = Y[i];
+ prob.x[i] = &x_space[j];
+ const float* xData = X1f.ptr<float>(i);
+ for (int k = 0; k < DIM_FEA; k++){
+ x_space[j].index = k + 1;
+ x_space[j++].value = xData[k];
+ }
+ if (bias >= 0){
+ x_space[j].index = prob.n;
+ x_space[j++].value = bias;
+ }
+ x_space[j++].index = -1;
+ }
+ CV_Assert(j == (prob.n + 1) * prob.l);
+ }
+
+ // Training SVM for current problem
+ const char* error_msg = check_parameter(&prob, &param);
+ if(error_msg){
+ fprintf(stderr,"ERROR: %s\n",error_msg);
+ exit(1);
+ }
+ model *svmModel = train(&prob, &param);
+ Mat wMat(1, prob.n, CV_64F, svmModel->w);
+ wMat.convertTo(wMat, CV_32F);
+ free_and_destroy_model(&svmModel);
+ destroy_param(&param);
+ free(prob.y);
+ free(prob.x);
+ free(x_space);
+ return wMat;
+}
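+
+// The conversion above follows the LIBLINEAR sparse convention: each training row
+// becomes 1-based (index, value) pairs terminated by index = -1, with the optional
+// bias stored as an extra feature at index prob.n (mirroring read_problem in train.c).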
+
+// pX1f, nX1f are positive and negative training samples, each is a row vector
+Mat Objectness::trainSVM(const vector<Mat> &pX1f, const vector<Mat> &nX1f, int sT, double C, double bias, double eps)