LinearARDKernel.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2015 Wu Lin
 * Written (W) 2012 Jacob Walker
 *
 * Adapted from WeightedDegreeRBFKernel.cpp
 */

#include <shogun/kernel/LinearARDKernel.h>

using namespace shogun;

CLinearARDKernel::CLinearARDKernel() : CDotKernel()
{
	init();
}

CLinearARDKernel::~CLinearARDKernel()
{
}

void CLinearARDKernel::init()
{
	// default to scalar ARD with a unit weight
	m_ARD_type=KT_SCALAR;
	m_weights=SGMatrix<float64_t>(1,1);
	m_weights.set_const(1.0);
	SG_ADD(&m_weights, "weights", "Feature weights", MS_AVAILABLE,
		GRADIENT_AVAILABLE);
}

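/* The weighting scheme (m_ARD_type) determines both the expected shape of
 * m_weights and the kernel that is computed (see compute_helper() below):
 *   KT_SCALAR: m_weights is 1-by-1,  k(x,y) = w^2 * (x . y)
 *   KT_DIAG:   m_weights is p-by-1,  k(x,y) = sum_i w_i^2 * x_i * y_i
 *   KT_FULL:   m_weights is d-by-p,  k(x,y) = x^T * W^T * W * y
 * where p is the feature dimension and d the number of projected dimensions.
 */
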
#ifdef HAVE_LINALG_LIB
#include <shogun/mathematics/linalg/linalg.h>

CLinearARDKernel::CLinearARDKernel(int32_t size) : CDotKernel(size)
{
	init();
}

CLinearARDKernel::CLinearARDKernel(CDotFeatures* l,
		CDotFeatures* r, int32_t size) : CDotKernel(size)
{
	init();
	init(l,r);
}

bool CLinearARDKernel::init(CFeatures* l, CFeatures* r)
{
	cleanup();
	CDotKernel::init(l, r);
	int32_t dim=((CDotFeatures*) l)->get_dim_feature_space();
	if (m_ARD_type==KT_FULL)
	{
		REQUIRE(m_weights.num_cols==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
			dim, m_weights.num_cols);
	}
	else if (m_ARD_type==KT_DIAG)
	{
		REQUIRE(m_weights.num_rows==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
			dim, m_weights.num_rows);
	}
	return init_normalizer();
}

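/* Helper for compute_helper(): builds the weighted right-hand factor of the
 * kernel. For scalar ARD the raw vector is reused and the weight is folded
 * into scalar_weight; for diagonal and full ARD the result is w.*y and W*y,
 * respectively. */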
SGMatrix<float64_t> CLinearARDKernel::compute_right_product(SGVector<float64_t> right_vec,
	float64_t& scalar_weight)
{
	SGMatrix<float64_t> right;

	if (m_ARD_type==KT_SCALAR)
	{
		right=SGMatrix<float64_t>(right_vec.vector,right_vec.vlen,1,false);
		scalar_weight*=m_weights[0];
	}
	else
	{
		right=SGMatrix<float64_t>(m_weights.num_rows,1);

		SGMatrix<float64_t> rtmp(right_vec.vector,right_vec.vlen,1,false);

		if (m_ARD_type==KT_DIAG)
			linalg::elementwise_product(m_weights, rtmp, right);
		else if (m_ARD_type==KT_FULL)
			linalg::matrix_product(m_weights, rtmp, right);
		else
			SG_ERROR("Unsupported ARD kernel\n");
	}
	return right;
}

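/* Computes k(x,y) for one pair of feature vectors. The left factor is built
 * analogously to compute_right_product(), giving
 *   scalar ARD: w^2 * (x . y)
 *   diag ARD:   (w .* x) . (w .* y)
 *   full ARD:   (W*x) . (W*y) = x^T * W^T * W * y
 */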
float64_t CLinearARDKernel::compute_helper(SGVector<float64_t> avec, SGVector<float64_t> bvec)
{
	SGMatrix<float64_t> left;

	float64_t scalar_weight=1.0;
	if (m_ARD_type==KT_SCALAR)
	{
		left=SGMatrix<float64_t>(avec.vector,1,avec.vlen,false);
		scalar_weight=m_weights[0];
	}
	else
	{
		left=SGMatrix<float64_t>(1,m_weights.num_rows);

		SGMatrix<float64_t> ltmp(avec.vector,avec.vlen,1,false);

		SGMatrix<float64_t> left_transpose(left.matrix,left.num_cols,1,false);
		if (m_ARD_type==KT_DIAG)
			linalg::elementwise_product(m_weights, ltmp, left_transpose);
		else if (m_ARD_type==KT_FULL)
			linalg::matrix_product(m_weights, ltmp, left_transpose);
		else
			SG_ERROR("Unsupported ARD kernel\n");
	}

	SGMatrix<float64_t> res(1,1);
	SGMatrix<float64_t> right=compute_right_product(bvec, scalar_weight);
	linalg::matrix_product(left, right, res);
	return res[0]*scalar_weight;
}

float64_t CLinearARDKernel::compute(int32_t idx_a, int32_t idx_b)
{
	REQUIRE(lhs && rhs, "Features not set!\n");

	SGVector<float64_t> avec=((CDotFeatures *)lhs)->get_computed_dot_feature_vector(idx_a);
	SGVector<float64_t> bvec=((CDotFeatures *)rhs)->get_computed_dot_feature_vector(idx_b);

	return compute_helper(avec, bvec);
}

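/* Derivative of k(x,y) with respect to a single entry of m_weights, scaled
 * by 'scale':
 *   scalar ARD: dk/dw      = 2*w*(x . y)
 *   diag ARD:   dk/dw_i    = 2*w_i*x_i*y_i
 *   full ARD:   dk/dW_{ij} = (W_{i,:} . x)*y_j + (W_{i,:} . y)*x_j
 * For full ARD, 'index' is a column-major linear index into m_weights. */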
float64_t CLinearARDKernel::compute_gradient_helper(SGVector<float64_t> avec,
	SGVector<float64_t> bvec, float64_t scale, index_t index)
{
	float64_t result;

	if (m_ARD_type==KT_DIAG)
	{
		result=2.0*avec[index]*bvec[index]*m_weights[index];
	}
	else
	{
		SGMatrix<float64_t> left(avec.vector,1,avec.vlen,false);
		SGMatrix<float64_t> right(bvec.vector,bvec.vlen,1,false);
		SGMatrix<float64_t> res(1,1);

		if (m_ARD_type==KT_SCALAR)
		{
			linalg::matrix_product(left, right, res);
			result=2.0*res[0]*m_weights[0];
		}
		else if (m_ARD_type==KT_FULL)
		{
			//index is a linearized index of m_weights (column-major)
			//m_weights is a d-by-p matrix, where p is #dimension of features
			int32_t row_index=index%m_weights.num_rows;
			int32_t col_index=index/m_weights.num_rows;
			SGVector<float64_t> row_vec=m_weights.get_row_vector(row_index);
			SGMatrix<float64_t> row_vec_r(row_vec.vector,row_vec.vlen,1,false);

			linalg::matrix_product(left, row_vec_r, res);
			result=res[0]*bvec[col_index];

			SGMatrix<float64_t> row_vec_l(row_vec.vector,1,row_vec.vlen,false);
			linalg::matrix_product(row_vec_l, right, res);
			result+=res[0]*avec[col_index];
		}
		else
		{
			SG_ERROR("Unsupported ARD kernel\n");
		}
	}
	return result*scale;
}

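/* Returns the num_lhs-by-num_rhs matrix of derivatives of the kernel matrix
 * with respect to the 'index'-th entry (column-major) of the "weights"
 * parameter. */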
SGMatrix<float64_t> CLinearARDKernel::get_parameter_gradient(
	const TParameter* param, index_t index)
{
	REQUIRE(lhs && rhs, "Features not set!\n");

	int32_t row_index, col_index;
	if (m_ARD_type!=KT_SCALAR)
	{
		REQUIRE(index>=0, "Index (%d) must be non-negative\n", index);
		if (m_ARD_type==KT_DIAG)
		{
			REQUIRE(index<m_weights.num_rows, "Index (%d) must be within #dimension of weights (%d)\n",
				index, m_weights.num_rows);
		}
		else if (m_ARD_type==KT_FULL)
		{
			row_index=index%m_weights.num_rows;
			col_index=index/m_weights.num_rows;
			REQUIRE(row_index<m_weights.num_rows,
				"Row index (%d) must be within #row of weights (%d)\n",
				row_index, m_weights.num_rows);
			REQUIRE(col_index<m_weights.num_cols,
				"Column index (%d) must be within #column of weights (%d)\n",
				col_index, m_weights.num_cols);
		}
	}
	if (!strcmp(param->m_name, "weights"))
	{
		SGMatrix<float64_t> derivative(num_lhs, num_rhs);

		for (index_t j=0; j<num_lhs; j++)
		{
			SGVector<float64_t> avec=((CDotFeatures *)lhs)->get_computed_dot_feature_vector(j);
			for (index_t k=0; k<num_rhs; k++)
			{
				SGVector<float64_t> bvec=((CDotFeatures *)rhs)->get_computed_dot_feature_vector(k);
				derivative(j,k)=compute_gradient_helper(avec, bvec, 1.0, index);
			}
		}
		return derivative;
	}
	else
	{
		SG_ERROR("Can't compute derivative wrt %s parameter\n", param->m_name);
		return SGMatrix<float64_t>();
	}
}

SGMatrix<float64_t> CLinearARDKernel::get_weights()
{
	return m_weights;
}

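/* Sets the weights and infers the ARD type from their shape:
 * 1-by-1 selects scalar ARD, d-by-1 (d>1) diagonal ARD, and any matrix with
 * more than one column full ARD. */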
void CLinearARDKernel::set_weights(SGMatrix<float64_t> weights)
{
	REQUIRE(weights.num_cols>0 && weights.num_rows>0,
		"Weight Matrix (%d-by-%d) must not be empty\n",
		weights.num_rows, weights.num_cols);
	if (weights.num_cols>1)
	{
		m_ARD_type=KT_FULL;
	}
	else
	{
		if (weights.num_rows==1)
		{
			m_ARD_type=KT_SCALAR;
		}
		else
		{
			m_ARD_type=KT_DIAG;
		}
	}
	m_weights=weights;
}

void CLinearARDKernel::set_scalar_weights(float64_t weight)
{
	SGMatrix<float64_t> weights(1,1);
	weights(0,0)=weight;
	set_weights(weights);
}

void CLinearARDKernel::set_vector_weights(SGVector<float64_t> weights)
{
	SGMatrix<float64_t> weights_mat(weights.vlen,1);
	std::copy(weights.vector, weights.vector+weights.vlen, weights_mat.matrix);
	set_weights(weights_mat);
}

void CLinearARDKernel::set_matrix_weights(SGMatrix<float64_t> weights)
{
	set_weights(weights);
}
#endif //HAVE_LINALG_LIB
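
/* Minimal usage sketch (not part of this file; assumes a linalg-enabled
 * build and dense 64-bit features, and that the caller fills 'data'):
 *
 *   SGMatrix<float64_t> data(dim, num_vectors);
 *   CDenseFeatures<float64_t>* feats=new CDenseFeatures<float64_t>(data);
 *   CLinearARDKernel* kernel=new CLinearARDKernel();
 *   SGVector<float64_t> w(dim);
 *   w.set_const(1.0);
 *   kernel->set_vector_weights(w);      // diagonal ARD
 *   kernel->init(feats, feats);
 *   SGMatrix<float64_t> K=kernel->get_kernel_matrix();
 *   SG_UNREF(kernel);
 */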