-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathneural_network.mli
71 lines (59 loc) · 1.37 KB
/
neural_network.mli
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
(** A single layer of the network. The representation is abstract; from this
    interface a layer is characterized only by an [int] size (presumably the
    number of nodes in the layer — confirm against the implementation). *)
module Layer : sig
type t
(** [of_size n] creates a layer of size [n]. *)
val of_size : int -> t
(** [to_size t] returns the size [t] was created with. *)
val to_size : t -> int
end
(** Identifies a single weight of the network by the edge it sits on:
    the source node in some layer and the destination node in the next.
    Hashable so parameters can be used as table keys. *)
module Parameter : sig
(** Source endpoint of an edge: either the layer's bias node or the
    node at a given index within the layer. *)
module Node_from : sig
type t =
| Bias
| Index of int
[@@deriving sexp,compare,hash]
end
(** Destination endpoint of an edge. There is no bias case: a bias node
    cannot be the target of a weight. *)
module Node_to : sig
type t =
| Index of int
[@@deriving sexp,compare,hash]
end
type t =
{ layer_from_index : int (* index of the source layer in the network *)
; node_from : Node_from.t
; node_to : Node_to.t (* node in the layer following [layer_from_index] *)
} [@@deriving sexp,compare,hash]
include Core.Hashable.S with type t := t
end
(** A neural network architecture together with its symbolic output function.
    The numeric parameter values are NOT stored here; they are supplied
    separately (see {!train_parameters} and {!output}). *)
type t =
{ layers : Layer.t list
; parameters : Parameter.t list
(* Position of a parameter in the flat parameter vector, or [None] if the
   parameter is not part of this network. *)
; index_of_parameter : Parameter.t -> int option
(* Maps a concrete input vector to the network outputs as autodiff
   expressions in the parameters — presumably the [float list] is the
   network input and the result is symbolic in the weights; confirm. *)
; parameterized_output : float list -> Autodiff.Float.t list
}
(** Build a network from an explicit connectivity description.
    Each element of the outer list presumably describes one layer's edges as
    [(from, to)] node-index pairs — TODO confirm exact encoding against the
    implementation. [?activation] overrides the default activation function.
    The [_exn] suffix indicates this raises on invalid input. *)
val create_exn
: ?activation:Autodiff.Float.Univar.t
-> (int * int) list list
-> t
(** Build a fully connected network whose layer sizes are given by the
    [int list]. [?activation] overrides the default activation function.
    The [_exn] suffix indicates this raises on invalid input
    (e.g. presumably an empty size list — confirm). *)
val create_full_exn
: ?activation:Autodiff.Float.Univar.t
-> int list
-> t
(** Optimization method used by {!train_parameters}. *)
module Method : sig
type t =
| Newton
| Gradient_descent_with_step_size of float
(* Attribute normalized to [@@deriving ...] (no space) for consistency
   with every other deriving attribute in this file. *)
[@@deriving sexp]
(** Command-line argument parser for selecting a method. *)
val arg : t Core.Command.Arg_type.t
end
(** Fit the network's parameters to training data.
    Returns the trained parameter vector (presumably ordered per
    [index_of_parameter] — confirm) paired with the optimizer's
    termination status.
    - [?cost_of_output_and_answer] overrides the per-example cost function.
    - [?regularization] presumably a regularization coefficient — confirm.
    - [?epsilon_init] presumably the scale of random parameter
      initialization — confirm.
    - [?robust] semantics not visible from this interface; check the
      implementation.
    - [?method_] optimization algorithm (see {!Method}).
    - [?iterations] maximum iteration count.
    - [inputs_and_answers] training pairs of (input, expected output). *)
val train_parameters
: ?cost_of_output_and_answer:Autodiff.Float.t
-> ?regularization:float
-> ?epsilon_init:float
-> ?robust:bool
-> ?method_:Method.t
-> ?iterations:int
-> t
-> inputs_and_answers:(float list * float list) list
-> float list * Newton.Status.t
(** [output t ~trained_parameters] stages a concrete prediction function:
    the staging presumably performs the parameter substitution once so the
    returned [float list -> float list] can be applied cheaply to many
    inputs — confirm against the implementation. Unstage with
    [Core.Staged.unstage]. *)
val output
: t
-> trained_parameters:float list
-> (float list -> float list) Core.Staged.t