... | ... |
@@ -4,8 +4,8 @@ import iris_data |
4 | 4 |
|
5 | 5 |
(train_x, train_y), (test_x, test_y) = iris_data.load_data() |
6 | 6 |
|
7 |
-print np.shape(train_x) |
|
8 |
-print np.shape(train_y) |
|
7 |
+print (np.shape(train_x)) |
|
8 |
+print (np.shape(train_y)) |
|
9 | 9 |
|
10 | 10 |
feature_columns = [] |
11 | 11 |
for key in train_x.keys(): |
... | ... |
@@ -41,7 +41,7 @@ def test_input_fn(x, y, batch_size): |
41 | 41 |
return dataset |
42 | 42 |
|
43 | 43 |
result = classifier.evaluate(input_fn = lambda:iris_data.eval_input_fn(test_x, test_y, batch_size)) |
44 |
-print result |
|
44 |
+print (result) |
|
45 | 45 |
|
46 | 46 |
predict_x = { |
47 | 47 |
'SepalLength': [5.1, 5.9, 6.9], |
... | ... |
@@ -53,4 +53,4 @@ predict_x = { |
53 | 53 |
predictions = classifier.predict(input_fn=lambda:test_input_fn(predict_x, y=None, batch_size=batch_size)) |
54 | 54 |
|
55 | 55 |
for p in predictions: |
56 |
- print p |
|
56 |
+ print (p) |
1 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,56 @@ |
import tensorflow as tf
import numpy as np
import iris_data

# Load the iris training/test split as ((features, labels), ...) pairs.
(train_x, train_y), (test_x, test_y) = iris_data.load_data()

# Sanity-check the dimensions of the training data.
# NOTE: original used Python 2 print statements, which are a SyntaxError
# on Python 3 — converted to the print() function.
print(np.shape(train_x))
print(np.shape(train_y))
# One numeric feature column per input feature key in the training frame.
feature_columns = [
    tf.feature_column.numeric_column(key) for key in train_x.keys()
]
|
# A 3-class DNN classifier with two hidden layers of 10 units each.
classifier = tf.estimator.DNNClassifier(
    hidden_units=[10, 10],
    feature_columns=feature_columns,
    n_classes=3,
)

# Training hyperparameters.
batch_size = 100
steps = 1000
|
def train_input_fn(x, y, batch_size):
    """Return a shuffled, endlessly repeating, batched Dataset for training."""
    features = dict(x)
    return (
        tf.data.Dataset.from_tensor_slices((features, y))
        .shuffle(1000)
        .repeat()
        .batch(batch_size)
    )

# Fit the estimator on the training split.
classifier.train(
    input_fn=lambda: train_input_fn(train_x, train_y, batch_size),
    steps=steps,
)
|
def test_input_fn(x, y, batch_size):
    """Return a batched Dataset for evaluation or prediction.

    When ``y`` is None (prediction), the dataset yields features only;
    otherwise it yields (features, label) pairs.
    """
    features = dict(x)
    inputs = features if y is None else (features, y)
    return tf.data.Dataset.from_tensor_slices(inputs).batch(batch_size)
|
# Evaluate on the held-out test split and report the metrics dict.
# Fixed: Python 2 `print result` is a SyntaxError on Python 3.
result = classifier.evaluate(
    input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, batch_size)
)
print(result)
|
# Three unlabeled iris samples to classify, keyed by feature name;
# each list holds one value per sample (keys match the feature columns
# built from train_x above).
predict_x = {
    'SepalLength': [5.1, 5.9, 6.9],
    'SepalWidth': [3.3, 3.0, 3.1],
    'PetalLength': [1.7, 4.2, 5.4],
    'PetalWidth': [0.5, 1.5, 2.1],
}
|
# Classify the unlabeled samples; y=None makes the input_fn yield
# features only. Fixed: Python 2 `print p` is a SyntaxError on Python 3.
predictions = classifier.predict(
    input_fn=lambda: test_input_fn(predict_x, y=None, batch_size=batch_size)
)

for p in predictions:
    print(p)