# Getting Started with TensorFlow

## Installing TensorFlow

pip install tensorflow


# CPU only
docker run -it -p 8888:8888 tensorflow/tensorflow
# GPU version
nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:latest-gpu


## TensorFlow Basics

from __future__ import print_function, division
import tensorflow as tf



## A Simple Example

import tensorflow as tf

a = tf.constant(5, name="input_a")
b = tf.constant(3, name="input_b")
c = tf.mul(a, b, name="mul_c")
d = tf.add(a, b, name="add_d")
e = tf.add(c, d, name="add_e")

with tf.Session() as sess:
    print(sess.run(e))  # output => 23
    writer = tf.train.SummaryWriter("./hello_graph", sess.graph)


tensorboard --logdir="hello_graph"


## A Simple Tensor Example

import tensorflow as tf

a = tf.constant([5, 3], name="input_a")
b = tf.reduce_prod(a, name="prod_b")
c = tf.reduce_sum(a, name="sum_c")
d = tf.add(b, c, name="add_d")

with tf.Session() as sess:
    print(sess.run(d))  # => 23


## Basic Types

All data in TensorFlow is represented as a Tensor, which may hold a scalar, a vector, or a multi-dimensional array. A Tensor has several important attributes (all three are inspected in the sketch after this list):

• Rank: the number of dimensions of the Tensor, e.g. a scalar has rank 0, a vector rank 1, a matrix rank 2
• Type: the data type, e.g. tf.float32, tf.uint8
• Shape: the shape of the Tensor, e.g. a vector has shape [D0], a matrix shape [D0, D1]
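
A minimal sketch (using the same session-style API as the rest of this post) that inspects these attributes on a small constant:

import tensorflow as tf

m = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]], name="m")

print(m.dtype)  # => <dtype: 'float32'>

with tf.Session() as sess:
    print(sess.run(tf.rank(m)))   # => 2, a matrix
    print(sess.run(tf.shape(m)))  # => [2 3]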

# Constant

a = tf.constant(2)
b = tf.constant(3)

with tf.Session() as sess:
    print(sess.run(a + b))  # => 5


# Variable
# Variables maintain state across executions of the
# graph. The following example shows a variable serving
# as a simple counter.

v1 = tf.Variable(10)
v2 = tf.Variable(5)

with tf.Session() as sess:
    # variables must be initialized first.
    tf.initialize_all_variables().run(session=sess)
    print(sess.run(v1 + v2))  # => 15

# Placeholder and feed
# Placeholder is used as Graph input when running session
# A feed temporarily replaces the output of an operation
# with a tensor value. You supply feed data as an argument
# to a run() call. The feed is only used for the run call
# to which it is passed. The most common use case involves
# designating specific operations to be "feed" operations
# by using tf.placeholder() to create them

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

# Define some operations
add = tf.add(a, b)
mul = tf.mul(a, b)

with tf.Session() as sess:
    print(sess.run(add, feed_dict={a: 2, b: 3}))  # ==> 5
    print(sess.run(mul, feed_dict={a: 2, b: 3}))  # ==> 6

# Matrix

# Create a Constant op that produces a 1x2 matrix.  The op is
# added as a node to the default graph.
#
# The value returned by the constructor represents the output
# of the Constant op.
matrix1 = tf.constant([[3., 3.]])

# Create another Constant that produces a 2x1 matrix.
matrix2 = tf.constant([[2.],[2.]])

# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
# The returned value, 'product', represents the result of the matrix
# multiplication.
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    print(product.eval())  # => [[ 12.]]


## Data Types

TensorFlow provides a rich set of data types, such as tf.int32 and tf.float64, and they are consistent with numpy's dtypes.

import tensorflow as tf
import numpy as np

a = np.array([2, 3], dtype=np.int32)
b = np.array([4, 5], dtype=np.int32)
# Use tf.add() to initialize an "add" Operation
c = tf.add(a, b)

with tf.Session() as sess:
    print(sess.run(c))  # ==> [6 8]
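
To make the numpy correspondence concrete, here is a small sketch (again with the old-style API used throughout this post) that builds a Tensor from an int32 numpy array and converts it to another dtype with tf.cast:

import numpy as np
import tensorflow as tf

t = tf.constant(np.array([1, 2, 3], dtype=np.int32))
print(t.dtype)  # => <dtype: 'int32'>, mirroring the numpy dtype

# tf.cast converts between dtypes, e.g. int32 -> float32
f = tf.cast(t, tf.float32)

with tf.Session() as sess:
    print(sess.run(f))  # => [ 1.  2.  3.]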


tf.convert_to_tensor(value, dtype=tf.float32) is a very useful conversion function, typically used when building new Operations: it accepts native Python types, numpy arrays, and Tensors alike.
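
A minimal sketch of this behaviour (the values here are chosen purely for illustration):

import numpy as np
import tensorflow as tf

t1 = tf.convert_to_tensor([1.0, 2.0], dtype=tf.float32)            # Python list
t2 = tf.convert_to_tensor(np.array([3.0, 4.0], dtype=np.float32))  # numpy array
t3 = tf.convert_to_tensor(t1, dtype=tf.float32)                    # already a Tensor

with tf.Session() as sess:
    print(sess.run(t1))  # => [ 1.  2.]
    print(sess.run(t2))  # => [ 3.  4.]
    print(sess.run(t3))  # => [ 1.  2.]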

## Math Operations

TensorFlow has many built-in math operations, including the usual element-wise numeric operations, matrix operations, and optimization algorithms.

import tensorflow as tf
sess = tf.InteractiveSession()

x = tf.constant([[2, 5, 3, -5],
                 [0, 3, -2, 5],
                 [4, 3, 5, 3],
                 [6, 1, 4, 0]])
y = tf.constant([[4, -7, 4, -3, 4],
                 [6, 4, -7, 4, 7],
                 [2, 3, 2, 1, 4],
                 [1, 5, 5, 5, 2]])

floatx = tf.constant([[2., 5., 3., -5.],
                      [0., 3., -2., 5.],
                      [4., 3., 5., 3.],
                      [6., 1., 4., 0.]])

print(tf.transpose(x).eval())
print(tf.matmul(x, y).eval())
print(tf.matrix_determinant(tf.to_float(x)).eval())
print(tf.matrix_inverse(tf.to_float(x)).eval())
print(tf.matrix_solve(tf.to_float(x), [[1.], [1.], [1.], [1.]]).eval())


Reduction operations act along the specified dimensions and return the reduced result:

import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.constant([[1, 2, 3],
                 [3, 2, 1],
                 [-1, -2, -3]])

boolean_tensor = tf.constant([[True, False, True],
                              [False, False, True],
                              [True, False, False]])

print(tf.reduce_prod(x).eval())                        # => -216
print(tf.reduce_prod(x, reduction_indices=1).eval())   # => [ 6  6 -6]
print(tf.reduce_min(x, reduction_indices=1).eval())    # => [ 1  1 -3]
print(tf.reduce_max(x, reduction_indices=1).eval())    # => [ 3  3 -1]
print(tf.reduce_mean(x, reduction_indices=1).eval())   # => [ 2  2 -2]

# Computes the "logical and" of elements
print(tf.reduce_all(boolean_tensor, reduction_indices=1).eval())  # => [False False False]

# Computes the "logical or" of elements
print(tf.reduce_any(boolean_tensor, reduction_indices=1).eval())  # => [ True  True  True]


Segmentation

Segmentation operations act over groups of rows defined by segment indices and return one reduced result per segment:

import tensorflow as tf
sess = tf.InteractiveSession()

seg_ids = tf.constant([0, 1, 1, 2, 2])  # Group indexes: 0 | 1,2 | 3,4
x = tf.constant([[2, 5, 3, -5],
                 [0, 3, -2, 5],
                 [4, 3, 5, 3],
                 [6, 1, 4, 0],
                 [6, 1, 4, 0]])

print(tf.segment_sum(x, seg_ids).eval())
print(tf.segment_prod(x, seg_ids).eval())
print(tf.segment_min(x, seg_ids).eval())
print(tf.segment_max(x, seg_ids).eval())
print(tf.segment_mean(x, seg_ids).eval())


import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.constant([[2, 5, 3, -5],
                 [0, 3, -2, 5],
                 [4, 3, 5, 3],
                 [6, 1, 4, 0]])
listx = tf.constant([1, 2, 5, 3, 4, 5, 6, 7, 8, 3, 2])
boolx = tf.constant([[True, False], [False, True]])

# Position of the minimum value in each column
# [1 3 1 0]
print(tf.argmin(x, 0).eval())

# Position of the maximum value in each row
# [1 3 2 0]
print(tf.argmax(x, 1).eval())

# Indices of the True values
# [[0 0]
#  [1 1]]
print(tf.where(boolx).eval())

# Unique values in the list
# [1 2 5 3 4 6 7 8]
print(tf.unique(listx)[0].eval())


## Rank and Shape

Rank | Shape | Dimension number | Example
---- | ----- | ---------------- | -------
0 | [] | 0-D | A 0-D tensor. A scalar.
1 | [D0] | 1-D | A 1-D tensor with shape [5].
2 | [D0, D1] | 2-D | A 2-D tensor with shape [3, 4].
3 | [D0, D1, D2] | 3-D | A 3-D tensor with shape [1, 4, 3].
n | [D0, D1, ... Dn-1] | n-D | A tensor with shape [D0, D1, ... Dn-1].

import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.constant([[2, 5, 3, -5],
                 [0, 3, -2, 5],
                 [4, 3, 5, 3],
                 [6, 1, 4, 0]])

print(tf.shape(x).eval())  # => [4 4]
print(tf.size(x).eval())   # => 16
print(tf.rank(x).eval())   # => 2
print(tf.reshape(x, [8, 2]).eval())
print(tf.squeeze(x).eval())
print(tf.expand_dims(x, 1).eval())


## Graphs and Sessions

A TensorFlow graph is a description of computations. To compute anything, a graph must be launched in a Session. A Session places the graph ops onto Devices, such as CPUs or GPUs, and provides methods to execute them. These methods return tensors produced by ops as numpy ndarray objects in Python, and as tensorflow::Tensor instances in C and C++.

graph = tf.Graph()
with graph.as_default():
    value1 = tf.constant([1., 2.])
    value2 = tf.Variable([3., 4.])
    result = value1 * value2

    with tf.Session(graph=graph) as sess:
        tf.initialize_all_variables().run()
        print(sess.run(result))
        print(result.eval())

# result =>
#    [ 3.  8.]
#    [ 3.  8.]


with tf.Session() as sess:
    with tf.device("/gpu:1"):
        matrix1 = tf.constant([[3., 3.]])
        matrix2 = tf.constant([[2.], [2.]])
        product = tf.matmul(matrix1, matrix2)
        ...


sess = tf.InteractiveSession()

x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])

# Initialize 'x' using the run() method of its initializer op.
x.initializer.run()

# Add an op to subtract 'a' from 'x'.  Run it and print the result
sub = tf.sub(x, a)
print(sub.eval())  # ==> [-2. -1.]

# Close the Session when we're done.
sess.close()


## Name Scopes

Name scopes let you group complex operations into smaller named blocks, which keeps large graphs organized and makes them easier to read in TensorBoard.

import tensorflow as tf

with tf.name_scope("Scope_A"):
b = tf.mul(a, 3, name="A_mul")

with tf.name_scope("Scope_B"):
d = tf.mul(c, 6, name="B_mul")

writer = tf.train.SummaryWriter('./name_scope_1', graph=tf.get_default_graph())
writer.close()


## A Complete Example

import tensorflow as tf

# Define a new Graph
graph = tf.Graph()

with graph.as_default():

    with tf.name_scope("variables"):
        # Variable to keep track of how many times the graph has been run
        global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")

        # Variable that keeps track of the sum of all output values over time:
        total_output = tf.Variable(0.0, dtype=tf.float32, trainable=False, name="total_output")

    # Primary transformation Operations
    with tf.name_scope("transformation"):

        # Separate input layer
        with tf.name_scope("input"):
            # Create input placeholder- takes in a Vector
            a = tf.placeholder(tf.float32, shape=[None], name="input_placeholder_a")

        # Separate middle layer
        with tf.name_scope("intermediate_layer"):
            b = tf.reduce_prod(a, name="product_b")
            c = tf.reduce_sum(a, name="sum_c")

        # Separate output layer
        with tf.name_scope("output"):
            output = tf.add(b, c, name="output")

    with tf.name_scope("update"):
        # Increments the total_output Variable by the latest output
        update_total = total_output.assign_add(output)

        # Increments the above global_step Variable, should be run whenever the graph is run
        increment_step = global_step.assign_add(1)

    # Summary Operations
    with tf.name_scope("summaries"):
        avg = tf.div(update_total, tf.cast(increment_step, tf.float32), name="average")

        # Creates summaries for output node
        tf.scalar_summary(b'Output', output, name="output_summary")
        tf.scalar_summary(b'Sum of outputs over time', update_total, name="total_summary")
        tf.scalar_summary(b'Average of outputs over time', avg, name="average_summary")

    # Global Variables and Operations
    with tf.name_scope("global_ops"):
        # Initialization Op
        init = tf.initialize_all_variables()
        # Merge all summaries into one Operation
        merged_summaries = tf.merge_all_summaries()

# Start a Session, using the explicitly created Graph
sess = tf.Session(graph=graph)

# Open a SummaryWriter to save summaries
writer = tf.train.SummaryWriter('./improved_graph', graph)

# Initialize Variables
sess.run(init)

def run_graph(input_tensor):
    """
    Helper function; runs the graph with given input tensor and saves summaries
    """
    feed_dict = {a: input_tensor}
    _, step, summary = sess.run([output, increment_step, merged_summaries],
                                feed_dict=feed_dict)
    writer.add_summary(summary, global_step=step)

# run graph with some inputs
run_graph([2,8])
run_graph([3,1,3,3])
run_graph([8])
run_graph([1,2,3])
run_graph([11,4])
run_graph([4,1])
run_graph([7,3,1])
run_graph([6,3])
run_graph([0,2])
run_graph([4,5,6])

# flush summaries to disk
writer.flush()

# close writer and session
writer.close()
sess.close()


TensorBoard graph:

TensorBoard events: