TensorFlow 1.x

TensorFlow 1.x and 2.x :: [Book] Deep-learning-with-TensorFlow-2-and-Keras

https://community.rstudio.com/t/recreating-tensorflow-tutorials-in-r/5061
https://cinema4dr12.tistory.com/1155

Computation graphs and running a graph with a session

import tensorflow as tf
#import tensorflow.compat.v1  as tf
tf.compat.v1.disable_v2_behavior()
tf.compat.v1.disable_eager_execution()

myGraph=tf.Graph()
with myGraph.as_default() as graph:
  msg=tf.constant('what the heck!')
# tf.__version__   # '2.1.2'  (TF 2.x running in 1.x graph mode)

# Alternative: use the session as a context manager
# with tf.compat.v1.Session(graph=myGraph) as sess:
#   print(sess.run(msg).decode())
sess=tf.compat.v1.Session(graph=myGraph)
print(sess.run(msg).decode())
# what the heck!
sess.close()
#source("/home/sixx_skcc/RCODE/00.global_dl.R")

# Unlike 1.x, TF 2.x has no separate "define a session, then run" step: operations execute
# immediately (eagerly) as they are called, so eager execution must be disabled here to keep
# using the 1.x graph/session style (a TF 2.x eager sketch follows after this block).
tf$compat$v1$disable_eager_execution()

myGraph <- tf$Graph()
with(myGraph$as_default(), {
  msg <- tf$constant('what the heck!')  
})
# tf$version$VERSION
# tf_version()

sess <- tf$compat$v1$Session(graph=myGraph)
sess$run(msg)$decode()
#[1] "what the heck!"
sess$close()
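
For comparison, here is a minimal TF 2.x sketch of the same constant with eager execution left enabled (the default, i.e. without calling disable_eager_execution()): no graph and no session are needed, and the tensor holds its value immediately.

import tensorflow as tf   # TF 2.x, eager execution enabled by default

msg = tf.constant('what the heck!')
print(msg.numpy().decode())   # value is available right away, no sess.run()
# what the heck!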

Program structure of a computation graph

Example: adding two vectors

import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # needed before creating a 1.x Session

myGraph=tf.Graph()
with myGraph.as_default() as graph:
  v_1=tf.constant([1,2,3,4])
  v_2=tf.constant([2,1,5,3])
  v_add=tf.add(v_1,v_2)

sess=tf.compat.v1.Session(graph=myGraph)
print(sess.run(v_add))
# [3 3 8 7]
print(sess.run([v_add,v_1, v_2]))
# [array([3, 3, 8, 7], dtype=int32), array([1, 2, 3, 4], dtype=int32), array([2, 1, 5, 3], dtype=int32)]
sess.close()
source('/home/sixx_skcc/RCODE/00.global_dl.R')

myGraph <- tf$Graph()
with(myGraph$as_default(), {
  v_1=tf$constant(c(1,2,3,4))
  v_2=tf$constant(c(2,1,5,3))
  v_add=tf$add(v_1,v_2) 
})

sess <- tf$compat$v1$Session(graph=myGraph)
sess$run(v_add)
#[1] 3 3 8 7
sess$run(c(v_add,v_1, v_2))
# [[1]]
# [1] 3 3 8 7
# 
# [[2]]
# [1] 1 2 3 4
# 
# [[3]]
# [1] 2 1 5 3
sess$close()
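
Likewise, under TF 2.x eager execution the same addition runs directly; a short sketch for comparison:

import tensorflow as tf   # TF 2.x, eager execution enabled by default

v_1 = tf.constant([1, 2, 3, 4])
v_2 = tf.constant([2, 1, 5, 3])
print(tf.add(v_1, v_2).numpy())   # evaluated immediately, no graph or session
# [3 3 8 7]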

constant

import tensorflow as tf

###  CONSTANT -----------------------------
tf.constant(6)
#
tf.constant([3.0,2,1])
#
t_2=tf.constant([3,2,1])
t_2
#
tf.zeros([2,3], tf.float32)
# 
print(tf.zeros([2,3], tf.float32).shape)
#(2,3)
myTensor=tf.zeros([2,3], tf.float32)
myTensor.shape
#TensorShape([2, 3])
tf.ones_like( tf.constant([3,2,1]))
#
oneT=tf.ones(shape=[2,3], dtype=tf.int32)
oneT
# 
t=tf.Variable([[0.,1.,2.],[3.,4.,5.],[6.,7.,8]])
print(t)
print(t*2)
# tf.Tensor(
# [[ 0.  2.  4.]
#  [ 6.  8. 10.]
#  [12. 14. 16.]], shape=(3, 3), dtype=float32)
library(tensorflow)

###  CONSTANT -----------------------------
tf$constant(6L)
# tf.Tensor(6, shape=(), dtype=int32)
tf$constant(c(3.0,2L,1L))
# tf.Tensor([3. 2. 1.], shape=(3,), dtype=float32)
t_2 <- tf$constant(c(3L,2L,1L))
print(t_2)
# tf.Tensor([3 2 1], shape=(3,), dtype=int32)
tf$zeros(c(2L,3L), tf$float32)
# tf.Tensor(
#   [[0. 0. 0.]
#    [0. 0. 0.]], shape=(2, 3), dtype=float32)
tf$zeros(c(2L,3L), tf$float32)$shape
# (2, 3)
myTensor <- tf$zeros(c(2L,3L), tf$float32)
myTensor$shape
# (2, 3)
tf$ones_like( tf$constant(c(3L,2L,1L)) )
# tf.Tensor([1 1 1], shape=(3,), dtype=int32)
oneT=tf$ones(shape=c(2L,3L), dtype=tf$int32)
oneT
# tf.Tensor(
# [[1 1 1]
#  [1 1 1]], shape=(2, 3), dtype=int32)
t <- tf$Variable(list(c(0.,1.,2.),c(3.,4.,5.),c(6.,7.,8)))
print(t)
print(t*2)
#tf.Tensor(
# [[ 0.  2.  4.]
#  [ 6.  8. 10.]
#  [12. 14. 16.]], shape=(3, 3), dtype=float32)

sequence
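
A minimal sketch of sequence-generating tensors, assuming the standard tf.linspace() and tf.range() APIs (the output comments give the expected values, not captured output; the R equivalents are tf$linspace() and tf$range()).

import tensorflow as tf

###  SEQUENCE -----------------------------
# evenly spaced values, both endpoints included
tf.linspace(1.0, 10.0, 5)
# values: 1.0, 3.25, 5.5, 7.75, 10.0  (float32)

# like Python's range(): start, limit (exclusive), step
tf.range(1, 10, 2)
# values: 1, 3, 5, 7, 9  (int32)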

random tensor

tf.compat.v1.random_normal(shape=[2,3])
tf.compat.v1.random_normal([2,3], mean=0, stddev=1, seed=666)
tf.compat.v1.random_normal([2,3], mean=2.0, stddev=4, seed=666)
# 
tf.compat.v1.truncated_normal([1,5], stddev=2, seed=12)
# 

randomV = tf.compat.v1.random_normal(shape=[2,3])
tf.compat.v1.random_crop(randomV, [2,2], seed=6)   # crop size must be <= the input shape
tf.compat.v1.random_uniform([2,3], maxval=4, seed=6)
tf.compat.v1.random_shuffle(randomV)
tf.compat.v1.set_random_seed(66)
tf$compat$v1$random_normal(shape=c(2L,3L))
tf$compat$v1$random_normal(c(2L,3L), mean=0, stddev=1, seed=666)
tf$compat$v1$random_normal(c(2L,3L), mean=2., stddev=4, seed=666)
# tf.Tensor(
#   [[ 0.22593403  2.1758626   7.2318034 ]
#    [ 4.070051    0.28119385 -4.2169256 ]], shape=(2, 3), dtype=float32)
tf$compat$v1$truncated_normal(c(1L,5L), stddev=2, seed=12)
# tf.Tensor([[-0.3447433  -0.72559273  0.7772784  -0.00816497 -0.79996455]], shape=(1, 5), dtype=float32)



randomV = tf$compat$v1$random_normal(shape=c(2L,3L))
tf$compat$v1$random_crop(randomV, c(2L,2L), seed=6)   # crop size must be <= the input shape
tf$compat$v1$random_uniform(c(2L,3L), maxval=4, seed=6)
tf$compat$v1$random_shuffle(randomV)
tf$compat$v1$set_random_seed(66)
myGraph <- tf$Graph()
with(myGraph$as_default(), {
  n = 100000L
  R <- tf$compat$v1$random_normal(c(n,1L))
  T <- tf$compat$v1$truncated_normal(c(n,1L))
})

sess <- tf$compat$v1$Session(graph=myGraph)
r <- sess$run(R)
t <- sess$run(T)
ggarrange(
  data.table(r) %>% ggplot(aes(x=V1)) + geom_histogram(bins=1000) ,
  data.table(t) %>% ggplot(aes(x=V1)) + geom_histogram(bins=1000) + scale_x_continuous(limits= c(-5, 5))
)

tf.truncated_normal draws from a normal distribution but discards and re-draws any sample that falls more than two standard deviations from the mean, so the result contains no extremely small or large values.
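
A quick standalone check of that claim (a sketch in TF 2.x eager mode; the sample size and stddev are arbitrary):

import tensorflow as tf

vals = tf.random.truncated_normal([100000], mean=0.0, stddev=2.0, seed=12)
print(float(tf.reduce_max(tf.abs(vals))))   # always below 4.0 (= 2 * stddev)

ref = tf.random.normal([100000], mean=0.0, stddev=2.0, seed=12)
print(float(tf.reduce_max(tf.abs(ref))))    # typically well above 4.0 at this sample size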

Variables

initVal=tf.compat.v1.random_uniform(shape=[50,50], minval=0, maxval=10, seed=6)
t_a=tf.Variable(initVal)
t_b=tf.Variable(initVal)
# declare and initialize variables
weight = tf.Variable(tf.compat.v1.random_normal([100,100], stddev=2))
bias   = tf.Variable(tf.zeros(100), name='biases')
# saving variables
saver=tf.compat.v1.train.Saver()
# placeholder (fed at run time; see the sketch below)
tf.compat.v1.placeholder("float")
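
Putting the pieces together, a minimal 1.x-style sketch (the names and shapes are illustrative): variables must be initialized inside a session before use, and a placeholder is fed at run time through feed_dict.

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3])       # fed at run time
w = tf.Variable(tf.compat.v1.random_normal([3, 1], stddev=2))   # declared, not yet initialized
y = tf.matmul(x, w)

init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
    sess.run(init)                                     # initialize all variables first
    print(sess.run(y, feed_dict={x: [[1., 2., 3.]]}))  # a (1, 1) result; value depends on the random weights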