Commit 4ffb854f authored by Evgeny Belyaev

Updated version of the assignment. The baseline codec and the on-screen output have been improved, and the assignment has been clarified.

parent f008ba1a
#This code is the simplest example of image compression based on neural networks
#Comparison with JPEG is provided as well
#It is a demonstration for the Information Theory course
#Written by Evgeny Belyaev, February 2024.
#Written by Evgeny Belyaev, July 2024.
import os
import math
import numpy
@@ -12,7 +12,12 @@ import tensorflow
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from keras import backend as K
#from keras import backend as K
import tensorflow.keras.backend as K
from skimage.metrics import structural_similarity as ssim
#from tensorflow.keras.callbacks import ModelCheckpoint
#import the C implementation of Witten&Neal&Cleary-1987 arithmetic coding as an external module
from EntropyCodec import *
@@ -25,23 +30,25 @@ trainfolder = './train/'
w=128
h=128
#If 0, then training will be started, otherwise the model will be read from a file
LoadModel = 1
#Training parameters
batch_size = 10
LoadModel = 0
#Number of bits for representation of the layer samples in the training process
bt = 3
epochs = 3000
#epochs = 100
bt = 2
#Training parameters
epochs = 2000
#epochs = 500
#Model parameters
n1=128
n2=32
n3=16
batch_sizeM1 = 24
n1M1=128
n2M1=32
n3M1=16
#Number of images to be compressed and shown from the test folder
NumImagesToShow = 5
#Number of bits for representation of the layer samples
b = 3
b = 2
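#Note: with b=2 the 16x16x16 latent has 16*16*16=4096 samples, so the quantized latent costs
#at most 4096*2/(128*128)=0.5 bits per pixel before arithmetic coding; the adaptive
#arithmetic coder normally lands below this bound.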
#Compute PSNR in RGB domain
def PSNR_RGB(image1,image2):
@@ -95,22 +102,24 @@ def LoadImagesFromFolder (foldername):
return x
#Model training function
def ImageCodecModel(trainfolder):
def ImageCodecModel(trainfolder,trainwithnoise):
input = layers.Input(shape=(w, h, 3))
# Encoder
e1 = layers.Conv2D(n1, (7, 7), activation="relu", padding="same")(input)
e1 = layers.MaxPooling2D((2, 2), padding="same")(e1)
e2 = layers.Conv2D(n2, (5, 5), activation="relu", padding="same")(e1)
e2 = layers.MaxPooling2D((2, 2), padding="same")(e2)
e3 = layers.Conv2D(n3, (3, 3), activation="relu", padding="same")(e2)
e3 = layers.MaxPooling2D((2, 2), padding="same")(e3)
#add noise during training (needed for layer quantization)
e3 = e3 + tensorflow.random.uniform(tensorflow.shape(e3), 0, tensorflow.math.reduce_max(e3)/pow(2, bt+1))
e1 = layers.Conv2D(n1M1, (7, 7), activation="relu", padding="same")(input)
e1 = layers.AveragePooling2D((2, 2), padding="same")(e1)
e2 = layers.Conv2D(n2M1, (5, 5), activation="relu", padding="same")(e1)
e2 = layers.AveragePooling2D((2, 2), padding="same")(e2)
e3 = layers.Conv2D(n3M1, (3, 3), activation="relu", padding="same")(e2)
e3 = layers.AveragePooling2D((2, 2), padding="same")(e3)
e3 = layers.BatchNormalization()(e3) #apply batch normalization to the latent (README step 4)
if trainwithnoise==1:
maxt = tensorflow.keras.ops.max(e3)
e3 = e3 + maxt*keras.random.uniform(shape=(16,16,16), minval=-1.0/pow(2, bt+1), maxval=1.0/pow(2, bt+1), dtype=None, seed=None)
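#Note: the uniform noise above spans +-max/2^(bt+1), i.e. half a quantization step of the
#bt-bit quantizer applied at inference, so the decoder learns to tolerate the latent
#quantization error (see the README for the motivation).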
# Decoder
x = layers.Conv2DTranspose(n3, (3, 3), strides=2, activation="relu", padding="same")(e3)
x = layers.Conv2DTranspose(n2, (5, 5), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(n1, (7, 7), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(n3M1, (3, 3), strides=2, activation="relu", padding="same")(e3)
x = layers.Conv2DTranspose(n2M1, (5, 5), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(n1M1, (7, 7), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2D(3, (3, 3), activation="sigmoid", padding="same")(x)
# Autoencoder
@@ -121,18 +130,29 @@ def ImageCodecModel(trainfolder):
autoencoder.summary()
if LoadModel == 0:
print("Num GPUs Available: ", len(tensorflow.config.list_physical_devices('GPU')))
xtrain = LoadImagesFromFolder(trainfolder)
xtrain = xtrain / 255
autoencoder.fit(xtrain, xtrain, epochs=epochs, batch_size=batch_size,shuffle=True)
autoencoder.save('autoencodertemp.mdl')
encoder.save('encoder.mdl')
decoder.save('decoder.mdl')
with tensorflow.device('gpu'):
autoencoder.fit(xtrain, xtrain, epochs=epochs, batch_size=batch_sizeM1,shuffle=True)
if trainwithnoise==1:
encoder.save('encoder2.keras')
decoder.save('decoder2.keras')
else:
encoder.save('encoder.keras')
decoder.save('decoder.keras')
else:
autoencoder = keras.models.load_model('autoencodertemp.mdl')
encoder = keras.models.load_model('encoder.mdl')
decoder = keras.models.load_model('decoder.mdl')
if trainwithnoise==1:
encoder = keras.models.load_model('encoder2.keras',safe_mode=False)
decoder = keras.models.load_model('decoder2.keras',safe_mode=False)
else:
encoder = keras.models.load_model('encoder.keras',safe_mode=False)
decoder = keras.models.load_model('decoder.keras',safe_mode=False)
return encoder,decoder
#Compresses input layer by multi-alphabet arithmetic coding using memoryless source model
def EntropyEncoder (filename,enclayers,size_z,size_h,size_w):
temp = numpy.zeros((size_z, size_h, size_w), numpy.uint8, 'C')
@@ -182,21 +202,24 @@ def JPEGRDSingleImage(X,TargetBPP,i):
realbpp=bpp
realpsnr=psnr
realQ = Q
image.save('test.jpeg', "JPEG", quality=realQ)
image_dec = Image.open('test.jpeg')
I1 = numpy.array(image.getdata()).reshape(image.size[0], image.size[1], 3)
I2 = numpy.array(image_dec.getdata()).reshape(image_dec.size[0], image_dec.size[1], 3)
#print('\n\n Size = ',numpy.shape(I1))
psnr = ssim(I1[ :, :, 0], I2[ :, :, 0],data_range=255.0)
psnr = psnr + ssim(I1[ :, :, 1], I2[ :, :, 1],data_range=255.0)
psnr = psnr + ssim(I1[ :, :, 2], I2[ :, :, 2],data_range=255.0)
realpsnr=psnr/3.0
JPEGfilename = 'image%i.jpeg' % i
image.save(JPEGfilename, "JPEG", quality=realQ)
return realQ, realbpp, realpsnr
# Main function
if __name__ == '__main__':
#Load test images
xtest = LoadImagesFromFolder(testfolder)
xtest = xtest / 255
#Train the model
encoder, decoder = ImageCodecModel(trainfolder)
def NeuralCompressor(enc,dec):
#Run the model for first NumImagesToShow images from the test set
encoded_layers = encoder.predict(xtest, batch_size=NumImagesToShow)
encoded_layers = enc.predict(xtest, batch_size=NumImagesToShow)
max_encoded_layers = numpy.zeros(NumImagesToShow, numpy.float16, 'C')
#normalize the layer to the interval [0,1)
@@ -227,38 +250,75 @@ if __name__ == '__main__':
for i in range(NumImagesToShow):
encoded_layers_quantized[i] = K.cast(declayers[i]*max_encoded_layers[i], "float32")
encoded_layers[i] = K.cast(encoded_layers[i] * max_encoded_layers[i], "float32")
decoded_imgs = decoder.predict(encoded_layers, batch_size=NumImagesToShow)
decoded_imgsQ = decoder.predict(encoded_layers_quantized, batch_size=NumImagesToShow)
decoded_imgs = dec.predict(encoded_layers, batch_size=NumImagesToShow)
decoded_imgsQ = dec.predict(encoded_layers_quantized, batch_size=NumImagesToShow)
return bpp, decoded_imgs, decoded_imgsQ
# Main function
if __name__ == '__main__':
#Load test images
xtest = LoadImagesFromFolder(testfolder)
xtest = xtest / 255
#Train/load the model
encoder, decoder = ImageCodecModel(trainfolder,0)
encoder2, decoder2 = ImageCodecModel(trainfolder,1)
bpp, decoded_imgs, decoded_imgsQ = NeuralCompressor(encoder,decoder)
bpp2, decoded_imgs2, decoded_imgsQ2 = NeuralCompressor(encoder2,decoder2)
#Shows NumImagesToShow images from the test set
#For each image the following results are presented
#Original image
#Image, represented by the model (without quantization)
#Image, represented by the model with quantization and compression of the layer samples
#Original image (RAW)
#Image, represented by the model (without noise added during training)
#Image, represented by the model (with noise added during training)
#Corresponding JPEG image at the same compression level
#Q is the quality metric measured as SSIM
#bpp is bits per pixel after compression (for RAW data bpp is 24)
for i in range(NumImagesToShow):
title = ''
plt.subplot(4, NumImagesToShow, i + 1).set_title(title, fontsize=10)
plt.imshow(xtest[i, :, :, :], interpolation='nearest')
plt.axis(False)
title=''
plt.subplot(4, NumImagesToShow, i + 1).set_title(title, fontsize=10)
if i==0:
plt.subplot(4, NumImagesToShow, i + 1).text(-50, 64, 'RAW')
plt.imshow(xtest[i, :, :, :], interpolation='nearest')
plt.axis(False)
for i in range(NumImagesToShow):
psnr = PSNR(xtest[i, :, :, :], decoded_imgs[i, :, :, :])
title = '%2.2f' % psnr
#psnr = PSNR(xtest[i, :, :, :], decoded_imgsQ[i, :, :, :])
psnr = ssim(xtest[i, :, :, 0], decoded_imgsQ[i, :, :, 0],data_range=1.0)
psnr = psnr + ssim(xtest[i, :, :, 1], decoded_imgsQ[i, :, :, 1],data_range=1.0)
psnr = psnr + ssim(xtest[i, :, :, 2], decoded_imgsQ[i, :, :, 2],data_range=1.0)
psnr=psnr/3.0
#title = '%2.2f %2.2f' % (psnr, bpp[i])
title = 'Q=%2.2f bpp=%2.2f' % (psnr, bpp[i])
plt.subplot(4, NumImagesToShow, NumImagesToShow + i + 1).set_title(title, fontsize=10)
plt.imshow(decoded_imgs[i, :, :, :], interpolation='nearest')
if i==0:
plt.subplot(4, NumImagesToShow, NumImagesToShow + i + 1).text(-50, 64, 'AE1')
plt.imshow(decoded_imgsQ[i, :, :, :], interpolation='nearest')
plt.axis(False)
for i in range(NumImagesToShow):
psnr = PSNR(xtest[i, :, :, :], decoded_imgsQ[i, :, :, :])
title = '%2.2f %2.2f' % (psnr, bpp[i])
#psnr = PSNR(xtest[i, :, :, :], decoded_imgsQ2[i, :, :, :])
psnr = ssim(xtest[i, :, :, 0], decoded_imgsQ2[i, :, :, 0],data_range=1.0)
psnr = psnr + ssim(xtest[i, :, :, 1], decoded_imgsQ2[i, :, :, 1],data_range=1.0)
psnr = psnr + ssim(xtest[i, :, :, 2], decoded_imgsQ2[i, :, :, 2],data_range=1.0)
psnr=psnr/3.0
#title = '%2.2f %2.2f' % (psnr, bpp2[i])
title = 'Q=%2.2f bpp=%2.2f' % (psnr, bpp2[i])
plt.subplot(4, NumImagesToShow, 2*NumImagesToShow + i + 1).set_title(title, fontsize=10)
plt.imshow(decoded_imgsQ[i, :, :, :], interpolation='nearest')
if i==0:
plt.subplot(4, NumImagesToShow, 2*NumImagesToShow + i + 1).text(-50, 64, 'AE2')
plt.imshow(decoded_imgsQ2[i, :, :, :], interpolation='nearest')
plt.axis(False)
for i in range(NumImagesToShow):
JPEGQP,JPEGrealbpp, JPEGrealpsnr = JPEGRDSingleImage(xtest[i, :, :, :], bpp[i],i)
JPEGfilename = 'image%i.jpeg' % i
JPEGimage = Image.open(JPEGfilename)
title = '%2.2f %2.2f' % (JPEGrealpsnr,JPEGrealbpp)
#title = '%2.2f %2.2f' % (JPEGrealpsnr,JPEGrealbpp)
title = 'Q=%2.2f bpp=%2.2f' % (JPEGrealpsnr,JPEGrealbpp)
plt.subplot(4, NumImagesToShow, 3*NumImagesToShow + i + 1).set_title(title, fontsize=10)
if i==0:
plt.subplot(4, NumImagesToShow, 3*NumImagesToShow + i + 1).text(-50, 64, 'JPEG')
plt.imshow(JPEGimage, interpolation='nearest')
plt.axis(False)
plt.show()
\ No newline at end of file
from setuptools import setup, Extension
import os
import sys
import pybind11
#functions_module = Extension(
# name='EntropyCodec',
# sources=['wrapper.cpp'],
# include_dirs=[os.path.join(os.getenv('PYTHON_DIR'), 'include'),
# os.path.join(pybind11.__path__[0], 'include')]
#)
functions_module = Extension(
name='EntropyCodec',
sources=['wrapper.cpp'],
include_dirs=[os.path.join('/home/eabelyaev/miniconda3/bin/', 'include'),
os.path.join(pybind11.__path__[0], 'include')]
)
#print(pybind11.__path__[0])
#print(os.getenv('PYTHONPATH'))
#print(sys.path)
setup(ext_modules=[functions_module], options={"build_ext": {"build_lib": ".."}})
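#Note: the extension is presumably built with the commands from EntropyCompile.bat / EntropyCompile.sh,
#e.g. something along the lines of "python setup.py build_ext"; the build_lib option above
#places the built library one directory up.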
# An educational project on image compression using neural networks
# An educational project on image compression using an autoencoder
## How the codec works
This simple codec compresses 128x128 images using convolutional neural networks, quantization, and adaptive arithmetic coding.
This simple codec compresses 128x128 images using an autoencoder, quantization, and adaptive arithmetic coding.
The ./train/ folder contains the images that were used to train the network; the ./test/ folder contains the images used to demonstrate the results.
Encoder structure:
1. Layer 1. Takes x of size 128x128x3. Applies a 7x7 convolution, ReLU, 2x2 MaxPooling2D. Outputs a vector y1 of size 64x64x128.
2. Layer 2. Takes y1. Applies a 5x5 convolution, ReLU, 2x2 MaxPooling2D. Outputs a vector y2 of size 32x32x32.
3. Layer 3. Takes y2. Applies a 3x3 convolution, ReLU, 2x2 MaxPooling2D. Outputs a vector y3 of size 16x16x16.
4. Compute the maximum $y_{max} = \max_i{y^i_3}$.
5. Quantization $x_i = \left\lfloor clip(y^i_3/y_{max},[0,1))\cdot 2^b\right\rfloor$, which gives $x_i \in \{0,1,...,2^{b}-1\}$.
6. Compression of $\{x_i\}$ with the adaptive arithmetic coder (AAC) from [1].
1. Layer 1. Takes x of size 128x128x3. Applies a 7x7 convolution, ReLU, 2x2 AveragePooling2D. Outputs a vector y1 of size 64x64x128.
2. Layer 2. Takes y1. Applies a 5x5 convolution, ReLU, 2x2 AveragePooling2D. Outputs a vector y2 of size 32x32x32.
3. Layer 3. Takes y2. Applies a 3x3 convolution, ReLU, 2x2 AveragePooling2D. Outputs a vector y3 of size 16x16x16.
4. Batch normalization is applied.
5. Compute the maximum $y_{max} = \max_i{y^i_3}$.
6. Quantization $x_i = \left\lfloor clip(y^i_3/y_{max},[0,1))\cdot 2^b\right\rfloor$, which gives $x_i \in \{0,1,...,2^{b}-1\}$ (a small numeric sketch of this step is given right after this list).
7. Compression of $\{x_i\}$ with the adaptive arithmetic coder (AAC) from [1].
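A minimal numeric sketch of the quantization step and the matching de-quantization applied on the decoder side; this is only an illustration under the assumption that the latent is a NumPy array, and the variable names are not taken from the repository code:

```python
import numpy as np

b = 2                                   # bits per latent sample, as in the script
y3 = np.random.rand(16, 16, 16) * 7.0   # stand-in for the 16x16x16 encoder output
y_max = y3.max()

# Quantization: x_i = floor(clip(y3_i / y_max, [0, 1)) * 2^b), so x_i in {0, ..., 2^b - 1}
y_norm = np.clip(y3 / y_max, 0.0, 1.0 - 1e-9)
x = np.floor(y_norm * 2 ** b).astype(np.uint8)

# De-quantization (what the decoder side undoes before the transposed convolutions)
y3_hat = x.astype(np.float32) / 2 ** b * y_max

assert x.max() <= 2 ** b - 1
assert np.abs(y3 - y3_hat).max() <= y_max / 2 ** b   # error bounded by one quantization step
```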
Decoder structure:
1. Decode $\{x_i\}$ with the AAC from [1].
@@ -19,21 +20,22 @@
4. Layer 2. Takes $\hat y_2$. A $5\times 5$ convolution, ReLU. Outputs $\hat y_1$ of size 64x64x32.
4. Layer 3. Takes $\hat y_1$. A $7\times 7$ convolution, ReLU. Outputs the image $\hat x$ of size 128x128x3.
The following example shows the case where, during training, no noise commensurate with the 2^3-level quantization is added to the vector y_3. As can be seen, this leads to visible artifacts.
![Training without added noise. Epoch=3000](./doc/AI_Epoch3000_NoNoise.png)
The next example shows the case where noise commensurate with the 2^3-level quantization is added to the vector y_3 during training. As can be seen, the artifacts are largely removed, so this very simple model outperforms JPEG in quality at high compression ratios.
The following example compares the performance of this model at b=2. Here RAW marks the row of original images,
AE1 is the autoencoder trained without added noise, AE2 is the autoencoder trained with added noise, and
JPEG is the JPEG algorithm with its compression ratio chosen to be close to what the autoencoders achieve.
Q is the SSIM image quality metric, and bpp is the number of bits per pixel after compression (for the raw format bpp=24).
![Training with added noise. Epoch=3000](./doc/AI_Epoch3000_Noisebt3.png)
As can be seen, autoencoder AE2 outperforms JPEG in quality at high compression ratios. The example also shows
the importance of adding noise during training, which lets the autoencoder adapt to the quantization error.
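For reference, a minimal stand-alone sketch of this noise injection (a simplified version of what ImageCodecModel does when trainwithnoise==1; the function name here is illustrative):

```python
import tensorflow as tf

bt = 2  # bits assumed for the latent during training, as in the script

def add_quantization_noise(latent):
    # Uniform noise of +-max/2^(bt+1), i.e. half a quantization step, which emulates
    # the rounding error that the real bt-bit quantizer introduces at inference time.
    max_val = tf.reduce_max(latent)
    noise = tf.random.uniform(tf.shape(latent),
                              minval=-1.0 / 2 ** (bt + 1),
                              maxval=1.0 / 2 ** (bt + 1))
    return latent + max_val * noise

# Used on the encoder output of the training graph only; at inference the latent
# is quantized instead of being perturbed:
# e3 = add_quantization_noise(e3)
```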
The AAC from [1] is implemented in C and is linked to the code as an external module. To build the module, run the commands found in
EntropyCompile.bat
EntropyCompile.bat or EntropyCompile.sh
## The lab assignment
Improve the educational codec so that it:
- either shows better subjective visual quality at the same compression ratio;
- or gives a PSNR gain of at least 0.1 dB at the same compression ratio;
- or gives an SSIM gain of at least 0.05 at the same compression ratio;
- or gives a bpp saving of at least 3% at the same quality level.
The following modules may be improved:
@@ -44,6 +46,8 @@ EntropyCompile.bat
Implementation requirements:
- The results must be demonstrated on the images from the ./test/ folder
- Only images from the ./train/ folder may be used for training
- In the comparison, replace the row of AE1 autoencoder images with the results of the developed codec, as shown in the figure above.
- Plot the average SSIM versus bpp curve over the test images for b=2,3,4,5 for AE2, JPEG, and the proposed codec (a plotting sketch follows this list).
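A minimal matplotlib sketch for such a plot, assuming the average SSIM and bpp over the test set have already been collected for each b; every numeric value below is a placeholder, not a measured result:

```python
import matplotlib.pyplot as plt

b_values = [2, 3, 4, 5]
# Placeholders only: fill in the averages measured on ./test/ for each codec and each b
bpp_ae2,  ssim_ae2  = [0.5, 0.75, 1.0, 1.25], [0.80, 0.85, 0.90, 0.93]
bpp_jpeg, ssim_jpeg = [0.5, 0.75, 1.0, 1.25], [0.75, 0.82, 0.88, 0.92]

plt.plot(bpp_ae2,  ssim_ae2,  marker='o', label='AE2')
plt.plot(bpp_jpeg, ssim_jpeg, marker='s', label='JPEG')
# plt.plot(bpp_new, ssim_new, marker='^', label='Proposed codec')
for b, x, y in zip(b_values, bpp_ae2, ssim_ae2):
    plt.annotate(f'b={b}', (x, y))
plt.xlabel('bpp')
plt.ylabel('Average SSIM')
plt.legend()
plt.grid(True)
plt.show()
```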
Send a report to eabelyaev@itmo.ru as a presentation in PDF format that includes:
- The student's full name and group number.
......
File added
File added
doc/AI_Epoch3000_Noisebt3.png replaced (564 KB -> 642 KB)
File added
File added
pip install -r requirements.txt
pause
\ No newline at end of file
keras==2.15.0
matplotlib==3.8.4
numpy==1.23.5
Pillow==9.5.0
Pillow==10.3.0
pybind11==2.11.1
setuptools==58.1.0
tensorflow==2.15.0
tensorflow_intel==2.15.0
keras==3.4.1
matplotlib==3.9.1
numpy==1.24.2
Pillow==10.4.0
pybind11==2.13.1
setuptools==69.5.1
scikit-image
tensorflow==2.16.2