Traitement de données massives
John Samuel
CPE Lyon
Year: 2021-2022
Email: john(dot)samuel(at)cpe(dot)fr
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
# Create a sequential model
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(3,)))  # hidden layer: 4 units, ReLU, 3 input features
model.add(Dense(units=2, activation='softmax'))           # output layer: 2 classes, softmax
# Compile the model
sgd = SGD(learning_rate=0.01)  # learning_rate replaces the deprecated lr argument
model.compile(loss='mean_squared_error',
              optimizer=sgd,
              metrics=['accuracy'])
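A minimal usage sketch for the model above, assuming a synthetic dataset of 100 random samples with 3 features and 2 one-hot encoded classes (the data here is purely illustrative):

import numpy as np
from tensorflow.keras.utils import to_categorical

# Illustrative random data matching input_shape=(3,) and the 2-unit softmax output
X = np.random.random((100, 3))
y = to_categorical(np.random.randint(2, size=(100,)), num_classes=2)

model.summary()                            # inspect layers and parameter counts
model.fit(X, y, epochs=5, batch_size=10)   # train for a few epochs on the toy data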
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
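# CIFAR-10 ships 50,000 training and 10,000 test RGB images of size 32x32,
# i.e. train_images has shape (50000, 32, 32, 3) and test_images (10000, 32, 32, 3);
# the division above rescales pixel values from [0, 255] to [0, 1]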
# Create a sequential model (convolutional neural network)
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))  # 10-class output layer (logits)
# Compile the model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Train for 10 epochs, using the test set as validation data
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))
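A brief follow-up sketch, using only the variables defined above: after training, the CNN can be evaluated on the held-out test images, and the History object returned by fit() keeps the per-epoch metrics.

# Evaluate the trained CNN on the CIFAR-10 test set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(f"Test accuracy: {test_acc:.3f}")

# Per-epoch curves recorded during training
print(history.history['accuracy'])       # training accuracy per epoch
print(history.history['val_accuracy'])   # validation accuracy per epoch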