mirror of
https://github.com/jung-geun/PSO.git
synced 2025-12-19 20:44:39 +09:00
23-10-18
Add a score check when saving the model; fix a batch size error
@@ -33,23 +33,6 @@ def get_data():
    return x_train, y_train, x_test, y_test


-def get_data_test():
-    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
-    x_test = x_test / 255.0
-    x_test = x_test.reshape((10000, 28, 28, 1))
-
-    y_train, y_test = tf.one_hot(y_train, 10), tf.one_hot(y_test, 10)
-
-    x_train, x_test = tf.convert_to_tensor(
-        x_train), tf.convert_to_tensor(x_test)
-    y_train, y_test = tf.convert_to_tensor(
-        y_train), tf.convert_to_tensor(y_test)
-
-    print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
-
-    return x_test, y_test
-
-
def make_model():
    model = Sequential()
    model.add(
@@ -21,6 +21,7 @@ if gpus:

def get_data():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
+    print(f"y_train : {y_train[0]} | y_test : {y_test[0]}")

    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train = x_train.reshape((60000, 28, 28, 1))
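
For orientation, a small standalone sketch (not part of the commit) of the shapes this preprocessing is expected to produce; the numbers follow from Fashion-MNIST's 60,000 training and 10,000 test images of 28x28 pixels:

import tensorflow as tf
from tensorflow.keras.datasets import fashion_mnist

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train = (x_train / 255.0).reshape((60000, 28, 28, 1))   # normalize and add a channel axis
y_train_oh = tf.one_hot(y_train, 10)                      # class ids 0-9 -> length-10 vectors

print(x_train.shape)     # (60000, 28, 28, 1)
print(y_train_oh.shape)  # (60000, 10)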
@@ -36,9 +37,9 @@ class _batch_generator:
    def __init__(self, x, y, batch_size: int = 32):
        self.batch_size = batch_size
        self.index = 0
-        dataset = tf.data.Dataset.from_tensor_slices((x, y))
-        self.dataset = list(dataset.batch(batch_size))
-        self.max_index = len(dataset) // batch_size
+        self.x = x
+        self.y = y
+        self.setBatchSize(batch_size)

    def next(self):
        self.index += 1
@@ -46,18 +47,50 @@ class _batch_generator:
            self.index = 0
        return self.dataset[self.index][0], self.dataset[self.index][1]

    def getMaxIndex(self):
        return self.max_index

    def getIndex(self):
        return self.index

    def setIndex(self, index):
        self.index = index

    def getBatchSize(self):
        return self.batch_size

    def setBatchSize(self, batch_size):
        self.batch_size = batch_size
        self.dataset = list(
            tf.data.Dataset.from_tensor_slices(
                (self.x, self.y)).batch(batch_size)
        )
        self.max_index = len(self.dataset)

    def getDataset(self):
        return self.dataset

# BEGIN: 5f8d9bcejpp
from keras.applications.resnet50 import ResNet50

def make_model():
    model = ResNet50(weights=None, input_shape=(28, 28, 1), classes=10)
    model = Sequential()
    model.add(
        Conv2D(32, kernel_size=(5, 5), activation="relu",
               input_shape=(28, 28, 1))
    )
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    return model
# END: 5f8d9bcejpp


model = make_model()
x_train, y_train, x_test, y_test = get_data()
print(x_train.shape)
y_train = tf.one_hot(y_train, 10)
y_test = tf.one_hot(y_test, 10)
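
Note that in make_model() above the ResNet50 instance is immediately rebound to the Sequential CNN, so only the latter is actually used. Below is a usage sketch (not part of the commit) of the revised _batch_generator; the toy arrays are illustrative stand-ins for the Fashion-MNIST tensors.

import numpy as np

x = np.random.rand(100, 28, 28, 1).astype("float32")                  # toy images
y = np.eye(10, dtype="float32")[np.random.randint(0, 10, size=100)]   # toy one-hot labels

gen = _batch_generator(x, y, batch_size=32)
print(gen.getMaxIndex())          # 4 -> three batches of 32 plus a final batch of 4
x_batch, y_batch = gen.next()     # advances the index, wrapping back to 0 past the last batch
print(x_batch.shape)              # (32, 28, 28, 1)

gen.setBatchSize(16)              # rebuilds the batched dataset and max_index
print(gen.getMaxIndex())          # 7 -> six batches of 16 plus a final batch of 4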
@@ -67,13 +100,14 @@ model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])

count = 0

-while count < 50:
+while count < 100:
    x_batch, y_batch = dataset.next()
    count += 1
    print("Training model...")
    model.fit(x_batch, y_batch, epochs=1, batch_size=1, verbose=1)

    print(count)
    print(f"Max index : {dataset.getMaxIndex()}")

print("Evaluating model...")
model.evaluate(x_test, y_test, verbose=2)
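
For comparison, a sketch (not from this commit) of driving exactly one pass over the batched data with getMaxIndex() instead of a fixed count of 100; dataset, model, x_test and y_test are assumed to be the objects built in the script above.

for _ in range(dataset.getMaxIndex()):
    x_batch, y_batch = dataset.next()
    model.fit(x_batch, y_batch, epochs=1, verbose=0)     # one update pass per mini-batch

loss, acc = model.evaluate(x_test, y_test, verbose=2)    # metrics=["accuracy"] -> [loss, accuracy]
print(f"loss : {loss}, acc : {acc}")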
@@ -1,7 +1,7 @@
from .optimizer import Optimizer as optimizer
from .particle import Particle as particle

-__version__ = "0.1.8"
+__version__ = "0.1.9"

__all__ = [
    "optimizer",
@@ -232,9 +232,9 @@ class Optimizer:
    def __init__(self, x, y, batch_size: int = 32):
        self.batch_size = batch_size
        self.index = 0
-        dataset = tf.data.Dataset.from_tensor_slices((x, y))
-        self.dataset = list(dataset.batch(batch_size))
-        self.max_index = len(dataset) // batch_size
+        self.x = x
+        self.y = y
+        self.setBatchSize(batch_size)

    def next(self):
        self.index += 1
@@ -242,6 +242,32 @@ class Optimizer:
            self.index = 0
        return self.dataset[self.index][0], self.dataset[self.index][1]

    def getMaxIndex(self):
        return self.max_index

    def getIndex(self):
        return self.index

    def setIndex(self, index):
        self.index = index

    def getBatchSize(self):
        return self.batch_size

    def setBatchSize(self, batch_size):
        self.batch_size = batch_size
        if self.batch_size > len(self.x):
            self.batch_size = len(self.x)
        print(f"batch size : {self.batch_size}")
        self.dataset = list(
            tf.data.Dataset.from_tensor_slices(
                (self.x, self.y)).batch(batch_size)
        )
        self.max_index = len(self.dataset)

    def getDataset(self):
        return self.dataset

    def fit(
        self,
        x,
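
The guard added to setBatchSize above clamps a requested batch size that exceeds the number of samples. A minimal standalone sketch of that behaviour (the rebatch helper is illustrative, not part of the library):

import numpy as np
import tensorflow as tf

def rebatch(x, y, batch_size):
    # Mirror of the clamp added in Optimizer.setBatchSize: never request a
    # batch larger than the dataset itself.
    if batch_size > len(x):
        batch_size = len(x)
    print(f"batch size : {batch_size}")
    dataset = list(tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size))
    return dataset, len(dataset)

x = np.zeros((10, 28, 28, 1), dtype="float32")
y = np.zeros((10, 10), dtype="float32")
_, max_index = rebatch(x, y, batch_size=32)   # requested 32, clamped to 10
print(max_index)                              # 1 -> a single batch holding all 10 samples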
@@ -575,7 +601,7 @@ class Optimizer:
            print(e)

        finally:
-            self.model_save(save_path)
+            self.model_save(x, y, save_path)
            print("model save")
            if save_info:
                self.save_info(save_path)
@@ -658,7 +684,7 @@ class Optimizer:
        model = self.get_best_model()
        model.save_weights(save_path)

-    def model_save(self, save_path: str = "./result"):
+    def model_save(self, x, y, save_path: str = "./result"):
        """
        Save the model that achieved the best score

@@ -669,6 +695,8 @@ class Optimizer:
            (keras.models): model
        """
        model = self.get_best_model()
+        score = model.evaluate(x, y, verbose=1)
+        print(f"model acc : {score[1]}, loss : {score[0]}")
        model.save(
            f"./{save_path}/{self.day}/{self.n_particles}_{self.c0}_{self.c1}_{self.w_min}.h5"
        )
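
In use, the new signature means evaluation data has to be passed when saving, which is the score check the commit message refers to. A sketch with illustrative names (pso_opt standing for an already-fitted pso.optimizer.Optimizer instance):

# x_test / y_test: held-out data prepared as in the training script above.
pso_opt.model_save(x_test, y_test, save_path="./result")
# Internally this now evaluates the best particle's model on (x_test, y_test),
# prints its accuracy and loss, and then writes the .h5 file.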