PSO/test.ipynb
jung-geun 983913f2d2 23-06-24
Simple fix to package imports
2023-06-24 03:31:40 +00:00


{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"ename": "FileNotFoundError",
"evalue": "[Errno 2] No such file or directory: './result/iris/05-26-07-03_100_100_0.5_1.5_0.75.csv'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[1], line 5\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mnumpy\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mnp\u001b[39;00m\n\u001b[1;32m 3\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mpandas\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mpd\u001b[39;00m\n\u001b[0;32m----> 5\u001b[0m iris \u001b[39m=\u001b[39m pd\u001b[39m.\u001b[39;49mread_csv(\u001b[39m\"\u001b[39;49m\u001b[39m./result/iris/05-26-07-03_100_100_0.5_1.5_0.75.csv\u001b[39;49m\u001b[39m\"\u001b[39;49m, header\u001b[39m=\u001b[39;49m\u001b[39mNone\u001b[39;49;00m)\n\u001b[1;32m 6\u001b[0m \u001b[39mprint\u001b[39m(iris\u001b[39m.\u001b[39mshape)\n\u001b[1;32m 7\u001b[0m \u001b[39mprint\u001b[39m(iris\u001b[39m.\u001b[39mhead())\n",
"File \u001b[0;32m~/miniconda3/envs/pso/lib/python3.8/site-packages/pandas/io/parsers/readers.py:912\u001b[0m, in \u001b[0;36mread_csv\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, date_format, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, encoding_errors, dialect, on_bad_lines, delim_whitespace, low_memory, memory_map, float_precision, storage_options, dtype_backend)\u001b[0m\n\u001b[1;32m 899\u001b[0m kwds_defaults \u001b[39m=\u001b[39m _refine_defaults_read(\n\u001b[1;32m 900\u001b[0m dialect,\n\u001b[1;32m 901\u001b[0m delimiter,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 908\u001b[0m dtype_backend\u001b[39m=\u001b[39mdtype_backend,\n\u001b[1;32m 909\u001b[0m )\n\u001b[1;32m 910\u001b[0m kwds\u001b[39m.\u001b[39mupdate(kwds_defaults)\n\u001b[0;32m--> 912\u001b[0m \u001b[39mreturn\u001b[39;00m _read(filepath_or_buffer, kwds)\n",
"File \u001b[0;32m~/miniconda3/envs/pso/lib/python3.8/site-packages/pandas/io/parsers/readers.py:577\u001b[0m, in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 574\u001b[0m _validate_names(kwds\u001b[39m.\u001b[39mget(\u001b[39m\"\u001b[39m\u001b[39mnames\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mNone\u001b[39;00m))\n\u001b[1;32m 576\u001b[0m \u001b[39m# Create the parser.\u001b[39;00m\n\u001b[0;32m--> 577\u001b[0m parser \u001b[39m=\u001b[39m TextFileReader(filepath_or_buffer, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwds)\n\u001b[1;32m 579\u001b[0m \u001b[39mif\u001b[39;00m chunksize \u001b[39mor\u001b[39;00m iterator:\n\u001b[1;32m 580\u001b[0m \u001b[39mreturn\u001b[39;00m parser\n",
"File \u001b[0;32m~/miniconda3/envs/pso/lib/python3.8/site-packages/pandas/io/parsers/readers.py:1407\u001b[0m, in \u001b[0;36mTextFileReader.__init__\u001b[0;34m(self, f, engine, **kwds)\u001b[0m\n\u001b[1;32m 1404\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39moptions[\u001b[39m\"\u001b[39m\u001b[39mhas_index_names\u001b[39m\u001b[39m\"\u001b[39m] \u001b[39m=\u001b[39m kwds[\u001b[39m\"\u001b[39m\u001b[39mhas_index_names\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[1;32m 1406\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhandles: IOHandles \u001b[39m|\u001b[39m \u001b[39mNone\u001b[39;00m \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[0;32m-> 1407\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_engine \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_make_engine(f, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mengine)\n",
"File \u001b[0;32m~/miniconda3/envs/pso/lib/python3.8/site-packages/pandas/io/parsers/readers.py:1661\u001b[0m, in \u001b[0;36mTextFileReader._make_engine\u001b[0;34m(self, f, engine)\u001b[0m\n\u001b[1;32m 1659\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39m\"\u001b[39m\u001b[39mb\u001b[39m\u001b[39m\"\u001b[39m \u001b[39mnot\u001b[39;00m \u001b[39min\u001b[39;00m mode:\n\u001b[1;32m 1660\u001b[0m mode \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mb\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m-> 1661\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhandles \u001b[39m=\u001b[39m get_handle(\n\u001b[1;32m 1662\u001b[0m f,\n\u001b[1;32m 1663\u001b[0m mode,\n\u001b[1;32m 1664\u001b[0m encoding\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49moptions\u001b[39m.\u001b[39;49mget(\u001b[39m\"\u001b[39;49m\u001b[39mencoding\u001b[39;49m\u001b[39m\"\u001b[39;49m, \u001b[39mNone\u001b[39;49;00m),\n\u001b[1;32m 1665\u001b[0m compression\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49moptions\u001b[39m.\u001b[39;49mget(\u001b[39m\"\u001b[39;49m\u001b[39mcompression\u001b[39;49m\u001b[39m\"\u001b[39;49m, \u001b[39mNone\u001b[39;49;00m),\n\u001b[1;32m 1666\u001b[0m memory_map\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49moptions\u001b[39m.\u001b[39;49mget(\u001b[39m\"\u001b[39;49m\u001b[39mmemory_map\u001b[39;49m\u001b[39m\"\u001b[39;49m, \u001b[39mFalse\u001b[39;49;00m),\n\u001b[1;32m 1667\u001b[0m is_text\u001b[39m=\u001b[39;49mis_text,\n\u001b[1;32m 1668\u001b[0m errors\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49moptions\u001b[39m.\u001b[39;49mget(\u001b[39m\"\u001b[39;49m\u001b[39mencoding_errors\u001b[39;49m\u001b[39m\"\u001b[39;49m, \u001b[39m\"\u001b[39;49m\u001b[39mstrict\u001b[39;49m\u001b[39m\"\u001b[39;49m),\n\u001b[1;32m 1669\u001b[0m storage_options\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49moptions\u001b[39m.\u001b[39;49mget(\u001b[39m\"\u001b[39;49m\u001b[39mstorage_options\u001b[39;49m\u001b[39m\"\u001b[39;49m, \u001b[39mNone\u001b[39;49;00m),\n\u001b[1;32m 1670\u001b[0m )\n\u001b[1;32m 1671\u001b[0m \u001b[39massert\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhandles \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 1672\u001b[0m f \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhandles\u001b[39m.\u001b[39mhandle\n",
"File \u001b[0;32m~/miniconda3/envs/pso/lib/python3.8/site-packages/pandas/io/common.py:859\u001b[0m, in \u001b[0;36mget_handle\u001b[0;34m(path_or_buf, mode, encoding, compression, memory_map, is_text, errors, storage_options)\u001b[0m\n\u001b[1;32m 854\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39misinstance\u001b[39m(handle, \u001b[39mstr\u001b[39m):\n\u001b[1;32m 855\u001b[0m \u001b[39m# Check whether the filename is to be opened in binary mode.\u001b[39;00m\n\u001b[1;32m 856\u001b[0m \u001b[39m# Binary mode does not support 'encoding' and 'newline'.\u001b[39;00m\n\u001b[1;32m 857\u001b[0m \u001b[39mif\u001b[39;00m ioargs\u001b[39m.\u001b[39mencoding \u001b[39mand\u001b[39;00m \u001b[39m\"\u001b[39m\u001b[39mb\u001b[39m\u001b[39m\"\u001b[39m \u001b[39mnot\u001b[39;00m \u001b[39min\u001b[39;00m ioargs\u001b[39m.\u001b[39mmode:\n\u001b[1;32m 858\u001b[0m \u001b[39m# Encoding\u001b[39;00m\n\u001b[0;32m--> 859\u001b[0m handle \u001b[39m=\u001b[39m \u001b[39mopen\u001b[39;49m(\n\u001b[1;32m 860\u001b[0m handle,\n\u001b[1;32m 861\u001b[0m ioargs\u001b[39m.\u001b[39;49mmode,\n\u001b[1;32m 862\u001b[0m encoding\u001b[39m=\u001b[39;49mioargs\u001b[39m.\u001b[39;49mencoding,\n\u001b[1;32m 863\u001b[0m errors\u001b[39m=\u001b[39;49merrors,\n\u001b[1;32m 864\u001b[0m newline\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39m\"\u001b[39;49m,\n\u001b[1;32m 865\u001b[0m )\n\u001b[1;32m 866\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m 867\u001b[0m \u001b[39m# Binary mode\u001b[39;00m\n\u001b[1;32m 868\u001b[0m handle \u001b[39m=\u001b[39m \u001b[39mopen\u001b[39m(handle, ioargs\u001b[39m.\u001b[39mmode)\n",
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: './result/iris/05-26-07-03_100_100_0.5_1.5_0.75.csv'"
]
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"iris = pd.read_csv(\"./result/iris/05-26-07-03_100_100_0.5_1.5_0.75.csv\", header=None)\n",
"print(iris.shape)\n",
"print(iris.head())\n",
"\n",
"loss = []\n",
"acc = []\n",
"for i in range(len(iris.iloc[0])):\n",
" if i % 2 == 0:\n",
" loss.append(iris[i])\n",
" else:\n",
" acc.append(iris[i])\n",
"\n",
"print(len(loss))\n",
"print(len(acc))\n",
"\n",
"plt.subplot(2,1,1)\n",
"for i in range(len(loss)):\n",
" plt.plot(loss[i], label=f\"loss_{i}\")\n",
"\n",
"plt.subplot(2,1,2)\n",
"for i in range(len(acc)):\n",
" plt.plot(acc[i], label=f\"acc_{i}\")"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-05-26 03:53:32.688348: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2023-05-26 03:53:32.796903: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2023-05-26 03:53:33.196478: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvrtc.so.11.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/cuda-11.2/lib64:/usr/local/TensorRT/lib:/usr/local/cuda-11.2/lib64:/usr/local/TensorRT/lib:\n",
"2023-05-26 03:53:33.196616: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvrtc.so.11.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/cuda-11.2/lib64:/usr/local/TensorRT/lib:/usr/local/cuda-11.2/lib64:/usr/local/TensorRT/lib:\n",
"2023-05-26 03:53:33.196622: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n"
]
}
],
"source": [
"import numpy as np\n",
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras.layers import (Conv2D, Dense, Dropout, Flatten,\n",
" MaxPooling2D)\n",
"from tensorflow.keras.models import Sequential\n",
"\n",
"\n",
"def make_model():\n",
" model = Sequential()\n",
" model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=(28,28,1)))\n",
" model.add(MaxPooling2D(pool_size=(3, 3)))\n",
" model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\n",
" model.add(MaxPooling2D(pool_size=(2, 2)))\n",
" model.add(Dropout(0.25))\n",
" model.add(Flatten())\n",
" model.add(Dense(128, activation='relu'))\n",
" model.add(Dense(10, activation='softmax'))\n",
"\n",
" # model.summary()\n",
"\n",
" return model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-05-26 03:53:33.924839: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:33.928891: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:33.929032: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:33.929450: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2023-05-26 03:53:33.929902: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:33.930018: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:33.930117: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:34.287172: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:34.287322: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:34.287430: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2023-05-26 03:53:34.287524: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 10109 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3060, pci bus id: 0000:09:00.0, compute capability: 8.6\n"
]
}
],
"source": [
"model = make_model()\n",
"# json_ = model.to_json()\n",
"# print(json_)\n",
"# for layer in model.get_weights():\n",
" # print(layer.shape)\n",
"weight = model.get_weights()"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0 ~ 800\n",
"(5, 5, 1, 32)\n",
"800 ~ 832\n",
"(32,)\n",
"832 ~ 19264\n",
"(3, 3, 32, 64)\n",
"19264 ~ 19328\n",
"(64,)\n",
"19328 ~ 93056\n",
"(576, 128)\n",
"93056 ~ 93184\n",
"(128,)\n",
"93184 ~ 94464\n",
"(128, 10)\n",
"94464 ~ 94474\n",
"(10,)\n",
"[800, 32, 18432, 64, 73728, 128, 1280, 10]\n",
"[(5, 5, 1, 32), (32,), (3, 3, 32, 64), (64,), (576, 128), (128,), (128, 10), (10,)]\n"
]
}
],
"source": [
"from time import time\n",
"\n",
"import cupy as cp\n",
"\n",
"\n",
"def encode(weights):\n",
" w_gpu = cp.array([])\n",
" lenght = []\n",
" shape = []\n",
" for layer in weights:\n",
" shape.append(layer.shape)\n",
" w_ = layer.reshape(-1)\n",
" lenght.append(len(w_))\n",
" w_gpu = cp.append(w_gpu, w_)\n",
" \n",
" return w_gpu, shape, lenght\n",
"\n",
"def decode(weight, shape, lenght):\n",
" weights = []\n",
" start = 0\n",
" for i in range(len(shape)):\n",
" end = start + lenght[i]\n",
" print(f\"{start} ~ {end}\")\n",
" print(f\"{shape[i]}\")\n",
" w_ = weight[start:end]\n",
" w_ = w_.reshape(shape[i])\n",
" weights.append(w_)\n",
" start = end\n",
"\n",
" return weights\n",
"\n",
"w = 0.8\n",
"v,_,_ = encode(weight)\n",
"c0 = 0.5\n",
"c1 = 1.5\n",
"r0 = 0.2\n",
"r1 = 0.8\n",
"p_best,_,_ = encode(weight)\n",
"g_best,_,_ = encode(weight)\n",
"layer,shape,leng = encode(weight)\n",
"\n",
"# new_v = w*v[i]\n",
"# new_v = new_v + c0*r0*(p_best[i] - layer)\n",
"# new_v = new_v + c1*r1*(self.g_best[i] - layer)\n",
"\n",
"start = time()\n",
"new_velocity = w * v + c0 * r0 * (p_best - layer) + c1 * r1 * (g_best - layer)\n",
"\n",
"# print(new_velocity)\n",
"\n",
"we2 = decode(new_velocity, shape, leng)\n",
"# print(we2)\n",
"\n",
"\n",
"# # s= [1,2]\n",
"# print(w)\n",
"print(leng)\n",
"print(shape)\n",
"\n",
"# w2 = w\n",
"# c1 = c\n",
"\n",
"# tf_start = time()\n",
"# w3 = tf.multiply(w2, w)\n",
"# tf_end = time()\n",
"# mul_start = time()\n",
"# w4 = w2 * w\n",
"# mul_end = time()\n",
"# cuda_start = time()\n",
"# w5 = c1 * c\n",
"# cuda_end = time()\n",
"\n",
"# print(f\"tf 연산 > {tf_end-tf_start} | {w3}\")\n",
"# print(f\"곱셈 연산 > {mul_end-mul_start} | {w4}\")\n",
"# print(f\"cuda 연산 > {cuda_end-cuda_start} | {w5}\")\n",
"\n",
"# for i in range(len(w)):\n",
"# if w[i] != w2[i]:\n",
"# print(\"not same\")\n",
"# break\n",
"# else:\n",
"# print(\"same\")\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1/1 [==============================] - 0s 452ms/step\n",
"[[0.0000000e+00 1.0000000e+00 8.5117706e-28]\n",
" [0.0000000e+00 1.0000000e+00 0.0000000e+00]\n",
" [1.0000000e+00 3.3700031e-35 0.0000000e+00]\n",
" [1.0000000e+00 1.3158974e-19 0.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 0.0000000e+00]\n",
" [0.0000000e+00 0.0000000e+00 1.0000000e+00]\n",
" [1.0000000e+00 1.4602315e-27 0.0000000e+00]\n",
" [0.0000000e+00 0.0000000e+00 1.0000000e+00]\n",
" [1.0000000e+00 2.4845295e-16 0.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 1.6942224e-33]\n",
" [1.0000000e+00 0.0000000e+00 0.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 0.0000000e+00]\n",
" [1.0000000e+00 9.0455008e-36 0.0000000e+00]\n",
" [1.0000000e+00 0.0000000e+00 0.0000000e+00]\n",
" [0.0000000e+00 1.8117375e-33 1.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 6.7984806e-36]\n",
" [0.0000000e+00 1.7472901e-25 1.0000000e+00]\n",
" [0.0000000e+00 6.2991115e-37 1.0000000e+00]\n",
" [0.0000000e+00 0.0000000e+00 1.0000000e+00]\n",
" [0.0000000e+00 1.0598510e-30 1.0000000e+00]\n",
" [1.0000000e+00 1.7519910e-30 0.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 0.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 0.0000000e+00]\n",
" [1.0000000e+00 7.4562871e-27 0.0000000e+00]\n",
" [0.0000000e+00 0.0000000e+00 1.0000000e+00]\n",
" [0.0000000e+00 0.0000000e+00 1.0000000e+00]\n",
" [0.0000000e+00 0.0000000e+00 1.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 0.0000000e+00]\n",
" [0.0000000e+00 1.0000000e+00 0.0000000e+00]\n",
" [1.0000000e+00 0.0000000e+00 0.0000000e+00]]\n",
"[[0. 1. 0.]\n",
" [0. 1. 0.]\n",
" [1. 0. 0.]\n",
" [1. 0. 0.]\n",
" [0. 1. 0.]\n",
" [0. 0. 1.]\n",
" [1. 0. 0.]\n",
" [0. 0. 1.]\n",
" [1. 0. 0.]\n",
" [0. 1. 0.]\n",
" [1. 0. 0.]\n",
" [0. 1. 0.]\n",
" [1. 0. 0.]\n",
" [1. 0. 0.]\n",
" [0. 0. 1.]\n",
" [0. 1. 0.]\n",
" [0. 0. 1.]\n",
" [0. 0. 1.]\n",
" [0. 0. 1.]\n",
" [0. 0. 1.]\n",
" [1. 0. 0.]\n",
" [0. 1. 0.]\n",
" [0. 1. 0.]\n",
" [1. 0. 0.]\n",
" [0. 0. 1.]\n",
" [0. 0. 1.]\n",
" [0. 0. 1.]\n",
" [0. 1. 0.]\n",
" [0. 1. 0.]\n",
" [1. 0. 0.]]\n",
"1/1 [==============================] - 0s 88ms/step - loss: 0.0000e+00 - accuracy: 1.0000\n",
"[0.0, 1.0]\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-06-02 14:34:49.851147: I tensorflow/stream_executor/cuda/cuda_blas.cc:1614] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.\n"
]
}
],
"source": [
"import numpy as np\n",
"import tensorflow as tf\n",
"from sklearn.datasets import load_iris\n",
"from sklearn.model_selection import train_test_split\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"from tensorflow.keras.models import Sequential\n",
"\n",
"\n",
"def get_xor():\n",
" x = np.array([[0,0],[0,1],[1,0],[1,1]])\n",
" y = np.array([[0],[1],[1],[0]])\n",
"\n",
" return x,y\n",
"\n",
"def get_iris():\n",
" iris = load_iris()\n",
" x = iris.data\n",
" y = iris.target\n",
"\n",
" y = keras.utils.to_categorical(y, 3)\n",
"\n",
" x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=True, stratify=y)\n",
"\n",
" return x_train, x_test, y_train, y_test\n",
"\n",
"# model = keras.models.load_model(\"./result/xor/06-02-13-31/75_0.35_0.8_0.6.h5\")\n",
"model = keras.models.load_model(\"./result/iris/06-02-13-48/50_0.4_0.8_0.7.h5\")\n",
"# x,y = get_xor()\n",
"x_train, x_test, y_train, y_test = get_iris()\n",
"\n",
"print(model.predict(x_test))\n",
"print(y_test)\n",
"print(model.evaluate(x_test,y_test))"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-06-11 14:01:31.378413: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"TensorFlow version: 2.12.0\n",
"Linked TRT ver: (8, 4, 3)\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"\n",
"print(\"TensorFlow version:\", tf.__version__)\n",
"import tensorflow.compiler as tf_cc\n",
"import tensorrt as trt\n",
"\n",
"linked_trt_ver=tf_cc.tf2tensorrt._pywrap_py_utils.get_linked_tensorrt_version()\n",
"print(f\"Linked TRT ver: {linked_trt_ver}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "pso",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}