Helium is a network of hotspot devices that let IoT devices send data to the internet at low cost.
A Helium hotspot installed at home performs tasks that benefit the network, such as confirming node locations, sequencing data, and verifying the locations of devices that send data over the network.
Official site: https://www.helium.com/
Explorer: https://explorer.helium.com/
Hmm, I still don't really understand why you earn Helium just by setting up a mining device...
What is a Bitcoin wallet?
-> An app that ordinary users use to manage their bitcoin
-> Its main functions are storing bitcoin and sending/receiving it
– Private keys
– Bitcoin addresses
– Creating transactions
– Creating digital signatures
Individuals are identified by their private key and Bitcoin address.
It is now common for one person to hold multiple addresses.
Hierarchical Deterministic Wallet
– A single master key can derive multiple private keys and addresses hierarchically (see the sketch below)
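As a sketch of the "one master key -> many keys" idea, pycoin's BIP32Node (the same class generate-key.py below uses) can derive child keys by path; the seed string and derivation paths here are illustrative only.
from pycoin.key.BIP32Node import BIP32Node

master = BIP32Node.from_master_secret(b"demo master seed")  # illustrative seed
for i in range(3):
    # derive child keys under branch 0 of the master key
    child = master.subkey_for_path("0/{}".format(i))
    print(child.wif(), child.address())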
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from pycoin.services import spendables_for_address
from pycoin.services.blockchain_info import BlockchainInfoProvider
from pycoin.tx.tx_utils import create_signed_tx
from PyQt5.QtWidgets import (QLabel, QLineEdit, QGridLayout, QWidget,
                             QApplication, QPushButton, QDesktopWidget,
                             QTextEdit)

class MainWindow(QWidget):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        address = ""      # source address
        destination = ""  # destination address
        self.fromaddress = QLineEdit()
        self.fromaddress.setText(address)
        self.sendto = QLineEdit()
        self.sendto.setText(destination)
        self.transaction = QTextEdit()
        self.transaction.setReadOnly(True)
        sendTxBtn = QPushButton('send Tx', self)
        sendTxBtn.clicked.connect(self.createTx)
        grid = QGridLayout()
        grid.setSpacing(10)
        grid.addWidget(QLabel('From'), 1, 0)
        grid.addWidget(self.fromaddress, 1, 1)
        grid.addWidget(QLabel('Send to'), 2, 0)
        grid.addWidget(self.sendto, 2, 1)
        grid.addWidget(QLabel('Transaction'), 3, 0)
        grid.addWidget(self.transaction, 3, 1, 1, 2)
        grid.addWidget(sendTxBtn, 4, 1)
        self.setLayout(grid)
        self.resize(320, 180)
        self.center()
        self.setWindowTitle('Create transaction demo')
        self.show()

    def center(self):
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def createTx(self):
        # Gather unspent outputs for the source address, sign, and broadcast
        address = self.fromaddress.text()
        destination = self.sendto.text()
        wif = ""  # private key (WIF) for the source address
        provider = BlockchainInfoProvider('BTC')
        spendables = spendables_for_address(address, "BTC")
        tx = create_signed_tx(spendables, payables=[destination], wifs=[wif])
        provider.broadcast_tx(tx)
        self.transaction.setText(tx.as_hex())

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(app.exec())
generate-key.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from pycoin.key.BIP32Node import BIP32Node
from PyQt5.QtWidgets import (QLabel, QLineEdit, QGridLayout, QWidget,
                             QApplication, QPushButton, QDesktopWidget,
                             QTextEdit)

def generate_keys(s):
    # Derive a BIP32 master key from the input phrase; return (WIF, address)
    key = BIP32Node.from_master_secret(s.encode("utf8"))
    return key.wif(), key.address()

class MainWindow(QWidget):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.inputLine = QLineEdit()
        self.privateKey = QTextEdit()
        self.privateKey.setReadOnly(True)
        self.publicKey = QTextEdit()
        self.publicKey.setReadOnly(True)
        generateBtn = QPushButton('Generate', self)
        generateBtn.clicked.connect(self.showKeys)
        grid = QGridLayout()
        grid.setSpacing(10)
        grid.addWidget(QLabel('Input'), 1, 0)
        grid.addWidget(self.inputLine, 1, 1)
        grid.addWidget(generateBtn, 1, 2)
        grid.addWidget(QLabel('Private Key'), 2, 0)
        grid.addWidget(self.privateKey, 2, 1, 1, 2)
        grid.addWidget(QLabel('Public Key'), 3, 0)
        grid.addWidget(self.publicKey, 3, 1, 1, 2)
        self.setLayout(grid)
        self.resize(320, 150)
        self.center()
        self.setWindowTitle('Key generate demo')
        self.show()

    def center(self):
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def showKeys(self):
        text = self.inputLine.text()
        private, public = generate_keys(text)
        self.privateKey.setText(private)
        self.publicKey.setText(public)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(app.exec())
OK, keep going
from hashlib import sha256
text = "ABC"
print(sha256(text.encode('ascii')).hexdigest())
$ python3 main.py
b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78
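Changing a single character flips the digest completely (the avalanche effect), which is why mining below has to brute-force a nonce instead of computing it. Reusing the import above:
print(sha256("ABC".encode('ascii')).hexdigest())
print(sha256("ABD".encode('ascii')).hexdigest())  # an entirely different digest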
from hashlib import sha256

MAX_NONCE = 10000000000

def SHA256(text):
    return sha256(text.encode('ascii')).hexdigest()

def mine(block_number, transactions, previous_hash, prefix_zeros):
    # Brute-force a nonce until the hash starts with prefix_zeros zeros
    prefix_str = '0' * prefix_zeros
    for nonce in range(MAX_NONCE):
        text = str(block_number) + transactions + previous_hash + str(nonce)
        new_hash = SHA256(text)
        if new_hash.startswith(prefix_str):
            print(f"Yay! Successfully mined bitcoins with nonce value:{nonce}")
            return new_hash
    raise Exception(f"Couldn't find a correct hash after trying {MAX_NONCE} times")

if __name__ == '__main__':
    transactions = '''
    Bob->Alice->20,
    Ken->Ryu->45
    '''
    difficulty = 6
    new_hash = mine(5, transactions, '00000000000008a3a41b85b8b29ad444def299fee21793cd8b9e567eab02cd81', difficulty)
    print(new_hash)
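With difficulty=6 the search can take a while; for a quick sanity check, a smaller difficulty finds a nonce almost immediately (the block values here are made up):
print(mine(1, "Alice->Bob->10", "0" * 64, 4))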
Bitcoin mining hardware
https://www.buybitcoinworldwide.com/mining/hardware/
Bitcoin explorer
https://www.blockchain.com/explorer
https://www.blockchain.com/btc/blocks?page=1
Bitcoin block 1
https://www.blockchain.com/btc/block/1
I see. I'd like to know what processing happens before and after this, though.
try:
    from PIL import Image
except ImportError:
    import Image
import pytesseract

FILENAME = 'test.jpg'
text = pytesseract.image_to_string(Image.open(FILENAME))
lines = text.split("\n")
for line in lines:
    # MRZ lines on a passport are the ones containing '<<<' filler
    if '<<<' in line:
        t = line.replace(' ', '')
        print(t)
test1.png
P<GBRUK<SPECIMEN<<ANGELA<ZOE<<<<<<<<<<<<<<<<
5334755143GBR8812049F2509286<<<<<<<<<<c<<<04
test2.png
PDCYPPOLITIS<<ZINONAS<<<<<<<<<<<<<<KKKKKKKKK
FOOOOOD005CYP8012148M3006151<<<<<<<<<<<<<<02
test3.png
PTNOROESTENBYEN<<AASAMUND<SPECIMEN<<<<<<<<<<
FHCO023539NOR5604230M2506126<<<<<<<<<<<<<<00
For some reason odd characters get mixed in.
def convert(filename):
    # Whiten any pixel brighter than the threshold to clean up the background
    img = Image.open(filename)
    img = img.convert('RGB')
    size = img.size
    img2 = Image.new('RGB', size)
    border = 110
    for x in range(size[0]):
        for y in range(size[1]):
            r, g, b = img.getpixel((x, y))
            if r > border or g > border or b > border:
                r = 255
                g = 255
                b = 255
            img2.putpixel((x, y), (r, g, b))
    return img2

text = pytesseract.image_to_string(convert('test.jpg'))
lines = text.split("\n")
for line in lines:
    if '<<<' in line:
        print(line)
Hmm, still not quite right.
SSL for access by raw IP address isn't supported, so you need to obtain and configure your own domain in advance.
$ sudo a2enmod ssl
$ sudo a2ensite default-ssl
$ service apache2 reload
$ sudo vi /etc/apache2/sites-available/virtual.host.conf
<VirtualHost *:80>
    DocumentRoot /var/www/node
    ServerName hoge.site
    #ServerAlias www.hoge.site
    # Other directives here
    RewriteEngine on
    RewriteCond %{SERVER_NAME} =hoge.site [OR]
    RewriteCond %{SERVER_NAME} =www.hoge.site
    RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
</VirtualHost>

<VirtualHost _default_:443>
    DocumentRoot /var/www/node
    ServerName hoge.site
    ServerAlias www.hoge.site
    # Other directives here
    SSLCertificateFile /etc/letsencrypt/live/hoge.site/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/hoge.site/privkey.pem
    Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>
$ sudo apt install firewalld
$ sudo firewall-cmd --add-service=http --zone=public --permanent
$ sudo firewall-cmd --add-service=https --zone=public --permanent
$ sudo systemctl restart firewalld
$ sudo firewall-cmd --list-all
services: dhcpv6-client http https ssh
$ sudo apt-get install certbot python3-certbot-apache
$ certbot --apache -d hoge-test.site

Oh, I see.
$ sudo apt -y install python3-six python3-wheel python3-numpy python3-grpcio python3-protobuf python3-termcolor python3-typing-extensions python3-h5py python3-markdown python3-werkzeug python3-requests-oauthlib python3-rsa python3-cachetools python3-google-auth
$ sudo apt -y install python3-numpy python3-sklearn python3-matplotlib python3-seaborn
$ sudo pip3 install -U tensorflow tensorflow_datasets
$ pip3 install pixellib
$ curl -LO https://github.com/ayoolaolafenwa/PixelLib/releases/download/1.2/mask_rcnn_coco.h5
Download sample2.jpeg and put it in the data folder.
https://pixellib.readthedocs.io/en/latest/Image_instance.html
import pixellib
from pixellib.instance import instance_segmentation
segment_image = instance_segmentation()
segment_image.load_model("data/mask_rcnn_coco.h5")
segment_image.segmentImage("data/sample2.jpeg", output_image_name = "image_new.jpg")
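Per the PixelLib docs linked above, segmentImage can also draw bounding boxes along with the instance masks:
# Draw bounding boxes in addition to the masks (output file name is arbitrary)
segment_image.segmentImage("data/sample2.jpeg", show_bboxes=True, output_image_name="image_bboxes.jpg")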

I understand how the mechanism works now, but it doesn't quite come out right...
Maybe it's my environment.
Use the short-time Fourier transform (STFT) and MFCC.
import numpy as np
import librosa
import librosa.display
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm
from scipy import fftpack

# Speaker labels
speakers = {'kirishima' : 0, 'suzutsuki' : 1, 'belevskaya' : 2}

# Return the feature matrix (STFT magnitude) and plot the spectrogram
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    y = np.abs(librosa.stft(a))
    plt.figure(figsize=(10, 4))
    librosa.display.specshow(librosa.amplitude_to_db(y, ref=np.max), y_axis='log', x_axis='time', sr=sr)
    plt.colorbar(format='%+2.0fdB')
    plt.tight_layout()
    return y

# Return pairs of features and (label, file name)
def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
    return (np.array(data_X), np.array(data_y))
# data_X, data_y = get_data('voiceset')
get_feat('sample/hi.wav')
get_feat('sample/lo.wav')

speakers = {'kirishima' : 0, 'suzutsuki' : 1, 'belevskaya' : 2}

# Return the feature matrix (plotting disabled this time)
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    y = np.abs(librosa.stft(a))
    # plt.figure(figsize=(10, 4))
    # librosa.display.specshow(librosa.amplitude_to_db(y, ref=np.max), y_axis='log', x_axis='time', sr=sr)
    # plt.colorbar(format='%+2.0fdB')
    # plt.tight_layout()
    return y

# Return pairs of features and (label, file name)
def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
    return (data_X, data_y)
data_X, data_y = get_data('voiceset')
train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, random_state=11813)
print("{} -> {}, {}".format(len(data_X), len(train_X), len(test_X)))
# Classify every frame of X and take a majority vote over the frame predictions
def predict(X):
    result = clf.predict(X.T)
    return np.argmax(np.bincount(result))

ok_count = 0
for X, y in zip(test_X, test_y):
    actual = predict(X)
    expected = y[0]
    file_name = y[1]
    ok_count += 1 if actual == expected else 0
    result = 'o' if actual == expected else 'x'
    print("{} file: {}, actual: {}, expected: {}".format(result, file_name, actual, expected))
print("{}/{}".format(ok_count, len(test_X)))
MFCC
# MFCC features instead of the raw STFT magnitude
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    y = librosa.feature.mfcc(y=a, sr=sr)
    # plt.figure(figsize=(10, 4))
    # librosa.display.specshow(librosa.amplitude_to_db(y, ref=np.max), y_axis='log', x_axis='time', sr=sr)
    # plt.colorbar(format='%+2.0fdB')
    # plt.tight_layout()
    return y
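Only get_feat changes; the per-frame training and majority vote are reused as-is. A quick shape check (assuming sample/hi.wav from earlier):
feat = get_feat('sample/hi.wav')
print(feat.shape)  # (20, n_frames): librosa returns 20 MFCC coefficients per frame by default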
o file: suzutsuki_b06.wav, actual: 1, expected: 1
o file: kirishima_04_su.wav, actual: 0, expected: 0
o file: kirishima_c01.wav, actual: 0, expected: 0
o file: belevskaya_b04.wav, actual: 2, expected: 2
o file: belevskaya_b14.wav, actual: 2, expected: 2
o file: kirishima_b04.wav, actual: 0, expected: 0
o file: suzutsuki_b08.wav, actual: 1, expected: 1
o file: belevskaya_b07.wav, actual: 2, expected: 2
o file: suzutsuki_b03.wav, actual: 1, expected: 1
o file: belevskaya_b10.wav, actual: 2, expected: 2
o file: kirishima_b01.wav, actual: 0, expected: 0
o file: belevskaya_07_su.wav, actual: 2, expected: 2
12/12
MFCC is ridiculously good.
Split the original waveform by frequency -> the per-frequency totals form the power spectrum.
Ignore time and treat the signal as a set of values indexed by frequency.
import numpy as np
import librosa
import librosa.display
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm
from scipy import fftpack

# Speaker labels
speakers = {'kirishima' : 0, 'suzutsuki' : 1, 'belevskaya' : 2}

# Return the spectrum (in dB) as the feature vector and plot it
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    fft_wave = fftpack.rfft(a, n=sr)
    fft_freq = fftpack.rfftfreq(n=sr, d=1/sr)
    y = librosa.amplitude_to_db(fft_wave, ref=np.max)
    plt.plot(fft_freq, y)
    plt.show()
    return y

# Return pairs of features and (label, file name)
def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
    return (np.array(data_X), np.array(data_y))
get_feat('sample/hi.wav')
get_feat('sample/lo.wav')
The horizontal axis is frequency.
hi: (spectrum plot)
low: (spectrum plot)
# Split into training and test data
train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, random_state=813)
print("{} -> {}, {}".format(len(data_X), len(train_X), len(test_X)))
clf = svm.SVC(gamma=0.0000001, C=10)
clf.fit(train_X, train_y.T[0])
ok_count = 0
for X, y in zip(test_X, test_y):
    actual = clf.predict(np.array([X]))[0]
    expected = y[0]
    file_name = y[1]
    ok_count += 1 if actual == expected else 0
    result = 'o' if actual == expected else 'x'
    print("{} file: {}, actual: {}, expected: {}".format(result, file_name, actual, expected))
print("{}/{}".format(ok_count, len(test_X)))
o file: belevskaya_b11.wav, actual: 2, expected: 2
o file: kirishima_c01.wav, actual: 0, expected: 0
x file: kirishima_c09.wav, actual: 2, expected: 0
x file: kirishima_04_su.wav, actual: 2, expected: 0
o file: belevskaya_b14.wav, actual: 2, expected: 2
o file: kirishima_b07.wav, actual: 0, expected: 0
x file: suzutsuki_b06.wav, actual: 2, expected: 1
x file: kirishima_c02.wav, actual: 2, expected: 0
o file: kirishima_b03.wav, actual: 0, expected: 0
o file: suzutsuki_b08.wav, actual: 1, expected: 1
o file: suzutsuki_b02.wav, actual: 1, expected: 1
o file: kirishima_b05.wav, actual: 0, expected: 0
8/12
Accuracy has improved over the raw-waveform approach (8/12 vs 5/12).
import librosa
import librosa.display
import matplotlib.pyplot as plt
a, sr = librosa.load('voiceset/kirishima_b01.wav')
librosa.display.waveplot(a, sr)

print(a)
print(len(a))
print(sr)
[ 1.3803428e-06 -2.3314392e-06 7.8938438e-06 … 0.0000000e+00
0.0000000e+00 0.0000000e+00]
132300
22050  <- sampling rate: number of samples per second (132300 samples / 22050 Hz = 6 s of audio)
### Comparing high and low sounds
a, sr = librosa.load('sample/hi.wav')
librosa.display.waveplot(a, sr)
plt.show()
a, sr = librosa.load('sample/lo.wav')
librosa.display.waveplot(a, sr)
plt.show()


hi oscillates more finely than low.
More oscillations per second generally means a higher pitch.
Based on this property, feed the waveforms to an SVM for speaker recognition.
import numpy as np
import librosa
import librosa.display
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm
dir_name = 'voiceset'
for file_name in sorted(os.listdir(path=dir_name)):
    print("read: {}".format(file_name))
    a, sr = librosa.load(os.path.join(dir_name, file_name))
    print(a.shape)
    librosa.display.waveplot(a, sr)
    plt.show()
Even the same speaker produces completely different waveforms from clip to clip.
speakers = {'kirishima': 0, 'suzutsuki': 1, 'belevskaya': 2}
def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        a, sr = librosa.load(os.path.join(dir_name, file_name))
        print(a.shape)
        speaker = file_name[0:file_name.index('_')]
        data_X.append(a)
        data_y.append((speakers[speaker], file_name))
    return (np.array(data_X), np.array(data_y))
data_X, data_y = get_data("voiceset")
To train an SVM, every feature vector must have the same number of elements, so below only the first 5000 samples (about 0.23 s at 22050 Hz) of each clip are used.
speakers = {'kirishima': 0, 'suzutsuki': 1, 'belevskaya': 2}
# Use only the first 5000 samples so every feature vector has the same length
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    return a[0:5000]

def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
    return (np.array(data_X), np.array(data_y))
data_X, data_y = get_data("voiceset")
print("====data_X====")
print(data_X.shape)
print(data_X)
print("====data_y====")
print(data_y.shape)
print(data_y)
Split into training and test data:
train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, random_state=11813)
print("{}->{}, {}".format(len(data_X), len(train_X),len(test_X)))
Train with an SVM:
clf = svm.SVC(gamma=0.0001, C=1)
clf.fit(train_X, train_y.T[0])
SVC(C=1, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
    decision_function_shape='ovr', degree=3, gamma=0.0001, kernel='rbf',
    max_iter=-1, probability=False, random_state=None, shrinking=True,
    tol=0.001, verbose=False)
Classify the test data:
clf.predict(np.array([test_X[0]]))
ok_count = 0
for X, y in zip(test_X, test_y):
    actual = clf.predict(np.array([X]))[0]
    expected = y[0]
    file_name = y[1]
    ok_count += 1 if actual == expected else 0
    result = 'o' if actual == expected else 'x'
    print("{} file: {}, actual:{}, expected: {}".format(result, file_name, actual, expected))
print("{}/{}".format(ok_count, len(test_X)))
x file: suzutsuki_b06.wav, actual:2, expected: 1
x file: kirishima_04_su.wav, actual:2, expected: 0
x file: kirishima_c01.wav, actual:2, expected: 0
o file: belevskaya_b04.wav, actual:2, expected: 2
o file: belevskaya_b14.wav, actual:2, expected: 2
x file: kirishima_b04.wav, actual:2, expected: 0
x file: suzutsuki_b08.wav, actual:2, expected: 1
o file: belevskaya_b07.wav, actual:2, expected: 2
x file: suzutsuki_b03.wav, actual:2, expected: 1
o file: belevskaya_b10.wav, actual:2, expected: 2
x file: kirishima_b01.wav, actual:2, expected: 0
o file: belevskaya_07_su.wav, actual:2, expected: 2
5/12
The prediction accuracy needs to be improved.
First, prepare an mp3 audio file.
Install librosa on Ubuntu:
$ pip3 install librosa
$ sudo apt-get install libsndfile1
$ sudo apt install ffmpeg
import librosa
import numpy as np
import matplotlib.pyplot as plt
file_name = "./test.mp3"
y, sr = librosa.load(str(file_name))
time = np.arange(0, len(y)) / sr
plt.plot(time, y)
plt.xlabel("Time(s)")
plt.ylabel("Sound Amplitude")
plt.savefig('image.jpg',dpi=100)

Whoa, now I get it.