Friday 26 February 2021

test flights 3

expense
Feb 25 - driving 180km, fuel (p), field dinner
Feb 26 - driving 180km, field dinner, Dollarama
Feb 27 - driving 200km, fuel (p)

timesheet
Feb 25 - bundle, gas plant, power line new tabi 2 - 14h
Feb 26 - install tabi 2, tabi 3 - 12h
Feb 27 - bundle, mountain, gas plant, power line, lake tabi 2,3 - 12h

Wednesday 24 February 2021

python opencv 1



grey scale

resize

half size

rotate
//cmd
pip install opencv-python

//main.py
import cv2

img = cv2.imread('assets/beach.jpg', 1)
#img = cv2.resize(img, (384, 216))
img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)

cv2.imwrite('new_img.jpg', img)

cv2.imshow('beach image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

reference:

Monday 22 February 2021

nextjs 5 deploy to vercel



//powershell
npm i -g vercel

vercel --version
Vercel CLI 21.2.3

vercel login
Enter your email

PS D:\nextjs> vercel
Vercel CLI 21.2.3
? Set up and deploy “D:\nextjs”? [Y/n] y
? Which scope do you want to deploy to? chuanshuoge6
? Link to existing project? [y/N] n
? What’s your project’s name? nextjs
? In which directory is your code located? ./
Auto-detected Project Settings (Next.js):
- Build Command: `npm run build` or `next build`
- Output Directory: Next.js default
- Development Command: next dev --port $PORT
? Want to override the settings? [y/N] n

PS D:\nextjs> vercel
Vercel CLI 21.2.3
🔍  Inspect: https://vercel.com/chuanshuoge6/nextjs-a5oz7cqag/chuanshuoge6 [2s]
✅  Preview: https://nextjs-chuanshuoge6.vercel.app [copied to clipboard] [1m]
📝  To deploy to production, run `vercel --prod`
PS D:\nextjs> vercel logs nextjs-a5oz7cqag-chuanshuoge6.vercel.app

Vercel CLI 21.2.3
🔍  Inspect: https://vercel.com/chuanshuoge6/nextjs-pkqd0854w/chuanshuoge6 [3s]

reference:

test flights 2

expense
Feb 12 - hard drive
Feb 15 - driving 160km, ethernet adapter, lunch
Feb 16 - driving 160km, fuel
Feb 17 - driving 200km, fuel
Feb 19 - driving 160km, fuel
Feb 22 - hard drive
Feb 23 - driving 200km, field dinner
Feb 24 - ethernet switch

timesheet
Feb 15 - ground test new tabi & uv - 6h
Feb 16 - bundle uv, new tabi - 8h
Feb 17 - bundle, power line uv, new tabi 2 - 12h
Feb 19 - bundle, city uv new tabi 2 - 8h
Feb 23 - drive test new tabi 2 - 8h

Sunday 21 February 2021

大唐不夜城

nextjs 4 api response

code link: https://github.com/chuanshuoge6/nextjs
api json response

fetch from api

//api/articles/index.js

import {articles} from '../../../data'

export default function handler(req, res){
    res.status(200).json(articles)
}

---------------------
//api/articles/[id].js

import {articles} from '../../../data'

export default function handler({query: {id}}, res){
    const filtered = articles.filter(article => article.id === id)
    
    if(filtered.length > 0){
        res.status(200).json(filtered[0])
    }
    else{
        res.status(404).json({message: ` Aritle with the id of ${id} is not found`})
    }
}

-----------------------------
//data.js

export const articles = [
    {
      id: '1',
      title: 'GitHub introduces dark mode and auto-merge pull request',
      excerpt:
        'GitHub today announced a bunch of new features at its virtual GitHub...',
      body:
        'GitHub today announced a bunch of new features at its virtual GitHub Universe conference including dark mode, auto-merge pull requests, and Enterprise Server 3.0. In the past couple of years, almost all major apps have rolled out a dark theme for its users, so why not GitHub?',
    },
...
----------------
//pages/index.js

import Head from 'next/head'
import ArticleList from '../components/ArticleList'
import {server} from '../config'

export default function Home({articles}) {
  //console.log(articles)
  return (
    <div>
      <Head>
        <title>WebDev Newz</title>
        <meta name='keywors' content='web development'/>
      </Head>

      <ArticleList articles={articles} />
    </div>
  )
}

export const getStaticProps = async () => {
  const res = await fetch(`${server}/api/articles`)
  const articles = await res.json()

  return{
    props:{articles}
  }
}

--------------------
//pages/articles/[id]/index.js

import {useRouter} from 'next/router'
import Link from 'next/link'
import {server} from '../../../config'

const article = ({article}) => {

    return (
    <div>
        <h1>{article.title}</h1>
        <p>{article.body}</p>
        <br/>
        <Link href='/'>Go Back</Link>
    </div>)
}

export const getStaticProps = async (context) =>{
    const res = await fetch(`${server}/api/articles/${context.params.id}`)
    
    const article = await res.json()
    
    return{
        props:{article}
      }
}

export const getStaticPaths = async () =>{
    const res = await fetch(`${server}/api/articles`)
    
    const articles = await res.json()

    const ids = articles.map(article => article.id)
    const paths = ids.map(id => ({params: {id: id.toString()}}))
    
    
    return{
        paths,
        fallback: false
      }
}

export default article

-----------------------
//config/index.js

const dev = process.env.NODE_ENV !== 'production'

export const server = dev ? 'http://localhost:3000' : 'https//yourwebsite.com'

---------------------
reference:

Sinking Cities: Ho Chi Minh City

Saturday 20 February 2021

Top 5 Furnace Problems and How to Fix Them



nextjs 3 getStaticPaths, export static site

project url: http://chuanshuoge-nextjs.surge.sh/
url generated on server

//pages/article/[id]/index.js
import {useRouter} from 'next/router'
import Link from 'next/link'

const article = ({article}) => {
    //const router = useRouter()
    //const {id} = router.query

    //return <div>This is article {id}</div>
    return (
    <div>
        <h1>{article.title}</h1>
        <p>{article.body}</p>
        <br/>
        <Link href='/'>Go Back</Link>
    </div>)
}

/* export const getServerSideProps = async (context) =>{
    const res = await fetch(`https://jsonplaceholder.typicode.com/posts/${context.params.id}`)
    
    const article = await res.json()
    
    return{
        props:{article}
      }
}
 */

export const getStaticProps = async (context) =>{
    const res = await fetch(`https://jsonplaceholder.typicode.com/posts/${context.params.id}`)
    
    const article = await res.json()
    
    return{
        props:{article}
      }
}

//urls are generated from server
export const getStaticPaths = async () =>{
    const res = await fetch(`https://jsonplaceholder.typicode.com/posts`)
    
    const articles = await res.json()

    const ids = articles.map(article => article.id)
    const paths = ids.map(id => ({params: {id: id.toString()}}))
    
    
    return{
        paths,
        fallback: false
      }
}

export default article

------------------------
//package.json
{
  "name": "nextjs",
  "version": "0.1.0",
  "private": true,
  "scripts": {
    "dev": "next dev",
    "build": "next build",
    "start": "next start",
    "export": "next build && next export"
  },
  "dependencies": {
    "next": "10.0.7",
    "react": "17.0.1",
    "react-dom": "17.0.1"
  }
}

//cmd - export static website, out folder is created
npm run export
npm install -g serve

static website served on port 8000
reference:

Thursday 18 February 2021

nextjs 2 getStaticProps, getServerSideProp


click on tab to see article details


//pages/index.js

import Head from 'next/head'
import ArticleList from '../components/ArticleList'

export default function Home({articles}) {
  //console.log(articles)
  return (
    <div>
      <Head>
        <title>WebDev Newz</title>
        <meta name='keywors' content='web development'/>
      </Head>

      <ArticleList articles={articles} />
    </div>
  )
}

export const getStaticProps = async () => {
  const res = await fetch(`https://jsonplaceholder.typicode.com/posts?_limit=6`)
  const articles = await res.json()

  return{
    props:{articles}
  }
}

---------------------------------
//pages/article/[id]/index.js

import {useRouter} from 'next/router'
import Link from 'next/link'

const article = ({article}) => {
    //const router = useRouter()
    //const {id} = router.query

    //return <div>This is article {id}</div>
    return (
    <div>
        <h1>{article.title}</h1>
        <p>{article.body}</p>
        <br/>
        <Link href='/'>Go Back</Link>
    </div>)
}

export const getServerSideProps = async (context) =>{
    const res = await fetch(`https://jsonplaceholder.typicode.com/posts/${context.params.id}`)
    
    const article = await res.json()
    
    return{
        props:{article}
      }
}

export default article

--------------------------------
//components/ArticleItem.js

import Link from 'next/link'
import articleStyles from '../styles/Article.module.css'

const ArticleItem = ({article}) => {
    return (
        <Link href="/article/[id]" as={`/article/${article.id}`}>
            <a className={articleStyles.card}>
                <h3>{article.title} &rarr;</h3>
            </a>
        </Link>
    )
}

export default ArticleItem

------------------------
reference:

getStaticProps vs getServerSideProps

server side fetch increases performance as the server will generally have a faster connection to the data source. It also increases security by exposing less of the data fetching logic.

Dial Vision review



Wednesday 17 February 2021

nextjs 1




//nextjs renders all web components visible in html code, easy for web crawler to pick up.

//cmd - initialize new nextjs project
npx create-next-app projectname

//pages/_app.js
import '../styles/globals.css'
import Layout from '../components/Layout'

function MyApp({ Component, pageProps }) {
  return (
  <Layout>
      <Component {...pageProps} />
  </Layout>
  )
}

export default MyApp

-----------------------
//componnents/Layout.js
import styles from '../styles/Layout.module.css'
import Nav from './Nav'
import Header from './Header'

const Layout = ({children})=>{
    return(
        <div>
            <Nav></Nav>
            <div className={styles.container}>
                <main className={styles.main}>
                    <Header></Header>
                    {children}
                </main>
            </div>
        </div>
    )
}

export default Layout

---------------------------
//components/Header.js
import headerStyles from '../styles/Header.module.css'

const Header = () => {
    const x = 2
    return (
        <div>
            <h1 className={headerStyles.title}>
                <span>WebDev</span> News
            </h1>
            
           {/*  <style jsx>
                {`
                    .title{
                        color: ${x > 3 ? 'red' : 'blue'};
                    }
                `}
            </style> */}
            <p className={headerStyles.description}>
                Keep up to date with the latest web dev news</p>
        </div>
    )
}

export default Header

------------------------
//components/Nav.js
import navStyles from '../styles/Nav.module.css'
import Link from 'next/link'

const Nav = () =>{
    return (
        <nav className={navStyles.nav}>
            <ul>
                <li>
                    <Link href='/'>Home</Link>
                </li>
                <li>
                    <Link href='/about'>About</Link>
                </li>
            </ul>
        </nav>
    )
}

export default Nav

------------------------
//pages/index.js
import Head from 'next/head'

export default function Home() {
  return (
    <div>
      <Head>
        <title>WebDev Newz</title>
        <meta name='keywors' content='web development'/>
      </Head>

      <h1>Welcome to Next</h1>
    </div>
  )
}

--------------------------------
//styles/Nav.module.css
.nav{
    height: 50px;
    padding: 10px;
    background: #000;
    color: #fff;
    display: flex;
    align-items: center;
    justify-content: flex-start;
}

.nav ul{
    display: flex;
    justify-content: center;
    align-items: center;
    list-style: none;
}

.nav ul li a {
    margin: 5px 15px;
}

------------------------
//styles/Header.module.css
.title a, .title span {
    color: #0070f3;
    text-decoration: none;
  }
  
  .title a:hover,
  .title a:focus,
  .title a:active {
    text-decoration: underline;
  }
  
  .title {
    margin: 0;
    line-height: 1.15;
    font-size: 4rem;
  }
  
  .title,
  .description {
    text-align: center;
  }
  
  .description {
    line-height: 1.5;
    font-size: 1.5rem;
  }

reference:

Tuesday 9 February 2021

python speech ICAO Phonetic Alphabet


L/N* Word Pronunciation Morse
A Alpha AL FAH .-
B Bravo BRAH VOH -...
C Charlie CHAR LEE or SHAR LEE -.-.
D Delta DELL TAH -..
E Echo ECK OH .
F Foxtrot FOKS TROT ..-.
G Golf GOLF --.
H Hotel HOH TEL ....
I India IN DEE AH ..
J Juliett JEW LEE ETT .---
K Kilo KEY LOH -.-
L Lima LEE MAH .-..
M Mike MIKE --
N November NO VEM BER -.
O Oscar OSS CAH ---
P Papa PAH PAH .--.
Q Quebec KEH BECK --.-
R Romeo ROW ME OH .-.
S Sierra SEE AIR RAH ...
T Tango TANG GO -
U Uniform YOU NEE FORM or OO NEE FORM ..-
V Victor VIK TAH ...-
W Whiskey WISS KEY .--
X X-Ray ECKS RAY -..-
Y Yankee YANG KEY -.--
Z Zulu ZOO LOO --..
1 One WUN .----
2 Two TOO ..---
3 Three TREE ...--
4 Four FOW ER ....-
5 Five FIFE .....
6 Six SIX -....
7 Seven SEV EN --...
8 Eight AIT ---..
9 Nine NIN ER ----.
0 Zero ZE RO -----
. Decimal DAY SEE MAL
Hundred HUN DRED
Thousand TOU SAND

reference:

Peter Schiff

Sunday 7 February 2021

Meat King of Dubai

Women Supercar Drivers Club

Renault Morphoz

python speech recognition + duckduckgo instant answer api

#ask a question through microphone
#computer translate to text, and query on duckduckgo
#computer receive response from duckduckgo and speaks result
PS C:\Users\bob\python-speech> python main.py
Ask me any question?
say something!
You said: Banff National Park
Banff National Park is Canada's oldest national park, established in 1885. Located in Alberta's Rocky Mountains, 110–180 kilometres west of Calgary, Banff encompasses 6,641 square kilometres of mountainous terrain, with many glaciers and ice fields, dense coniferous forest, and alpine landscapes. The Icefields Parkway extends from Lake Louise, connecting to Jasper National Park in the north. Provincial forests and Yoho National Park are neighbours to the west, while Kootenay National Park is located to the south and Kananaskis Country to the southeast. The main commercial centre of the park is the town of Banff, in the Bow River valley. The Canadian Pacific Railway was instrumental in Banff's early years, building the Banff Springs Hotel and Chateau Lake Louise, and attracting tourists through extensive advertising. In the early 20th century, roads were built in Banff, at times by war internees from World War I, and through Great Depression-era public works projects.

import time
import speech_recognition as sr
import pyttsx3
import requests
import json

def speak(audioString):
    print(audioString)

    engine = pyttsx3.init()
    engine.say(audioString)
    engine.runAndWait()

def recordAudio():
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("say something!")
        #r.adjust_for_ambient_noise(source)
        audio = r.listen(source)

        try:
            data = r.recognize_google(audio)
            print("You said: " + data)

        except sr.UnknownValueError:
            return "Couldn't understand audio"

        except sr.RequestError as e:
            return "couldn't request results; {0}" + format(e)

        return data


speak("Ask me any question?")

data = recordAudio()

res = requests.get("https://api.duckduckgo.com",
             params={"q": data,
                     "format": "json",
                     "pretty": "1"})

answer = json.loads( res.content)

reply = answer["Abstract"] + answer["Answer"]

speak(reply)
"""
if reply:
    speak(reply)
elif reply == "":
    speak("I did't find answer")
"""

reference:

duckduckgo instant answer api

Friday 5 February 2021

python speech recognition

#computer speaks: what I can do for you
#user talks to microphone: how are you today
#computer talks back: I am fine

PS C:\Users\bob\python-speech> python main.py
what can i do for you?
say something!
You said: how are you today
I am fine

------------------------
#main.py
import time
import speech_recognition as sr
#from gtts import gTTS
import pyglet
import subprocess
import os
from playsound import playsound
import pyttsx3

def speak(audioString):
    print(audioString)
    #tts = gTTS(text=audioString, lang='en')
    #tts.save("audio.mp3")

    """
    wmp = r"C:\Program Files (x86)\Windows Media Player\wmplayer.exe"
    media_file = os.path.abspath(os.path.relpath("audio.mp3"))
    p = subprocess.call([wmp, media_file])
    """

    #playsound("audio.mp3")
    engine = pyttsx3.init()
    engine.say(audioString)
    engine.runAndWait()

def recordAudio():
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("say something!")
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)

        try:
            data = r.recognize_google(audio)
            print("You said: " + data)

        except sr.UnknownValueError:
            return "Couldn't understand audio"

        except sr.RequestError as e:
            return "couldn't request results; {0}" + format(e)

        return data

speak("what can i do for you?")
#time.sleep(2)

data = recordAudio()

if "how are you" in data:
    speak("I am fine")

---------------------------
#power shell
pip install SpeechRecognition
#pip install gtts
#pip install pyglet
pip install pyttsx3

pip install PyAudio-0.2.11-cp38-cp38-win_amd64.whl

#logs
Processing c:\users\bob\python-speech\pyaudio-0.2.11-cp38-cp38-win_amd64.whl
Installing collected packages: PyAudio
Successfully installed PyAudio-0.2.11

reference:

pyaudio

text to sound

microphone to text

Factory-Built Homes

Thursday 4 February 2021

keras 19 rnn generate music 2

code link: https://github.com/chuanshuoge6/keras-rnn-music



#main.py

import glob
import numpy as np
from music21 import *
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, Activation
from tensorflow.keras.models import load_model

#extract all notes composed in songs
notes = []
for file in glob.glob("midi_songs/*.mid"):
    midi = converter.parse(file)
    notes_to_parse = None
    parts = instrument.partitionByInstrument(midi)
    if parts: # file has instrument parts
        notes_to_parse = parts.parts[0].recurse()
    else: # file has notes in a flat structure
        notes_to_parse = midi.flat.notes
    for element in notes_to_parse:
        if isinstance(element, note.Note):
            notes.append(str(element.pitch))
        elif isinstance(element, chord.Chord):
            notes.append('.'.join(str(n) for n in element.normalOrder))

#print(notes)
#'A3', 'C4', 'D3', 'F3', 'G4', 'A3', 'C4'

sequence_length = 100
# remove mulitplication of notes, sort
pitchnames = sorted(set(item for item in notes))
#print(pitchnames)
n_vocab = len(pitchnames)

# create a dictionary to map pitches to integers
note_to_int = dict((note, number) for number, note in enumerate(pitchnames))
int_to_note = dict((number, note) for number, note in enumerate(pitchnames))
#print(note_to_int)
#there are 308 different notes

network_input = []
network_output = []

#100 notes in succession produce the 101st note,
#slide 100 notes block through music to generate input array,
#gather cosresponding output to each input block to form output array
# create input sequences and the corresponding outputs
for i in range(0, len(notes) - sequence_length, 1):
    sequence_in = notes[i:i + sequence_length]
    sequence_out = notes[i + sequence_length]
    network_input.append([note_to_int[char] for char in sequence_in])
    network_output.append(note_to_int[sequence_out])
n_patterns = len(network_input)

# reshape the input into a format compatible with LSTM layers
network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))
# normalize input
network_input = network_input / float(n_vocab)
network_output = tf.keras.utils.to_categorical(network_output)

#print(network_input)
#print(network_output)
#print(network_input.shape)
#(43990, 100, 1)
#print(network_output.shape)
#(43990, 308)

# train on GPU
pysical_devices = tf.config.experimental.list_physical_devices('GPU')
# print("Num GPUs Available: ", len(pysical_devices))
tf.config.experimental.set_memory_growth(pysical_devices[0], True)
"""
model = Sequential()

model.add(LSTM(
    256,
    input_shape=(network_input.shape[1], network_input.shape[2]),
    return_sequences=True
))

model.add(Dropout(0.3))
model.add(LSTM(512, return_sequences=True))

model.add(Dropout(0.3))
model.add(LSTM(256))

model.add(Dense(256))
model.add(Dropout(0.3))

model.add(Dense(n_vocab))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(network_input, network_output, epochs=200, batch_size=64)
model.save('model.h5')
"""
#start generating music
#pick random note block as a start
#start = np.random.randint(0, len(network_input)-1)
start = 40000

pattern = network_input[start]
prediction_output = []

model = load_model('model.h5')

# generate 500 notes, use 100 notes as start to generate 101st note
# then use 1-101 to generate 102...
for note_index in range(500):
    prediction_input = np.reshape(pattern, (1, len(pattern), 1))
    prediction_input = prediction_input / float(n_vocab)

    prediction = model.predict(prediction_input, verbose=0)
    index = np.argmax(prediction)

    result = int_to_note[index]
    prediction_output.append(result)

    pattern = np.append(pattern, index)
    pattern = pattern[1:len(pattern)]

#music 21 translates
offset = 0
output_notes = []
# create note and chord objects based on the values generated by the model
for pattern in prediction_output:
    # pattern is a chord
    if ('.' in pattern) or pattern.isdigit():
        notes_in_chord = pattern.split('.')
        notes = []
        for current_note in notes_in_chord:
            new_note = note.Note(int(current_note))
            new_note.storedInstrument = instrument.Piano()
            notes.append(new_note)
        new_chord = chord.Chord(notes)
        new_chord.offset = offset
        output_notes.append(new_chord)
    # pattern is a note
    else:
        new_note = note.Note(pattern)
        new_note.offset = offset
        new_note.storedInstrument = instrument.Piano()
        output_notes.append(new_note)
    # increase offset each iteration so that notes do not stack
    offset += 0.5

midi_stream = stream.Stream(output_notes)
midi_stream.write('midi', fp='test_output.mid')

reference:

How to Upload Audio to YouTube